commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
346bb232062ff3068882cb29fa123779a19e4ea6 | fix stderr comment, clarify stdout vs stderr | uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco | raco/test_style.py | raco/test_style.py | from nose.plugins.skip import SkipTest
import subprocess
import sys
import unittest
def check_output_and_print_stderr(args):
"""Run the specified command. If it does not exit cleanly, print the stderr
of the command to stdout. Note that stderr prints are displayed as tests
run, whereas stdout prints show up next to the failed test. We want the
latter."""
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print e.output
raise
class StyleTest(unittest.TestCase):
"run flake8 with the right arguments and ensure all files pass"
def test_style(self):
"run flake8 with the right arguments and ensure all files pass"
check_output_and_print_stderr(['flake8', '--ignore=F', 'raco'])
def test_pylint(self):
"run pylint -E to catch obvious errors"
# TODO fix this. Most related to "flexible" use of types in
# Grappa, C, and Pipelines.
raise SkipTest()
check_output_and_print_stderr(['pylint', '-E', 'raco'])
| from nose.plugins.skip import SkipTest
import subprocess
import sys
import unittest
def check_output_and_print_stderr(args):
"""Run the specified command. If it does not exit cleanly, print the stderr
of the command to stderr"""
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print e.output
raise
class StyleTest(unittest.TestCase):
"run flake8 with the right arguments and ensure all files pass"
def test_style(self):
"run flake8 with the right arguments and ensure all files pass"
check_output_and_print_stderr(['flake8', '--ignore=F', 'raco'])
def test_pylint(self):
"run pylint -E to catch obvious errors"
# TODO fix this. Most related to "flexible" use of types in
# Grappa, C, and Pipelines.
raise SkipTest()
check_output_and_print_stderr(['pylint', '-E', 'raco'])
| bsd-3-clause | Python |
0039eefbfa546f24b3f10031e664341d60e4055c | Use previews in ranger fzf | darthdeus/dotfiles,darthdeus/dotfiles,darthdeus/dotfiles,darthdeus/dotfiles | ranger/commands.py | ranger/commands.py | from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
| from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
| mit | Python |
82073a946ff76a07d907cfb0a0cd8885055f36b3 | Bump version | zimeon/rdiffb | rdiffb/__init__.py | rdiffb/__init__.py | """Module config for rdiffb."""
from .rdiffb import *
# This is the one place the version number for rdiffb is stored,
# there is a regex for it in setup.py.
__version__ = '0.3.0'
| """Module config for rdiffb."""
from .rdiffb import *
# This is the one place the version number for rdiffb is stored,
# there is a regex for it in setup.py.
__version__ = '0.2.0'
| apache-2.0 | Python |
370a7b2a31d8e63b14d302f5205298f3cad0eb39 | Allow conversion of named tab for xlsx files | unpingco/csvkit,archaeogeek/csvkit,doganmeh/csvkit,snuggles08/csvkit,nriyer/csvkit,wireservice/csvkit,haginara/csvkit,matterker/csvkit,onyxfish/csvkit,gepuro/csvkit,moradology/csvkit,jpalvarezf/csvkit,elcritch/csvkit,Tabea-K/csvkit,reubano/csvkit,arowla/csvkit,bmispelon/csvkit,barentsen/csvkit,bradparks/csvkit__query_join_filter_CSV_cli,KarrieK/csvkit,cypreess/csvkit,dannguyen/csvkit,aequitas/csvkit,tlevine/csvkit,metasoarous/csvkit,Jobava/csvkit,wjr1985/csvkit,kyeoh/csvkit,themiurgo/csvkit | csvkit/convert/xlsx.py | csvkit/convert/xlsx.py | #!/usr/bin/env python
from cStringIO import StringIO
import datetime
from openpyxl.reader.excel import load_workbook
from csvkit import CSVKitWriter
from csvkit.typeinference import NULL_TIME
def normalize_datetime(dt):
if dt.microsecond == 0:
return dt
ms = dt.microsecond
if ms < 1000:
return dt.replace(microsecond=0)
elif ms > 999000:
return dt.replace(second=dt.second + 1, microsecond=0)
return dt
def xlsx2csv(f, output=None, **kwargs):
"""
Convert an Excel .xlsx file to csv.
Note: Unlike other convertor's, this one allows output columns to contain mixed data types.
Blank headers are also possible.
"""
streaming = True if output else False
if not streaming:
output = StringIO()
writer = CSVKitWriter(output)
book = load_workbook(f, use_iterators=True)
if 'sheet' in kwargs:
sheetn = kwargs['sheet']
sheet = book.get_sheet_by_name(sheetn)
else:
sheet = book.get_active_sheet()
for i, row in enumerate(sheet.iter_rows()):
if i == 0:
writer.writerow([c.internal_value for c in row])
continue
out_row = []
for c in row:
value = c.internal_value
if value.__class__ is datetime.datetime:
if value.time() != NULL_TIME:
value = normalize_datetime(value)
else:
value = value.date()
elif value.__class__ is float:
if value % 1 == 0:
value = int(value)
if value.__class__ in (datetime.datetime, datetime.date, datetime.time):
value = value.isoformat()
out_row.append(value)
writer.writerow(out_row)
if not streaming:
data = output.getvalue()
return data
# Return empty string when streaming
return ''
| #!/usr/bin/env python
from cStringIO import StringIO
import datetime
from openpyxl.reader.excel import load_workbook
from csvkit import CSVKitWriter
from csvkit.typeinference import NULL_TIME
def normalize_datetime(dt):
if dt.microsecond == 0:
return dt
ms = dt.microsecond
if ms < 1000:
return dt.replace(microsecond=0)
elif ms > 999000:
return dt.replace(second=dt.second + 1, microsecond=0)
return dt
def xlsx2csv(f, output=None, **kwargs):
"""
Convert an Excel .xlsx file to csv.
Note: Unlike other convertor's, this one allows output columns to contain mixed data types.
Blank headers are also possible.
"""
streaming = True if output else False
if not streaming:
output = StringIO()
writer = CSVKitWriter(output)
book = load_workbook(f, use_iterators=True)
sheet = book.get_active_sheet()
for i, row in enumerate(sheet.iter_rows()):
if i == 0:
writer.writerow([c.internal_value for c in row])
continue
out_row = []
for c in row:
value = c.internal_value
if value.__class__ is datetime.datetime:
if value.time() != NULL_TIME:
value = normalize_datetime(value)
else:
value = value.date()
elif value.__class__ is float:
if value % 1 == 0:
value = int(value)
if value.__class__ in (datetime.datetime, datetime.date, datetime.time):
value = value.isoformat()
out_row.append(value)
writer.writerow(out_row)
if not streaming:
data = output.getvalue()
return data
# Return empty string when streaming
return ''
| mit | Python |
f94dc5eb7135bdf51f8ca0c71b6f6f49c2ec3fec | Update version in pip package to 0.1.2 (#23) | qiuminxu/tensorboard,shakedel/tensorboard,shakedel/tensorboard,agrubb/tensorboard,qiuminxu/tensorboard,ioeric/tensorboard,qiuminxu/tensorboard,francoisluus/tensorboard-supervise,tensorflow/tensorboard,francoisluus/tensorboard-supervise,tensorflow/tensorboard,shakedel/tensorboard,francoisluus/tensorboard-supervise,qiuminxu/tensorboard,agrubb/tensorboard,ioeric/tensorboard,ioeric/tensorboard,agrubb/tensorboard,shakedel/tensorboard,ioeric/tensorboard,tensorflow/tensorboard,agrubb/tensorboard,francoisluus/tensorboard-supervise,tensorflow/tensorboard,agrubb/tensorboard,tensorflow/tensorboard,qiuminxu/tensorboard,tensorflow/tensorboard,shakedel/tensorboard,francoisluus/tensorboard-supervise,qiuminxu/tensorboard,agrubb/tensorboard,shakedel/tensorboard,ioeric/tensorboard,francoisluus/tensorboard-supervise,ioeric/tensorboard,tensorflow/tensorboard | tensorboard/pip_package/setup.py | tensorboard/pip_package/setup.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from setuptools import find_packages, setup
# This version string is semver compatible.
_VERSION = '0.1.2'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
'six >= 1.10.0',
'protobuf >= 3.2.0',
'werkzeug >= 0.11.10',
'html5lib == 0.9999999', # identical to 1.0b8
'markdown == 2.2.0',
'bleach == 1.5.0',
'tensorflow >= 1.2.0',
]
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
CONSOLE_SCRIPTS = [
'tensorboard = tensorboard.main:main',
]
setup(
name='tensorflow-tensorboard',
version=_VERSION.replace('-', ''),
description='TensorBoard lets you watch Tensors Flow',
long_description='',
url='http://tensorflow.org/',
author='Google Inc.',
author_email='opensource@google.com',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
package_data={
'tensorboard': [
'components/index.html',
'TAG',
],
},
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow tensorboard tensor machine learning visualizer',
)
| # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from setuptools import find_packages, setup
# This version string is semver compatible.
# Backwards-incompatible changes to Python API or plugin compatibility will
# result in a change to the MAJOR version.
_VERSION = '0.1.1'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
'six >= 1.10.0',
'protobuf >= 3.2.0',
'werkzeug >= 0.11.10',
'html5lib == 0.9999999', # identical to 1.0b8
'markdown == 2.2.0',
'bleach == 1.5.0',
'tensorflow >= 1.2.0',
]
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
CONSOLE_SCRIPTS = [
'tensorboard = tensorboard.main:main',
]
setup(
name='tensorflow-tensorboard',
version=_VERSION.replace('-', ''),
description='TensorBoard lets you watch Tensors Flow',
long_description='',
url='http://tensorflow.org/',
author='Google Inc.',
author_email='opensource@google.com',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
package_data={
'tensorboard': [
'components/index.html',
'TAG',
],
},
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow tensorboard tensor machine learning visualizer',
) | apache-2.0 | Python |
64078daac7791af4061bc1de7913d8a76254a4c1 | Rewrite search to use api | Aeronautics/aero | aero/adapters/pip.py | aero/adapters/pip.py | # -*- coding: utf-8 -*-
__author__ = 'nickl-'
from string import strip
from aero.__version__ import __version__
from importlib import import_module
from .base import BaseAdapter
class Pip(BaseAdapter):
"""
Pip adapter.
"""
def search(self, query):
m = import_module('pip.commands.search')
lst = {}
for r in m.transform_hits(m.SearchCommand().search(query, 'http://pypi.python.org/pypi')):
summary = ' '.join(map(strip, r['summary'].split('\n'))).replace(' ', ' ')
lst[self.package_name(r['name'])] = 'Version: {:<14} Score:{:>4}\n{}'.format(
max(r['versions']),
r['score'],
(summary if len(summary) < 200 else summary[:190] + '...').replace(' ', ' ')
)
return lst
def install(self, query):
self.shell([
'install',
'--force-reinstall',
'--timeout', '30',
'--egg',
'--log', '~/.aero/log/pip.log',
'--download-cache', '~/.aero/cache/pip',
query
])
return {}
| # -*- coding: utf-8 -*-
__author__ = 'nickl-'
from string import strip
from aero.__version__ import __version__
from importlib import import_module
from .base import BaseAdapter
class Pip(BaseAdapter):
"""
Pip adapter.
"""
def search(self, query):
response = self.command(['search', query])[0]
lst = {}
from re import match
for key, line in [map(
strip, self.package_name(l).split(' - ', 1)
) for l in response.splitlines() if ' - ' in l]:
parts = match('(.*)[ <\\(]?(http.*?)?[ >\\)]?(.*)', line).groups()
lst[key] = parts[0] + ' ' + parts[2] + ('\n' + parts[1] if parts[1] else '')
if lst:
return lst
return {}
def install(self, query):
self.shell([
'install',
'--force-reinstall',
'--timeout', '30',
'--egg',
'--log', '~/.aero/log/pip.log',
'--download-cache', '~/.aero/cache/pip',
query
])
return {}
| bsd-3-clause | Python |
9ff47d0702e63b93938f882f75887ddf70e06a4c | Fix User.is_active(); recentchanges_userindex uses spaces in usernames. | harej/reports_bot,harej/wikiproject_scripts | reportsbot/user.py | reportsbot/user.py | # -*- coding: utf-8 -*-
from .util import to_wiki_format
__all__ = ["User"]
class User:
"""Represents a user on a particular site.
Users can be part of multiple WikiProjects.
"""
def __init__(self, bot, name):
self._bot = bot
self._name = to_wiki_format(name)
@property
def name(self):
"""Return the user's name."""
return self._name
def is_active(self):
"""Return whether or not the user meets a basic threshold of activity.
Threshold is at least one edit in the past 30 days.
"""
query = """SELECT COUNT(*)
FROM recentchanges_userindex
WHERE rc_user_text = %s AND
TIMESTAMP(rc_timestamp) > DATE_SUB(NOW(), INTERVAL 30 DAY)"""
with self._bot.wikidb as cursor:
cursor.execute(query, (self._name),)
count = cursor.fetchall()[0][0]
return count > 0
| # -*- coding: utf-8 -*-
from .util import to_sql_format, to_wiki_format
__all__ = ["User"]
class User:
"""Represents a user on a particular site.
Users can be part of multiple WikiProjects.
"""
def __init__(self, bot, name):
self._bot = bot
self._name = to_wiki_format(name)
@property
def name(self):
"""Return the user's name."""
return self._name
def is_active(self):
"""Return whether or not the user meets a basic threshold of activity.
Threshold is at least one edit in the past 30 days.
"""
query = """SELECT COUNT(*)
FROM recentchanges_userindex
WHERE rc_user_text = %s AND
TIMESTAMP(rc_timestamp) > DATE_SUB(NOW(), INTERVAL 30 DAY)"""
with self._bot.wikidb as cursor:
cursor.execute(query, (to_sql_format(self._name),))
count = cursor.fetchall()[0][0]
return count > 0
| mit | Python |
9a9eb4333285d2582655ead70801c5ab7ed7d43f | add dummy local settings when not found | matiaslindgren/not-enough-bogo,matiaslindgren/not-enough-bogo,matiaslindgren/not-enough-bogo | bogo/bogoapp/settings.py | bogo/bogoapp/settings.py | try:
from bogoapp import local_settings
except ImportError:
local_settings = object()
LOGO = getattr(local_settings, "LOGO", None)
SQL_DRIVER_LIB = getattr(local_settings, "SQL_DRIVER_LIB", None)
DATABASE_PATH = getattr(local_settings, "DATABASE_PATH", None)
SQL_SCHEMA_PATH = getattr(local_settings, "SQL_SCHEMA_PATH", None)
ODBC_DNS = f"Driver={SQL_DRIVER_LIB};Database={DATABASE_PATH}"
TEMPLATE_PATH="templates"
RANDOM_SEED=1
MINIMUM_SEQUENCE_STOP = 5
MAXIMUM_SEQUENCE_STOP = 15
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
TIMESPEC = "milliseconds"
| try:
from bogoapp import local_settings
except ImportError:
pass # probably running ci tests
LOGO = getattr(local_settings, "LOGO", None)
SQL_DRIVER_LIB = getattr(local_settings, "SQL_DRIVER_LIB", None)
DATABASE_PATH = getattr(local_settings, "DATABASE_PATH", None)
SQL_SCHEMA_PATH = getattr(local_settings, "SQL_SCHEMA_PATH", None)
ODBC_DNS = f"Driver={SQL_DRIVER_LIB};Database={DATABASE_PATH}"
TEMPLATE_PATH="templates"
RANDOM_SEED=1
MINIMUM_SEQUENCE_STOP = 5
MAXIMUM_SEQUENCE_STOP = 15
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
TIMESPEC = "milliseconds"
| mit | Python |
7e2bd3fd525a3461ef2077ab7bc2e46a3121351f | Cut default RSS caching from 10 to 5 min | theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs | bulbs/feeds/views.py | bulbs/feeds/views.py | from django.template import RequestContext
from django.utils.timezone import now
from django.views.decorators.cache import cache_control
from bulbs.content.views import ContentListView
from bulbs.special_coverage.models import SpecialCoverage
class RSSView(ContentListView):
"""Really simply, this syndicates Content."""
template_name = "feeds/rss.xml"
paginate_by = 20
feed_title = "RSS Feed"
utm_params = "utm_medium=RSS&utm_campaign=feeds"
def get_template_names(self):
return ["feeds/rss.xml", "feeds/_rss.xml"]
@cache_control(max_age=300)
def get(self, request, *args, **kwargs):
response = super(RSSView, self).get(request, *args, **kwargs)
response["Content-Type"] = "application/rss+xml"
return response
def get_context_data(self, *args, **kwargs):
context = super(RSSView, self).get_context_data(*args, **kwargs)
context["full"] = (self.request.GET.get("full", "false").lower() == "true")
context["images"] = (self.request.GET.get("images", "false").lower() == "true")
context["build_date"] = now()
context["title"] = self.feed_title
context["feed_url"] = self.request.build_absolute_uri()
context["search_url"] = self.request.build_absolute_uri(
u"/search?%s" % self.request.META["QUERY_STRING"])
# OK, so this is kinda brutal. Stay with me here.
for content in context["page_obj"].object_list:
feed_path = content.get_absolute_url() + "?" + self.utm_params
content.feed_url = self.request.build_absolute_uri(feed_path)
return RequestContext(self.request, context)
class SpecialCoverageRSSView(RSSView):
"""Really simply, this syndicates Content."""
feed_title = "Special Coverage RSS Feed"
def get_queryset(self):
sc_id = self.request.GET.get("special_coverage_id")
sc_slug = self.request.GET.get("special_coverage_slug")
if sc_id:
sc = SpecialCoverage.objects.get(id=sc_id)
elif sc_slug:
sc = SpecialCoverage.objects.get(slug=sc_slug)
else:
return self.model.objects.none()
return sc.get_content()[:self.paginate_by]
| from django.template import RequestContext
from django.utils.timezone import now
from django.views.decorators.cache import cache_control
from bulbs.content.views import ContentListView
from bulbs.special_coverage.models import SpecialCoverage
class RSSView(ContentListView):
"""Really simply, this syndicates Content."""
template_name = "feeds/rss.xml"
paginate_by = 20
feed_title = "RSS Feed"
utm_params = "utm_medium=RSS&utm_campaign=feeds"
def get_template_names(self):
return ["feeds/rss.xml", "feeds/_rss.xml"]
@cache_control(max_age=600)
def get(self, request, *args, **kwargs):
response = super(RSSView, self).get(request, *args, **kwargs)
response["Content-Type"] = "application/rss+xml"
return response
def get_context_data(self, *args, **kwargs):
context = super(RSSView, self).get_context_data(*args, **kwargs)
context["full"] = (self.request.GET.get("full", "false").lower() == "true")
context["images"] = (self.request.GET.get("images", "false").lower() == "true")
context["build_date"] = now()
context["title"] = self.feed_title
context["feed_url"] = self.request.build_absolute_uri()
context["search_url"] = self.request.build_absolute_uri(
u"/search?%s" % self.request.META["QUERY_STRING"])
# OK, so this is kinda brutal. Stay with me here.
for content in context["page_obj"].object_list:
feed_path = content.get_absolute_url() + "?" + self.utm_params
content.feed_url = self.request.build_absolute_uri(feed_path)
return RequestContext(self.request, context)
class SpecialCoverageRSSView(RSSView):
"""Really simply, this syndicates Content."""
feed_title = "Special Coverage RSS Feed"
def get_queryset(self):
sc_id = self.request.GET.get("special_coverage_id")
sc_slug = self.request.GET.get("special_coverage_slug")
if sc_id:
sc = SpecialCoverage.objects.get(id=sc_id)
elif sc_slug:
sc = SpecialCoverage.objects.get(slug=sc_slug)
else:
return self.model.objects.none()
return sc.get_content()[:self.paginate_by]
| mit | Python |
b700cf323ff21d8c943df93b277ce7b957f36452 | Refactor example script. | michaelconnor00/gbdxtools,michaelconnor00/gbdxtools | examples/launch_cloud_harness.py | examples/launch_cloud_harness.py | import json
import os
from osgeo import gdal
from gbdxtools import Interface
from gbdx_task_template import TaskTemplate, Task, InputPort, OutputPort
gbdx = Interface()
# data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003" # WV02 Image over San Francisco
# aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=True, enable_pansharpen=True)
class RasterMetaApp(TaskTemplate):
task = Task("RasterMetaTask")
task.input_raster = InputPort(value="/Users/michaelconnor/demo_image")
task.output_meta = OutputPort(value="/Users/michaelconnor")
def invoke(self):
images = self.task.input_raster.list_files(extensions=[".tiff", ".tif"])
# Magic Starts here
for img in images:
header = "META FOR %s\n\n" % os.path.basename(img)
gtif = gdal.Open(img)
self.task.output_meta.write('metadata.txt', header)
self.task.output_meta.write('metadata.txt', json.dumps(gtif.GetMetadata(), indent=2))
# Create a cloud-harness
ch_task = gbdx.Task(RasterMetaApp)
# NOTE: This will override the value in the class definition above.
ch_task.inputs.input_raster = 's3://test-tdgplatform-com/data/envi_src/sm_tiff' # Overwrite the value from
workflow = gbdx.Workflow([ch_task])
# workflow = gbdx.Workflow([aoptask, ch_task])
workflow.savedata(ch_task.outputs.output_meta, location='CH_OUT')
# workflow.savedata(aoptask.outputs.data, location='AOP_OUT')
# NOTE: Always required because the source bundle must be uploaded.
ch_task.upload_input_ports()
print(workflow.generate_workflow_description())
print(workflow.execute())
| import json
import os
# from osgeo import gdal
from gbdxtools import Interface
from task_template import TaskTemplate, Task, InputPort, OutputPort
gbdx = Interface()
# data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003" # WV02 Image over San Francisco
# aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=True, enable_pansharpen=True)
class RasterMetaApp(TaskTemplate):
task = Task("RasterMetaTask")
task.input_raster = InputPort(value="/Users/michaelconnor/demo_image")
task.output_meta = OutputPort(value="/Users/michaelconnor")
def invoke(self):
images = self.task.input_raster.list_files(extensions=[".tif", ".TIF"])
# Magic Starts here
for img in images:
header = "META FOR %s\n\n" % os.path.basename(img)
# gtif = gdal.Open(img)
self.task.output_meta.write('metadata.txt', header)
# self.task.output_meta.write('metadata.txt', json.dumps(gtif.GetMetadata(), indent=2))
ch_task = gbdx.Task(RasterMetaApp)
workflow = gbdx.Workflow([ch_task])
# workflow = gbdx.Workflow([aoptask, ch_task])
workflow.savedata(ch_task.outputs.output_meta, location='CH_OUT')
# workflow.savedata(aoptask.outputs.data, location='AOP_OUT')
workflow.execute()
| mit | Python |
10701a83c8867225e94b710324e0ed21eeb945a1 | remove debug code | x89/botologist,x89/botologist,moopie/botologist,anlutro/botologist | plugins/pcdb.py | plugins/pcdb.py | import requests
import botologist.plugin
class PCDB:
comments = []
@classmethod
def get_random(cls):
if not cls.comments:
response = requests.get('http://pcdb.lutro.me',
headers={'accept': 'application/json'})
cls.comments = [c['body'] for c in response.json()['comments']]
return cls.comments.pop()
class PcdbPlugin(botologist.plugin.Plugin):
"""porn comments database plugin."""
@botologist.plugin.command('pcdb', alias='random')
def get_pcdb_random(self, cmd):
return PCDB.get_random().replace('\n', ' ')
| import requests
import botologist.plugin
class PCDB:
comments = []
@classmethod
def get_random(cls):
if not cls.comments:
print('requesting')
response = requests.get('http://pcdb.lutro.me',
headers={'accept': 'application/json'})
cls.comments = [c['body'] for c in response.json()['comments']]
return cls.comments.pop()
class PcdbPlugin(botologist.plugin.Plugin):
"""porn comments database plugin."""
@botologist.plugin.command('pcdb', alias='random')
def get_pcdb_random(self, cmd):
return PCDB.get_random().replace('\n', ' ')
| mit | Python |
1ec10ebd7b2e1bdf4a7af46c45094ed57e8e6d77 | Update __main__.py | rogersprates/word2vec-financial-sentiment | pmi/__main__.py | pmi/__main__.py | from pmi import pmi_daily, pmi_weekly, pmi_odds_daily, pmi_odds_weekly
from pmi_odds import pmi_odds_weekly
def main():
pmi_daily()
pmi_weekly()
pmi_odds_daily()
pmi_odds_weekly()
if __name__ == "__main__":
main()
| from pmi import pmi_weekly
from pmi_odds import pmi_odds_weekly
def main():
pmi_weekly()
pmi_daily()
pmi_odds_daily()
pmi_odds_weekly()
if __name__ == "__main__":
main()
| mit | Python |
f8f3f6427a83871e60871e8fe1d048e29c7c97fc | fix hamilton bugs | opencivicdata/scrapers-ca,opencivicdata/scrapers-ca | ca_on_hamilton/people.py | ca_on_hamilton/people.py | # coding: utf-8
from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.hamilton.ca/YourElectedOfficials/WardCouncillors/'
class HamiltonPersonScraper(Scraper):
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
council_node = page.xpath('//span[@id="RadEditorPlaceHolderControl0"]')[0]
councillor_urls = council_node.xpath('./table[2]//p/a[not(img)]/@href')
for councillor_url in councillor_urls:
yield councillor_data(councillor_url)
yield mayor_data(council_node.xpath('./table[1]/tbody/tr')[0])
def councillor_data(url):
page = lxmlize(url)
name, district = page.xpath('string(//span[@id="_hpcPageTitle"])').split('-')
info_node = page.xpath('//span[@id="RadEditorPlaceHolderControl0"]')[0]
# strip the word 'Phone:' from the beginning of the number
phone = info_node.xpath('string(.//b[1])')[7:]
email = info_node.xpath('string(.//a)')
photo_url = info_node.xpath('string(.//img/@src)')
p = Legislator(name=name, post_id=district, role='Councillor')
p.add_source(COUNCIL_PAGE)
p.add_source(url)
p.add_contact('email', email, None)
if phone:
p.add_contact('voice', phone, 'legislature')
if photo_url:
p.image = photo_url
return p
def mayor_data(node):
name = node.xpath('string(.//strong)')[6:]
phone = node.xpath('string(.//p[2]/text()[1])')
email = node.xpath('string((.//a)[1])')
photo_url = node.xpath('string(.//img/@src)')
p = Legislator(name=name, post_id='Hamilton', role='Mayor')
p.add_source(COUNCIL_PAGE)
p.add_contact('email', email, None)
p.add_contact('voice', phone, 'legislature')
p.image = photo_url
return p
| # coding: utf-8
from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.hamilton.ca/YourElectedOfficials/WardCouncillors/'
class HamiltonPersonScraper(Scraper):
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
council_node = page.xpath('//span[@id="RadEditorPlaceHolderControl0"]')[0]
councillor_urls = council_node.xpath('./table[2]//a/@href')
for councillor_url in councillor_urls:
yield councillor_data(councillor_url)
yield mayor_data(council_node.xpath('./table[1]/tbody/tr')[0])
def councillor_data(url):
page = lxmlize(url)
name, district = page.xpath('string(//span[@id="_hpcPageTitle"])').split('-')
info_node = page.xpath('//span[@id="RadEditorPlaceHolderControl0"]')[0]
# strip the word 'Phone:' from the beginning of the number
phone = info_node.xpath('string(.//b[1])')[7:]
email = info_node.xpath('string(.//a)')
photo_url = info_node.xpath('string(.//img/@src)')
p = Legislator(name=name, post_id=district, role='Councillor')
p.add_source(COUNCIL_PAGE)
p.add_source(url)
p.add_contact('email', email, None)
p.add_contact('voice', phone, 'legislature')
p.image = photo_url
return p
def mayor_data(node):
name = node.xpath('string(.//strong)')[6:]
phone = node.xpath('string(.//p[2]/text()[1])')
email = node.xpath('string((.//a)[1])')
photo_url = node.xpath('string(.//img/@src)')
p = Legislator(name=name, post_id='Hamilton', role='Mayor')
p.add_source(COUNCIL_PAGE)
p.add_contact('email', email, None)
p.add_contact('voice', phone, 'legislature')
p.image = photo_url
return p
| mit | Python |
f5e8bfaf5c4f7a2131fbe0ffd0f8d14a316b907e | Add exception for cli command line to run interactively. | schae234/Camoco,schae234/Camoco | camoco/Exceptions.py | camoco/Exceptions.py | # Exception abstract class
class CamocoError(Exception):
pass
class CamocoExistsError(CamocoError):
'''
You tried to create a camoco object which already exists
under the same name,type combination.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'You are trying to create a Camoco based object'
'That already exists' + message.format(*args)
)
class CamocoGeneNameError(CamocoError):
'''
Gene names must be beautiful snowflakes.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = 'Gene names must be unique:' + message.format(args)
class CamocoAccessionNameError(CamocoError):
'''
Accession names must be Unique.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'Accession names must be unique:' + message.format(args)
)
class CamocoZeroWindowError(CamocoError):
def __init__(self,expr,message,*args):
self.expr = expr
self.message = (
'Operation requiring window, but window is 0:' + \
message.format(args)
)
class CamocoInteractive(CamocoError):
def __init__(self,expr=None,message='',*args):
self.expr = expr
self.message = 'Camoco interactive ipython session.'
| # Exception abstract class
class CamocoError(Exception):
pass
class CamocoExistsError(CamocoError):
'''
You tried to create a camoco object which already exists
under the same name,type combination.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'You are trying to create a Camoco based object'
'That already exists' + message.format(*args)
)
class CamocoGeneNameError(CamocoError):
'''
Gene names must be beautiful snowflakes.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = 'Gene names must be unique:' + message.format(args)
class CamocoAccessionNameError(CamocoError):
'''
Accession names must be Unique.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'Accession names must be unique:' + message.format(args)
)
class CamocoZeroWindowError(CamocoError):
def __init__(self,expr,message,*args):
self.expr = expr
self.message = (
'Operation requiring window, but window is 0:' + \
message.format(args)
)
| mit | Python |
23cb20a82cd725df104d39fa12b1a71d4b54d459 | Write audit log to stdout if LOG_FOLDER unconfigured | uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal | portal/audit.py | portal/audit.py | """AUDIT module
Maintain a log exclusively used for recording auditable events.
Any action deemed an auditable event should make a call to
auditable_event()
Audit data is also persisted in the database *audit* table.
"""
import os
import sys
import logging
from flask import current_app
from .database import db
from .models.audit import Audit
# special log level for auditable events
# initial goal was to isolate all auditable events to one log handler
# revised to be a level less than ERROR, so auditable events aren't
# considered errors for error mail handling (see SMTPHandler)
AUDIT = (logging.WARN + logging.ERROR) / 2
def auditable_event(message, user_id, subject_id, context="other"):
"""Record auditable event
message: The message to record, i.e. "log in via facebook"
user_id: The authenticated user id performing the action
subject_id: The user id upon which the action was performed
"""
text = "performed by {0} on {1}: {2}: {3}".format(user_id, subject_id, context, message)
current_app.logger.log(AUDIT, text)
with db.session.no_autoflush:
db.session.add(Audit(
user_id=user_id, subject_id=subject_id, comment=message,
context=context))
db.session.commit()
def configure_audit_log(app): # pragma: no cover
"""Configure audit logging.
The audit log is only active when running as a service (not during
database updates, etc.) It should only received auditable events
and never be rotated out.
"""
# Skip config when running tests or maintenance
if ('manage.py' in sys.argv and 'runserver' not in sys.argv) or\
app.testing:
return
logging.addLevelName('AUDIT', AUDIT)
audit_log_handler = logging.StreamHandler(sys.stdout)
if app.config.get('LOG_FOLDER', None):
audit_log = os.path.join(app.config['LOG_FOLDER'], 'audit.log')
audit_log_handler = logging.FileHandler(audit_log, delay=True)
audit_log_handler.setLevel(AUDIT)
audit_log_handler.setFormatter(
logging.Formatter('%(asctime)s: %(message)s'))
app.logger.addHandler(audit_log_handler)
| """AUDIT module
Maintain a log exclusively used for recording auditable events.
Any action deemed an auditable event should make a call to
auditable_event()
Audit data is also persisted in the database *audit* table.
"""
import os
import sys
import logging
from flask import current_app
from .database import db
from .models.audit import Audit
# special log level for auditable events
# initial goal was to isolate all auditable events to one log handler
# revised to be a level less than ERROR, so auditable events aren't
# considered errors for error mail handling (see SMTPHandler)
AUDIT = (logging.WARN + logging.ERROR) / 2
def auditable_event(message, user_id, subject_id, context="other"):
"""Record auditable event
message: The message to record, i.e. "log in via facebook"
user_id: The authenticated user id performing the action
subject_id: The user id upon which the action was performed
"""
text = "performed by {0} on {1}: {2}: {3}".format(user_id, subject_id, context, message)
current_app.logger.log(AUDIT, text)
with db.session.no_autoflush:
db.session.add(Audit(
user_id=user_id, subject_id=subject_id, comment=message,
context=context))
db.session.commit()
def configure_audit_log(app): # pragma: no cover
"""Configure audit logging.
The audit log is only active when running as a service (not during
database updates, etc.) It should only received auditable events
and never be rotated out.
"""
# Skip config when running tests or maintenance
if ('manage.py' in sys.argv and 'runserver' not in sys.argv) or\
app.testing:
return
logging.addLevelName('AUDIT', AUDIT)
audit_log = os.path.join(app.config['LOG_FOLDER'], 'audit.log')
audit_log_handler = logging.FileHandler(audit_log, delay=True)
audit_log_handler.setLevel(AUDIT)
audit_log_handler.setFormatter(
logging.Formatter('%(asctime)s: %(message)s'))
app.logger.addHandler(audit_log_handler)
| bsd-3-clause | Python |
9ec2eb47260f463750f8c810c04b41a04aa7db4b | add forgotten log handler | simbuerg/benchbuild,simbuerg/benchbuild | pprof/driver.py | pprof/driver.py | #!/usr/bin/env python
# encoding: utf-8
from plumbum import cli
from pprof import *
from sys import stderr
import logging
class PollyProfiling(cli.Application):
""" Frontend for running/building the pprof study framework """
VERSION = "0.9.6"
@cli.switch(["-v", "--verbose"], help="Enable verbose output")
def verbose(self):
LOG = logging.getLogger()
LOG.addHandler(logging.StreamHandler(stderr))
LOG.setLevel(logging.DEBUG)
def main(self, *args):
if args:
print "Unknown command %r" % (args[0],)
return 1
if not self.nested_command:
print "No command given"
return 1
def main(*args):
return PollyProfiling.run(*args)
| #!/usr/bin/env python
# encoding: utf-8
from plumbum import cli
from pprof import *
import logging
class PollyProfiling(cli.Application):
""" Frontend for running/building the pprof study framework """
VERSION = "0.9.6"
@cli.switch(["-v", "--verbose"], help="Enable verbose output")
def verbose(self):
LOG = logging.getLogger()
LOG.setLevel(logging.DEBUG)
def main(self, *args):
if args:
print "Unknown command %r" % (args[0],)
return 1
if not self.nested_command:
print "No command given"
return 1
def main(*args):
return PollyProfiling.run(*args)
| mit | Python |
718e8c5ebf24e77bb55d34c18d676ff2fd1aedcf | Bump version to 1.0.2 | portfoliome/cenaming | cenaming/_version.py | cenaming/_version.py | version_info = (1, 0, 2)
__version__ = '.'.join(map(str, version_info))
| version_info = (1, 0, 1)
__version__ = '.'.join(map(str, version_info))
| mit | Python |
ff58d9ae580bf759fe1f2d87f304e6d178aa6f9d | Bump @graknlabs_behaviour | graknlabs/grakn,lolski/grakn,lolski/grakn,lolski/grakn,graknlabs/grakn,graknlabs/grakn,graknlabs/grakn,lolski/grakn | dependencies/graknlabs/repositories.bzl | dependencies/graknlabs/repositories.bzl | #
# Copyright (C) 2021 Grakn Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def graknlabs_dependencies():
git_repository(
name = "graknlabs_dependencies",
remote = "https://github.com/graknlabs/dependencies",
commit = "7b11ab5a195b82b69d05ff20e3b985f364367520", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_dependencies
)
def graknlabs_common():
git_repository(
name = "graknlabs_common",
remote = "https://github.com/graknlabs/common",
tag = "2.0.0-alpha-6", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_common
)
def graknlabs_graql():
git_repository(
name = "graknlabs_graql",
remote = "https://github.com/graknlabs/graql",
commit = "a92e7ae110343e56ecc4a0baf033bab2fd905660", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_graql
)
def graknlabs_protocol():
git_repository(
name = "graknlabs_protocol",
remote = "https://github.com/graknlabs/protocol",
commit = "c3b95a43a7b72a178dc740590d5a3b6c4ccf9cdc", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_protocol
)
def graknlabs_grabl_tracing():
git_repository(
name = "graknlabs_grabl_tracing",
remote = "https://github.com/graknlabs/grabl-tracing",
tag = "2.0.0-alpha" # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_grabl_tracing
)
def graknlabs_behaviour():
git_repository(
name = "graknlabs_behaviour",
remote = "https://github.com/graknlabs/behaviour",
commit = "b5234deedf3443f316be5f23b2154457f87b21e1", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_behaviour
)
| #
# Copyright (C) 2021 Grakn Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def graknlabs_dependencies():
git_repository(
name = "graknlabs_dependencies",
remote = "https://github.com/graknlabs/dependencies",
commit = "7b11ab5a195b82b69d05ff20e3b985f364367520", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_dependencies
)
def graknlabs_common():
git_repository(
name = "graknlabs_common",
remote = "https://github.com/graknlabs/common",
tag = "2.0.0-alpha-6", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_common
)
def graknlabs_graql():
git_repository(
name = "graknlabs_graql",
remote = "https://github.com/graknlabs/graql",
commit = "a92e7ae110343e56ecc4a0baf033bab2fd905660", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_graql
)
def graknlabs_protocol():
git_repository(
name = "graknlabs_protocol",
remote = "https://github.com/graknlabs/protocol",
commit = "c3b95a43a7b72a178dc740590d5a3b6c4ccf9cdc", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_protocol
)
def graknlabs_grabl_tracing():
git_repository(
name = "graknlabs_grabl_tracing",
remote = "https://github.com/graknlabs/grabl-tracing",
tag = "2.0.0-alpha" # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_grabl_tracing
)
def graknlabs_behaviour():
git_repository(
name = "graknlabs_behaviour",
remote = "https://github.com/graknlabs/behaviour",
commit = "f7d9732cc21110cbc30d91ece2e771b954c45fb0", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_behaviour
)
| agpl-3.0 | Python |
ec68a7e723494fdf008f0a7b3159fe7c8eb49636 | fix pipeline name for travis | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | test/sphinxext/test_sphinxext.py | test/sphinxext/test_sphinxext.py | import tempfile
import os
from sequana.sphinxext import snakemakerule
from sequana.sphinxext import sequana_pipeline
from sphinx.application import Sphinx
data = """import sys, os
import sphinx
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
"sequana.sphinxext.snakemakerule",
"sequana.sphinxext.sequana_pipeline"
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = "sequana"
copyright = "2016"
version = '1.0'
release = "1.0"
exclude_patterns = []
add_module_names = False
pygments_style = 'sphinx'
intersphinx_mapping = {}
"""
def test_sequana_pipeline():
res = sequana_pipeline.get_rule_doc("quality_control")
with tempfile.TemporaryDirectory() as tmpdir:
# Create the conf and index in tmpdir
with open(tmpdir+os.sep+"index.rst", "w") as fh:
fh.write(".. snakemakerule:: dag\n")
with open(tmpdir+os.sep+"conf.py", "w") as fh:
print(fh.name)
fh.write(data)
app = Sphinx(tmpdir, tmpdir, tmpdir+"/temp", tmpdir, "html")
app.build()
def test_doc():
res = snakemakerule.get_rule_doc("dag")
res = snakemakerule.get_rule_doc("fastqc_dynamic")
try:
res = snakemakerule.get_rule_doc("dummy")
assert False
except FileNotFoundError:
assert True
except:
assert False
with tempfile.TemporaryDirectory() as tmpdir:
# Create the conf and index in tmpdir
with open(tmpdir+os.sep+"index.rst", "w") as fh:
fh.write(".. snakemakerule:: dag\n")
with open(tmpdir+os.sep+"conf.py", "w") as fh:
print(fh.name)
fh.write(data)
# srcdir, confdir, outdir, doctreedir, buildername
app = Sphinx(tmpdir, tmpdir, tmpdir+"/temp", tmpdir, "html")
app.build()
| import tempfile
import os
from sequana.sphinxext import snakemakerule
from sequana.sphinxext import sequana_pipeline
from sphinx.application import Sphinx
data = """import sys, os
import sphinx
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
"sequana.sphinxext.snakemakerule",
"sequana.sphinxext.sequana_pipeline"
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = "sequana"
copyright = "2016"
version = '1.0'
release = "1.0"
exclude_patterns = []
add_module_names = False
pygments_style = 'sphinx'
intersphinx_mapping = {}
"""
def test_sequana_pipeline():
res = sequana_pipeline.get_rule_doc("variant_calling")
with tempfile.TemporaryDirectory() as tmpdir:
# Create the conf and index in tmpdir
with open(tmpdir+os.sep+"index.rst", "w") as fh:
fh.write(".. snakemakerule:: dag\n")
with open(tmpdir+os.sep+"conf.py", "w") as fh:
print(fh.name)
fh.write(data)
app = Sphinx(tmpdir, tmpdir, tmpdir+"/temp", tmpdir, "html")
app.build()
def test_doc():
res = snakemakerule.get_rule_doc("dag")
res = snakemakerule.get_rule_doc("fastqc_dynamic")
try:
res = snakemakerule.get_rule_doc("dummy")
assert False
except FileNotFoundError:
assert True
except:
assert False
with tempfile.TemporaryDirectory() as tmpdir:
# Create the conf and index in tmpdir
with open(tmpdir+os.sep+"index.rst", "w") as fh:
fh.write(".. snakemakerule:: dag\n")
with open(tmpdir+os.sep+"conf.py", "w") as fh:
print(fh.name)
fh.write(data)
# srcdir, confdir, outdir, doctreedir, buildername
app = Sphinx(tmpdir, tmpdir, tmpdir+"/temp", tmpdir, "html")
app.build()
| bsd-3-clause | Python |
ca6a9c84db1607f27a15ea98e53d00e52c75e7ce | Bump version to 0.10 | calve/cerberus,dkellner/cerberus,nicolaiarocci/cerberus,funkyfuture/cerberus,pyeve/cerberus,calve/cerberus,dkellner/cerberus,pyeve/cerberus,funkyfuture/cerberus,nicolaiarocci/cerberus,MacHu-GWU/cerberus-sanhe,MacHu-GWU/cerberus-sanhe | cerberus/__init__.py | cerberus/__init__.py | """
Extensible validation for Python dictionaries.
:copyright: 2012-2015 by Nicola Iarocci.
:license: ISC, see LICENSE for more details.
Full documentation is available at http://cerberus.readthedocs.org/
"""
from .cerberus import Validator, ValidationError, SchemaError
__version__ = "0.10"
__all__ = [
Validator.__name__,
ValidationError.__name__,
SchemaError.__name__
]
| """
Extensible validation for Python dictionaries.
:copyright: 2012-2015 by Nicola Iarocci.
:license: ISC, see LICENSE for more details.
Full documentation is available at http://cerberus.readthedocs.org/
"""
from .cerberus import Validator, ValidationError, SchemaError
__version__ = "0.9.1"
__all__ = [
Validator.__name__,
ValidationError.__name__,
SchemaError.__name__
]
| isc | Python |
130c37035b6eae9cc9172faecdf828509d9fd80e | Bump version | numirias/firefed | firefed/__version__.py | firefed/__version__.py | __title__ = 'firefed'
__version__ = '0.1.14'
__description__ = 'A tool for Firefox profile analysis, data extraction, \
forensics and hardening'
__url__ = 'https://github.com/numirias/firefed'
__author__ = 'numirias'
__author_email__ = 'numirias@users.noreply.github.com'
__license__ = 'MIT'
__keywords__ = 'firefox security privacy forensics'
| __title__ = 'firefed'
__version__ = '0.1.13'
__description__ = 'A tool for Firefox profile analysis, data extraction, \
forensics and hardening'
__url__ = 'https://github.com/numirias/firefed'
__author__ = 'numirias'
__author_email__ = 'numirias@users.noreply.github.com'
__license__ = 'MIT'
__keywords__ = 'firefox security privacy forensics'
| mit | Python |
390ffbea26155832ca8baae3e2a5176a43d936f3 | Update emoji set. | Fillll/reddit2telegram,nsiregar/reddit2telegram,Fillll/reddit2telegram,nsiregar/reddit2telegram | channels/ch_boobs/app.py | channels/ch_boobs/app.py | #encoding:utf-8
import time
from utils import get_url
subreddit = 'boobs'
t_channel = '-1001052042617'
def send_post(submission, r2t):
what, url, ext = get_url(submission)
title = submission.title
link = submission.short_link
text = '{}\n{}'.format(title, link)
if what in ('gif', 'img'):
r2t.send_text('🔞🔞🔞🔞🔞🔞')
time.sleep(10)
success = r2t.send_gif_img(what, url, ext, text)
if success is False:
return False
for i in range(4):
time.sleep(3.14159 / 2.718281828)
r2t.send_text('🔞🔞🔞🔞🔞🔞')
time.sleep(3.14159 / 2.718281828)
r2t.send_text('👆👆👆👆👆👆')
return True
else:
return False
| #encoding:utf-8
import time
from utils import get_url
subreddit = 'boobs'
t_channel = '-1001052042617'
def send_post(submission, r2t):
what, url, ext = get_url(submission)
title = submission.title
link = submission.short_link
text = '{}\n{}'.format(title, link)
if what in ('gif', 'img'):
r2t.send_text('🔞🔞🔞🔞🔞🔞')
time.sleep(10)
return r2t.send_gif_img(what, url, ext, text)
else:
return False
| mit | Python |
73d12ed0e09c948e0a92cc2f4e14ff61326f38b2 | Fix MySQL tests | rdmorganiser/rdmo,rdmorganiser/rdmo,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo | testing/config/settings/mysql.py | testing/config/settings/mysql.py | DEBUG = True
SECRET_KEY = 'this is a not very secret key'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'rdmo',
'USER': 'root',
'PASSWORD': '',
'TEST': {
'CHARSET': 'utf8',
'COLLATION': 'utf8_general_ci',
}
}
}
| DEBUG = True
SECRET_KEY = 'this is a not very secret key'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'rdmo',
'USER': 'root',
'PASSWORD': ''
}
}
| apache-2.0 | Python |
96ecd1b71320b2e2da82dd06dee8f68e5101b8fc | add simple history module | IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet | django_fixmystreet/fixmystreet/admin.py | django_fixmystreet/fixmystreet/admin.py | from django.contrib import admin
from django import forms
from transmeta import canonical_fieldname
from simple_history.admin import SimpleHistoryAdmin
from django_fixmystreet.fixmystreet.models import ReportCategory, Report, ReportMainCategoryClass, FaqEntry, OrganisationEntity
class ReportCategoryClassAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(ReportMainCategoryClass,ReportCategoryClassAdmin)
class ReportCategoryAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(ReportCategory, ReportCategoryAdmin)
class FaqEntryAdmin(admin.ModelAdmin):
list_display = ('q', 'order')
# admin.site.register(FaqEntry, FaqEntryAdmin)
class ReportAdmin(SimpleHistoryAdmin):
list_display = ('responsible_entity', 'created_at', 'updated_at', 'category', 'secondary_category')
ordering = ['created_at']
exclude = ['photo']
admin.site.register(Report,ReportAdmin)
class OrganisationEntityAdmin(SimpleHistoryAdmin):
list_display = ('name',)
admin.site.register(OrganisationEntity, OrganisationEntityAdmin)
| from django.contrib import admin
from django import forms
from transmeta import canonical_fieldname
from simple_history.admin import SimpleHistoryAdmin
from django_fixmystreet.fixmystreet.models import ReportCategory, Report, ReportMainCategoryClass, FaqEntry, OrganisationEntity
class ReportCategoryClassAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(ReportMainCategoryClass,ReportCategoryClassAdmin)
class ReportCategoryAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(ReportCategory, ReportCategoryAdmin)
class FaqEntryAdmin(admin.ModelAdmin):
list_display = ('q', 'order')
# admin.site.register(FaqEntry, FaqEntryAdmin)
class ReportAdmin(admin.ModelAdmin):
list_display = ('responsible_entity', 'created_at', 'updated_at', 'category', 'secondary_category')
ordering = ['created_at']
exclude = ['photo']
admin.site.register(Report,ReportAdmin)
class OrganisationEntityAdmin(SimpleHistoryAdmin):
list_display = ('name',)
admin.site.register(OrganisationEntity, OrganisationEntityAdmin)
| agpl-3.0 | Python |
137c0f94f51d9f2f8cc84344b79ca8ad2c85b547 | Allow calling optimize from fontcrunch directly | googlefonts/fontcrunch,googlefonts/quadopt,googlefonts/quadopt,googlefonts/fontcrunch,moyogo/fontcrunch,moyogo/fontcrunch | fontcrunch/__init__.py | fontcrunch/__init__.py | from .fontcrunch import optimize
| apache-2.0 | Python | |
5232ba997d65cb2bdc52f36096f4be1216c48a4f | Fix UT | RoboCupULaval/StrategyIA,RoboCupULaval/StrategyIA | tests/STA/Tactic/go_kick_test.py | tests/STA/Tactic/go_kick_test.py |
import unittest
from time import sleep
from Util import Pose, Position
from ai.STA.Tactic.go_kick import GoKick, COMMAND_DELAY
from tests.STA.perfect_sim import PerfectSim
A_ROBOT_ID = 1
START_POSE = Pose.from_values(300, 0, 0)
START_BALL_POSITION = START_POSE.position + Position(100, 0)
GOAL_POSE = Pose.from_values(700, 0, 0)
MAX_TICK_UNTIL_KICK = 7
class TestGoKick(unittest.TestCase):
def setUp(self):
self.sim = PerfectSim(GoKick)
def test_givenARobotAndABall_thenKickTheBall(self):
self.sim.add_robot(A_ROBOT_ID, START_POSE)
self.sim.move_ball(START_BALL_POSITION)
self.sim.start(A_ROBOT_ID, target=GOAL_POSE)
self.sim.tick() # initialize
for _ in range(0, MAX_TICK_UNTIL_KICK):
self.sim.tick()
if self.sim.has_kick():
assert self.sim.has_hit_ball
return
assert False, "Reach max number of tick and no kick"
|
import unittest
from time import sleep
from Util import Pose, Position
from ai.STA.Tactic.go_kick import GoKick, COMMAND_DELAY
from tests.STA.perfect_sim import PerfectSim
A_ROBOT_ID = 1
START_POSE = Pose.from_values(300, 0, 0)
START_BALL_POSITION = START_POSE.position + Position(100, 0)
GOAL_POSE = Pose.from_values(700, 0, 0)
MAX_TICK_UNTIL_KICK = 7
class TestGoKick(unittest.TestCase):
def setUp(self):
self.sim = PerfectSim(GoKick)
def test_givenARobotAndABall_thenKickTheBall(self):
self.sim.add_robot(A_ROBOT_ID, START_POSE)
self.sim.move_ball(START_BALL_POSITION)
self.sim.start(A_ROBOT_ID, target=GOAL_POSE)
sleep(COMMAND_DELAY)
self.sim.tick() # Charge
for _ in range(0, MAX_TICK_UNTIL_KICK):
self.sim.tick()
if self.sim.has_kick():
assert self.sim.has_charge_kick
assert self.sim.has_hit_ball
return
assert False, "Reach max number of tick and no kick"
| mit | Python |
a0ca9f5394792592658686b2729d1ce6b1497e1d | Add webpack_args argument | mnieber/dodo_commands | extra/webdev_commands/webpack.py | extra/webdev_commands/webpack.py | """Run the webpack command."""
import argparse
from dodo_commands.defaults.commands.standard_commands import DodoCommand
class Command(DodoCommand): # noqa
decorators = ['docker']
def add_arguments_imp(self, parser): # noqa
parser.add_argument(
'--args',
dest="webpack_args",
required=False,
default=[],
nargs=argparse.REMAINDER
)
def handle_imp(self, webpack_args, **kwargs): # noqa
self.runcmd(
["webpack", "--watch-stdin", webpack_args],
cwd=self.get_config("/WEBPACK/webpack_dir")
)
| """Run the webpack command."""
from dodo_commands.defaults.commands.standard_commands import DodoCommand
class Command(DodoCommand): # noqa
decorators = ['docker']
def handle_imp(self, **kwargs): # noqa
self.runcmd(
["webpack", "--watch-stdin"],
cwd=self.get_config("/WEBPACK/webpack_dir")
)
| mit | Python |
82d2c597234b57c05d1dae26920522355101b0df | return list of shares from list_shares function | shish/python-clearskies | clearskies/client.py | clearskies/client.py | from clearskies.unixjsonsocket import UnixJsonSocket
import xdg.BaseDirectory
import os
class ProtocolException(Exception):
pass
class ClearSkies(object):
def __init__(self):
data_dir = xdg.BaseDirectory.save_data_path("clearskies")
control_path = os.path.join(data_dir, "control")
self.socket = UnixJsonSocket(control_path)
def connect(self):
self.socket.connect()
try:
handshake = self.socket.recv()
if handshake["protocol"] != 1:
raise ValueError("Only protocol V1 is currently supported")
except ValueError as e:
raise ProtocolException("Error in CS handshake: %s" % e)
def _cmd(self, cmd):
try:
self.socket.send(cmd)
return self.socket.recv()
except ValueError as e:
raise ProtocolException("Error decoding command: %s" % e)
def stop(self):
return self._cmd({
"type": "stop",
})
def pause(self):
return self._cmd({
"type": "pause",
})
def resume(self):
return self._cmd({
"type": "resume",
})
def status(self):
return self._cmd({
"type": "status",
})
def create_share(self, path):
return self._cmd({
"type": "create_share",
"path": path,
})
def list_shares(self):
return self._cmd({
"type": "list_shares",
})["shares"]
def create_access_code(self, path, mode):
return self._cmd({
"type": "create_access_code",
"path": path,
"mode": mode,
})
def add_share(self, code, path):
return self._cmd({
"type": "add_share",
"code": code,
"path": path,
})
def remove_share(self, path):
return self._cmd({
"type": "remove_share",
"path": path,
})
| from clearskies.unixjsonsocket import UnixJsonSocket
import xdg.BaseDirectory
import os
class ProtocolException(Exception):
pass
class ClearSkies(object):
def __init__(self):
data_dir = xdg.BaseDirectory.save_data_path("clearskies")
control_path = os.path.join(data_dir, "control")
self.socket = UnixJsonSocket(control_path)
def connect(self):
self.socket.connect()
try:
handshake = self.socket.recv()
if handshake["protocol"] != 1:
raise ValueError("Only protocol V1 is currently supported")
except ValueError as e:
raise ProtocolException("Error in CS handshake: %s" % e)
def _cmd(self, cmd):
try:
self.socket.send(cmd)
return self.socket.recv()
except ValueError as e:
raise ProtocolException("Error decoding command: %s" % e)
def stop(self):
return self._cmd({
"type": "stop",
})
def pause(self):
return self._cmd({
"type": "pause",
})
def resume(self):
return self._cmd({
"type": "resume",
})
def status(self):
return self._cmd({
"type": "status",
})
def create_share(self, path):
return self._cmd({
"type": "create_share",
"path": path,
})
def list_shares(self):
return self._cmd({
"type": "list_shares",
})
def create_access_code(self, path, mode):
return self._cmd({
"type": "create_access_code",
"path": path,
"mode": mode,
})
def add_share(self, code, path):
return self._cmd({
"type": "add_share",
"code": code,
"path": path,
})
def remove_share(self, path):
return self._cmd({
"type": "remove_share",
"path": path,
})
| mit | Python |
964a7be5f03a201305f5ba3165a2dc1257311cf4 | exclude c-extensions on Windows. | shinmorino/quant_sandbox,shinmorino/quant_sandbox,shinmorino/quant_sandbox | python/setup.py | python/setup.py | from setuptools import setup, find_packages, Extension
import numpy
def new_ext(name, srcs) :
ext_includes = [numpy.get_include(), '../libsqaod/include', '../libsqaod', '../libsqaod/eigen']
ext = Extension(name, srcs,
include_dirs=ext_includes,
extra_compile_args = ['-std=c++11'],
extra_link_args = ['-L../libsqaod/.libs', '-lsqaod'])
return ext
ext_modules = []
if platform.system() != 'Windows' :
ext_modules.append(new_ext('sqaod.cpu.cpu_dg_bf_searcher', ['sqaod/cpu/src/cpu_dg_bf_searcher.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_dg_annealer', ['sqaod/cpu/src/cpu_dg_annealer.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_bg_bf_searcher', ['sqaod/cpu/src/cpu_bg_bf_searcher.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_bg_annealer', ['sqaod/cpu/src/cpu_bg_annealer.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_formulas', ['sqaod/cpu/src/cpu_formulas.cpp']))
setup(
name='sqaod',
version='0.0.dev0',
packages=find_packages(exclude=['tests']),
install_requires=['numpy>=1.11'],
author='Shinya Morino',
author_email="shin.morino_at_gmail.com",
description='A collection of solvers for Quantum annealer.',
license='BSD 3-Clause License',
keywords='quantum annealing solver',
ext_modules=ext_modules,
)
| from setuptools import setup, find_packages, Extension
import numpy
def new_ext(name, srcs) :
ext_includes = [numpy.get_include(), '../libsqaod/include', '../libsqaod', '../libsqaod/eigen']
ext = Extension(name, srcs,
include_dirs=ext_includes,
extra_compile_args = ['-std=c++11'],
extra_link_args = ['-L../libsqaod/.libs', '-lsqaod'])
return ext
ext_modules = []
ext_modules.append(new_ext('sqaod.cpu.cpu_dg_bf_searcher', ['sqaod/cpu/src/cpu_dg_bf_searcher.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_dg_annealer', ['sqaod/cpu/src/cpu_dg_annealer.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_bg_bf_searcher', ['sqaod/cpu/src/cpu_bg_bf_searcher.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_bg_annealer', ['sqaod/cpu/src/cpu_bg_annealer.cpp']))
ext_modules.append(new_ext('sqaod.cpu.cpu_formulas', ['sqaod/cpu/src/cpu_formulas.cpp']))
setup(
name='sqaod',
version='0.0.dev0',
packages=find_packages(exclude=['tests']),
install_requires=['numpy>=1.11'],
author='Shinya Morino',
author_email="shin.morino_at_gmail.com",
description='A collection of solvers for Quantum annealer.',
license='BSD 3-Clause License',
keywords='quantum annealing solver',
ext_modules=ext_modules,
)
| bsd-3-clause | Python |
daa22f92807ea593374ce07de7b57650c559cc8f | Add all requirements to setup.py | machenslab/dPCA,machenslab/dPCA | python/setup.py | python/setup.py | from setuptools import setup
from os.path import join, dirname
try:
# obtain long description from README
readme_path = join(dirname(__file__), "README.rst")
with open(readme_path, encoding="utf-8") as f:
README = f.read()
# remove raw html not supported by PyPI
README = "\n".join(README.split("\n")[3:])
except IOError:
README = ""
DESCRIPTION = "Implements Demixed Principal Components Analysis"
NAME = "dPCA"
AUTHOR = "Machens Lab"
AUTHOR_EMAIL = "wieland.brendel@uni-tuebingen.de"
MAINTAINER = "Wieland Brendel"
MAINTAINER_EMAIL = "wieland.brendel@uni-tuebingen.de"
DOWNLOAD_URL = 'https://github.com/machenslab/dPCA/'
LICENSE = 'MIT'
VERSION = '1.0.5'
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=README,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=DOWNLOAD_URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=['dPCA'],
package_data={},
install_requires=['numpy', 'scipy', 'sklearn', 'numexpr', 'numba']
)
| from setuptools import setup
from os.path import join, dirname
try:
# obtain long description from README
readme_path = join(dirname(__file__), "README.rst")
with open(readme_path, encoding="utf-8") as f:
README = f.read()
# remove raw html not supported by PyPI
README = "\n".join(README.split("\n")[3:])
except IOError:
README = ""
DESCRIPTION = "Implements Demixed Principal Components Analysis"
NAME = "dPCA"
AUTHOR = "Machens Lab"
AUTHOR_EMAIL = "wieland.brendel@uni-tuebingen.de"
MAINTAINER = "Wieland Brendel"
MAINTAINER_EMAIL = "wieland.brendel@uni-tuebingen.de"
DOWNLOAD_URL = 'https://github.com/machenslab/dPCA/'
LICENSE = 'MIT'
VERSION = '1.0.5'
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=README,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=DOWNLOAD_URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=['dPCA'],
package_data={},
install_requires=['sklearn', 'numexpr', 'numba']
)
| mit | Python |
f81f60257c024c8a515aeba34137801e448c42ae | Correct comment | captainwhippet/glowshow | client/glowclient.py | client/glowclient.py | #
# Filename: glowclient.py
# Author: @captainwhippet
# Created: 7 March 2014
#
# Send a command to the server running the glowserver
import pickle, socket
def send_command(host, pattern):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(host)
f = s.makefile('b')
pickle.dump(pattern, f)
f.flush()
f.close()
s.close()
| #
# Filename: glowthread.py
# Author: @captainwhippet
# Created: 7 March 2014
#
# Send a command to the server running the glowserver
import pickle, socket
def send_command(host, pattern):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(host)
f = s.makefile('b')
pickle.dump(pattern, f)
f.flush()
f.close()
s.close()
| mit | Python |
632c95816ba77fcfd636d598346528b780efb4c5 | Disable output buffering. | bamos/python-scripts,bamos/python-scripts | python2.7/mt.py | python2.7/mt.py | #!/usr/bin/env python2
import argparse
import multitail
import sys
# http://stackoverflow.com/questions/107705
class Unbuffered(object):
def __init__(self, stream): self.stream = stream
def write(self, data): self.stream.write(data); self.stream.flush()
def __getattr__(self, attr): return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
parser = argparse.ArgumentParser()
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args()
for fn, line in multitail.multitail(args.files):
print("{}: {}".format(fn,line.strip()))
| #!/usr/bin/env python2
import argparse
import multitail
parser = argparse.ArgumentParser()
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args()
for fn, line in multitail.multitail(args.files):
print("{}: {}".format(fn,line.strip()))
| mit | Python |
404eef133bf6f8eeff1d4a40851db07fa8e15546 | Revert "Update version.py" | RasaHQ/rasa_nlu,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu | rasa/version.py | rasa/version.py | __version__ = "1.3.0"
| __version__ = "1.3.1"
| apache-2.0 | Python |
e61917e18efa3340df1c68ff057732a8a9f77d2b | Remove unused code from hyperion/__init__.py | hyperion-rt/hyperion,hyperion-rt/hyperion,astrofrog/hyperion,bluescarni/hyperion,bluescarni/hyperion,hyperion-rt/hyperion,astrofrog/hyperion | hyperion/__init__.py | hyperion/__init__.py | from __future__ import print_function, division
import os
import glob
import hashlib
from .version import __version__
# Set up the test function
_test_runner = None
def _get_test_runner():
from .testing.helper import TestRunner
return TestRunner(__path__[0])
def test(package=None, test_path=None, args=None, plugins=None,
verbose=False, pastebin=None, generate_reference=False,
bit_level_tests=False):
"""
Run Hyperion tests using py.test. A proper set of arguments is
constructed and passed to `pytest.main`.
Parameters
----------
package : str, optional
The name of a specific package to test, e.g. 'model' or
'densities'. If nothing is specified all default Hyperion tests
are run.
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
args : str, optional
Additional arguments to be passed to `pytest.main` in the `args`
keyword argument.
plugins : list, optional
Plugins to be passed to `pytest.main` in the `plugins` keyword
argument.
verbose : bool, optional
Convenience option to turn on verbose output from py.test. Passing
True is the same as specifying `-v` in `args`.
pastebin : {'failed','all',None}, optional
Convenience option for turning on py.test pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
generate_reference : str
Generate reference results for bit-level tests
bit_level_tests : bool
Run bit-level tests. These are time-consuming tests that check the
exact validity of the output, but they are disabled by default.
See Also
--------
pytest.main : py.test function wrapped by `run_tests`.
"""
test_runner = _get_test_runner()
return test_runner.run_tests(
package=package, test_path=test_path, args=args,
plugins=plugins, verbose=verbose, pastebin=pastebin,
generate_reference=generate_reference,
bit_level_tests=bit_level_tests)
| from __future__ import print_function, division
import os
import glob
import hashlib
import h5py
from .version import __version__
data_dir = __path__[0] + '/data/'
datafiles = {}
for datafile in glob.glob(os.path.join(data_dir, '*.hdf5')):
f = h5py.File(datafile)
hash = f.attrs['asciimd5'].decode('utf-8')
datafiles[hash] = os.path.abspath(datafile)
f.close()
def get_HDF5_datafile(filename):
h = hashlib.md5(file(filename,'rb').read()).hexdigest()
if h in datafiles:
return datafiles[h]
else:
raise Exception("File does not exist")
# Set up the test function
_test_runner = None
def _get_test_runner():
from .testing.helper import TestRunner
return TestRunner(__path__[0])
def test(package=None, test_path=None, args=None, plugins=None,
verbose=False, pastebin=None, generate_reference=False,
bit_level_tests=False):
"""
Run Hyperion tests using py.test. A proper set of arguments is
constructed and passed to `pytest.main`.
Parameters
----------
package : str, optional
The name of a specific package to test, e.g. 'model' or
'densities'. If nothing is specified all default Hyperion tests
are run.
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
args : str, optional
Additional arguments to be passed to `pytest.main` in the `args`
keyword argument.
plugins : list, optional
Plugins to be passed to `pytest.main` in the `plugins` keyword
argument.
verbose : bool, optional
Convenience option to turn on verbose output from py.test. Passing
True is the same as specifying `-v` in `args`.
pastebin : {'failed','all',None}, optional
Convenience option for turning on py.test pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
generate_reference : str
Generate reference results for bit-level tests
bit_level_tests : bool
Run bit-level tests. These are time-consuming tests that check the
exact validity of the output, but they are disabled by default.
See Also
--------
pytest.main : py.test function wrapped by `run_tests`.
"""
test_runner = _get_test_runner()
return test_runner.run_tests(
package=package, test_path=test_path, args=args,
plugins=plugins, verbose=verbose, pastebin=pastebin,
generate_reference=generate_reference,
bit_level_tests=bit_level_tests)
| bsd-2-clause | Python |
95d70e79fc6a55b68db824714b6fea678bd619f8 | change uuid namespace to NAMESPACE_OID | WEIZIBIN/PersonalWebsite,WEIZIBIN/PersonalWebsite,WEIZIBIN/PersonalWebsite | flask_website/xiaoice_storage.py | flask_website/xiaoice_storage.py | import uuid
work_xiaoice = {}
free_xiaoice = {}
class Xiaoice():
def __init__(self, weibo):
self._weibo = weibo
def get_weibo(self):
return self._weibo
def send_msg(self, msg):
self._weibo.post_msg_to_xiaoice(msg)
def get_msg(self):
return self._weibo.get_msg_from_xiaoice()
def is_avail(self):
if self._weibo.im_ready:
return True
def get_xiaoice_by_client_id(client_id):
if client_id:
return work_xiaoice[client_id]
def add_xiaoice(weibo):
xiaoice = Xiaoice(weibo)
free_xiaoice[weibo.username] = xiaoice
def get_all_xiaoice():
return free_xiaoice
def get_avail_xiaoice_client_id():
for username, xiaoice in free_xiaoice.items():
if xiaoice.is_avail():
client_id = uuid.uuid3(uuid.NAMESPACE_OID, username)
work_xiaoice[client_id] = free_xiaoice.pop(username)
return client_id.__str__()
| import uuid
work_xiaoice={}
free_xiaoice={}
UUID_NAMESPACE_XIAOICE = 'CHAT_XIAOICE'
class Xiaoice():
def __init__(self, weibo):
self._weibo = weibo
def get_weibo(self):
return self._weibo
def send_msg(self, msg):
self._weibo.post_msg_to_xiaoice(msg)
def get_msg(self):
return self._weibo.get_msg_from_xiaoice()
def is_avail(self):
if self._weibo.im_ready:
return True
def get_xiaoice_by_client_id(client_id):
if client_id:
return work_xiaoice[client_id]
def add_xiaoice(weibo):
xiaoice = Xiaoice(weibo)
free_xiaoice[weibo.username] = xiaoice
def get_all_xiaoice():
return free_xiaoice
def get_avail_xiaoice_client_id():
for username, xiaoice in free_xiaoice.items():
if xiaoice.is_avail():
client_id = uuid.uuid3(UUID_NAMESPACE_XIAOICE, username)
work_xiaoice[client_id] = free_xiaoice.pop(username)
return client_id.__str__() | mit | Python |
2270dc5f5e59a24e566a2b71c01b30495524aa4c | fix the same damn thing again | flumotion-mirror/flumotion,Flumotion/flumotion,Flumotion/flumotion,timvideos/flumotion,Flumotion/flumotion,flumotion-mirror/flumotion,timvideos/flumotion,timvideos/flumotion,Flumotion/flumotion | flumotion/test/test_pygobject.py | flumotion/test/test_pygobject.py | # vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005 Fluendo, S.L. (www.fluendo.com). All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from twisted.trial import unittest
import pygtk
pygtk.require('2.0')
import gobject
from flumotion.common.pygobject import gsignal, gproperty
from flumotion.common.pygobject import with_construct_properties
class TestPyGObject(unittest.TestCase):
def testPyGObject(self):
class Foo(gobject.GObject):
gsignal('hcf', bool, str)
gproperty(bool, 'burning', 'If the object is burning',
False, construct=True)
def __init__(xself):
gobject.GObject.__init__(xself)
xself.connect('hcf', xself.on_hcf)
__init__ = with_construct_properties (__init__)
def on_hcf(xself, again_self, x, y):
self.assert_(isinstance(x, bool))
self.assert_(isinstance(y, str))
xself.set_property('burning', True)
gobject.type_register(Foo)
o = Foo()
self.assertEquals(False, o.get_property('burning'))
o.emit('hcf', False, 'foogoober')
self.assertEquals(True, o.get_property('burning'))
| # vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005 Fluendo, S.L. (www.fluendo.com). All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from twisted.trial import unittest
import pygtk
pygtk.require('2.0')
import gobject
from flumotion.common.pygobject import (gsignal, gproperty,
with_construct_properties)
class TestPyGObject(unittest.TestCase):
def testPyGObject(self):
class Foo(gobject.GObject):
gsignal('hcf', bool, str)
gproperty(bool, 'burning', 'If the object is burning',
False, construct=True)
def __init__(xself):
gobject.GObject.__init__(xself)
xself.connect('hcf', xself.on_hcf)
__init__ = with_construct_properties (__init__)
def on_hcf(xself, again_self, x, y):
self.assert_(isinstance(x, bool))
self.assert_(isinstance(y, str))
xself.set_property('burning', True)
gobject.type_register(Foo)
o = Foo()
self.assertEquals(False, o.get_property('burning'))
o.emit('hcf', False, 'foogoober')
self.assertEquals(True, o.get_property('burning'))
| lgpl-2.1 | Python |
3146b2a567788ea3775acc1b1b3b6810a5b247e7 | Add max_error_len to Github module. | drwahl/i3pystatus,claria/i3pystatus,fmarchenko/i3pystatus,ismaelpuerto/i3pystatus,eBrnd/i3pystatus,teto/i3pystatus,ismaelpuerto/i3pystatus,opatut/i3pystatus,paulollivier/i3pystatus,Arvedui/i3pystatus,juliushaertl/i3pystatus,enkore/i3pystatus,ncoop/i3pystatus,onkelpit/i3pystatus,Elder-of-Ozone/i3pystatus,fmarchenko/i3pystatus,asmikhailov/i3pystatus,MaicoTimmerman/i3pystatus,schroeji/i3pystatus,Elder-of-Ozone/i3pystatus,paulollivier/i3pystatus,asmikhailov/i3pystatus,richese/i3pystatus,plumps/i3pystatus,drwahl/i3pystatus,m45t3r/i3pystatus,eBrnd/i3pystatus,m45t3r/i3pystatus,richese/i3pystatus,teto/i3pystatus,facetoe/i3pystatus,facetoe/i3pystatus,enkore/i3pystatus,opatut/i3pystatus,schroeji/i3pystatus,ncoop/i3pystatus,yang-ling/i3pystatus,juliushaertl/i3pystatus,Arvedui/i3pystatus,MaicoTimmerman/i3pystatus,onkelpit/i3pystatus,claria/i3pystatus,yang-ling/i3pystatus,plumps/i3pystatus | i3pystatus/github.py | i3pystatus/github.py | from i3pystatus import IntervalModule
import requests
import json
from i3pystatus.core import ConfigError
from i3pystatus.core.util import user_open, internet, require
class Github(IntervalModule):
"""
Check Github for pending notifications.
Requires `requests`
Formatters:
* `{unread}` - contains the value of unread_marker when there are pending notifications
* `{unread_count}` - number of unread notifications, empty if 0
"""
max_error_len = 50
unread_marker = "●"
unread = ''
color = '#78EAF2'
username = ''
password = ''
format = '{unread}'
interval = 600
keyring_backend = None
on_leftclick = 'open_github'
settings = (
('format', 'format string'),
('keyring_backend', 'alternative keyring backend for retrieving credentials'),
('unread_marker', 'sets the string that the "unread" formatter shows when there are pending notifications'),
("username", ""),
("password", ""),
("color", "")
)
def open_github(self):
user_open('https://github.com/' + self.username)
@require(internet)
def run(self):
format_values = dict(unread_count='', unread='')
response = requests.get('https://api.github.com/notifications', auth=(self.username, self.password))
data = json.loads(response.text)
# Bad credentials
if isinstance(data, dict):
err_msg = data['message']
raise ConfigError(err_msg)
unread = len(data)
if unread > 0:
format_values['unread_count'] = unread
format_values['unread'] = self.unread_marker
self.output = {
'full_text': self.format.format(**format_values),
'color': self.color
}
| from i3pystatus import IntervalModule
import requests
import json
from i3pystatus.core import ConfigError
from i3pystatus.core.util import user_open, internet, require
class Github(IntervalModule):
"""
Check Github for pending notifications.
Requires `requests`
Formatters:
* `{unread}` - contains the value of unread_marker when there are pending notifications
* `{unread_count}` - number of unread notifications, empty if 0
"""
unread_marker = "●"
unread = ''
color = '#78EAF2'
username = ''
password = ''
format = '{unread}'
interval = 600
keyring_backend = None
on_leftclick = 'open_github'
settings = (
('format', 'format string'),
('keyring_backend', 'alternative keyring backend for retrieving credentials'),
('unread_marker', 'sets the string that the "unread" formatter shows when there are pending notifications'),
("username", ""),
("password", ""),
("color", "")
)
def open_github(self):
user_open('https://github.com/' + self.username)
@require(internet)
def run(self):
format_values = dict(unread_count='', unread='')
response = requests.get('https://api.github.com/notifications', auth=(self.username, self.password))
data = json.loads(response.text)
# Bad credentials
if isinstance(data, dict):
err_msg = data['message']
if len(err_msg) > 10:
err_msg = "%s%s" % (err_msg[:10], '...')
raise ConfigError(err_msg)
unread = len(data)
if unread > 0:
format_values['unread_count'] = unread
format_values['unread'] = self.unread_marker
self.output = {
'full_text': self.format.format(**format_values),
'color': self.color
}
| mit | Python |
bb23f661d259d1d272a632624ae1ee63df39983f | Update long_words.py | creativcoder/AlgorithmicProblems,creativcoder/AlgorithmicProblems,creativcoder/AlgorithmicProblems,creativcoder/AlgorithmicProblems | codeforces/long_words.py | codeforces/long_words.py | #http://codeforces.com/problemset/problem/71/A
T = int(raw_input())
while(not T == 0):
word = str(raw_input())
if len(word)>10:
print word[0]+str(len(word[1:len(word)-1]))+word[len(word)-1]
else:
print word
T-=1
| http://codeforces.com/problemset/problem/71/A
T = int(raw_input())
while(not T == 0):
word = str(raw_input())
if len(word)>10:
print word[0]+str(len(word[1:len(word)-1]))+word[len(word)-1]
else:
print word
T-=1
| mit | Python |
5da928fd9b08aeb0028b71535413159da18393b4 | Exclude inactive comics from sets editing, effectively throwing them out of the set when saved | datagutten/comics,klette/comics,jodal/comics,datagutten/comics,jodal/comics,klette/comics,jodal/comics,datagutten/comics,jodal/comics,klette/comics,datagutten/comics | comics/sets/forms.py | comics/sets/forms.py | import datetime
from django import forms
from django.template.defaultfilters import slugify
from comics.core.models import Comic
from comics.sets.models import Set
class NewSetForm(forms.ModelForm):
class Meta:
model = Set
fields = ('name',)
def save(self, commit=True):
set = super(NewSetForm, self).save(commit=False)
set.name = slugify(set.name)
set.last_modified = datetime.datetime.now()
set.last_loaded = datetime.datetime.now()
if commit:
set.save()
return set
class EditSetForm(forms.ModelForm):
comics = forms.ModelMultipleChoiceField(
Comic.objects.filter(active=True),
required=False,
widget=forms.CheckboxSelectMultiple)
add_new_comics = forms.BooleanField(
label='Automatically add new comics to the set', required=False)
hide_empty_comics = forms.BooleanField(
label='Hide comics without matching releases from view', required=False)
class Meta:
model = Set
fields = ('comics', 'add_new_comics', 'hide_empty_comics')
def save(self, commit=True):
comics_set = super(EditSetForm, self).save(commit=False)
comics_set.last_modified = datetime.datetime.now()
if commit:
comics_set.save()
self.save_m2m()
return comics_set
| import datetime
from django import forms
from django.template.defaultfilters import slugify
from comics.core.models import Comic
from comics.sets.models import Set
class NewSetForm(forms.ModelForm):
class Meta:
model = Set
fields = ('name',)
def save(self, commit=True):
set = super(NewSetForm, self).save(commit=False)
set.name = slugify(set.name)
set.last_modified = datetime.datetime.now()
set.last_loaded = datetime.datetime.now()
if commit:
set.save()
return set
class EditSetForm(forms.ModelForm):
comics = forms.ModelMultipleChoiceField(
Comic.objects.all(),
required=False,
widget=forms.CheckboxSelectMultiple)
add_new_comics = forms.BooleanField(
label='Automatically add new comics to the set', required=False)
hide_empty_comics = forms.BooleanField(
label='Hide comics without matching releases from view', required=False)
class Meta:
model = Set
fields = ('comics', 'add_new_comics', 'hide_empty_comics')
def save(self, commit=True):
comics_set = super(EditSetForm, self).save(commit=False)
comics_set.last_modified = datetime.datetime.now()
if commit:
comics_set.save()
self.save_m2m()
return comics_set
| agpl-3.0 | Python |
a5569ac905e3eb8faac59f2c6b7ec834235fb9e5 | Write file by dumping, not 'open()' | mcinglis/render-jinja | render_jinja.py | render_jinja.py |
# Copyright 2015 Malcolm Inglis <http://minglis.id.au>
#
# render-jinja is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# render-jinja is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with render-jinja. If not, see <https://gnu.org/licenses/>.
from __future__ import print_function
import os
from argparse import ArgumentParser, ArgumentTypeError
import jinja2
class TemplateLoader(jinja2.BaseLoader):
def get_source(self, environment, template_path):
filename = os.path.join(os.getcwd(), template_path)
mtime = os.path.getmtime(filename)
def uptodate():
return os.path.getmtime(filename) == mtime
contents = read_file(template_path)
return contents, filename, uptodate
def read_file(path):
with open(path, 'r') as f:
return f.read().decode()
def arg_parser(prog):
p = ArgumentParser(prog=prog, description='Renders a Jinja template.')
p.add_argument('path', type=str,
help='path to the template file')
p.add_argument('attrs', nargs='*', type=parse_attr, metavar='k=v',
help='attributes to render the template with')
p.add_argument('-o', '--output', type=str, default='/dev/stdout',
help='the path to write the rendered template to')
return p
def parse_args(argv):
p = arg_parser(argv[0])
args = p.parse_args(argv[1:])
args.attrs = dict(args.attrs)
return args
def parse_attr(s):
if '=' not in s:
raise ArgumentTypeError('`%s` doesn\'t contain a `=`' % s)
else:
return tuple(s.split('=', 1))
def main(argv):
args = parse_args(argv)
env = jinja2.Environment(loader=TemplateLoader(),
undefined=jinja2.StrictUndefined)
env.get_template(args.path)\
.stream(argv=argv, **args.attrs)\
.dump(args.output)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
# Copyright 2015 Malcolm Inglis <http://minglis.id.au>
#
# render-jinja is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# render-jinja is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with render-jinja. If not, see <https://gnu.org/licenses/>.
from __future__ import print_function
import os
from argparse import ArgumentParser, ArgumentTypeError
import jinja2
class Loader(jinja2.BaseLoader):
def get_source(self, environment, template_path):
filename = os.path.join(os.getcwd(), template_path)
mtime = os.path.getmtime(filename)
def uptodate():
return os.path.getmtime(filename) == mtime
contents = read_file(template_path)
return contents, filename, uptodate
def read_file(path):
with open(path, 'r') as f:
return f.read().decode()
def arg_parser(prog):
p = ArgumentParser(prog=prog, description='Renders a Jinja template.')
p.add_argument('path', type=str,
help='path to the template file')
p.add_argument('attrs', nargs='*', type=parse_attr, metavar='k=v',
help='attributes to render the template with')
p.add_argument('-o', '--output', type=str, default='/dev/stdout',
help='the path to write the rendered template to')
return p
def parse_args(argv):
p = arg_parser(argv[0])
args = p.parse_args(argv[1:])
args.attrs = dict(args.attrs)
return args
def parse_attr(s):
if '=' not in s:
raise ArgumentTypeError('`%s` doesn\'t contain a `=`' % s)
else:
return tuple(s.split('=', 1))
def main(cwd, argv):
args = parse_args(argv)
with open(args.output, 'w') as f:
env = jinja2.Environment(loader=Loader(),
undefined=jinja2.StrictUndefined)
tpl = env.get_template(args.path)
for part in tpl.stream(argv=argv, **args.attrs):
print(part, file=f)
if __name__ == '__main__':
import os
import sys
main(os.getcwd(), sys.argv)
| agpl-3.0 | Python |
a317656d37b0d1aa47a4133ce6ddcebec8377c75 | add fallback import for mocking library | siddhantgoel/tornado-sqlalchemy | tests/test_tornado_sqlalchemy.py | tests/test_tornado_sqlalchemy.py | from unittest import TestCase
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from tornado_sqlalchemy import (declarative_base, MissingFactoryError,
SessionFactory, SessionMixin)
from sqlalchemy import Column, BigInteger, String
database_url = 'postgres://postgres:@localhost/tornado_sqlalchemy'
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(BigInteger, primary_key=True)
username = Column(String(255), unique=True)
def __init__(self, username):
self.username = username
class SessionFactoryTestCase(TestCase):
def setUp(self):
self.factory = SessionFactory(database_url)
Base.metadata.create_all(self.factory.engine)
def tearDown(self):
Base.metadata.drop_all(self.factory.engine)
def test_make_session(self):
session = self.factory.make_session()
self.assertTrue(session)
self.assertEqual(session.query(User).count(), 0)
session.close()
class SessionMixinTestCase(TestCase):
def setUp(self):
self.factory = SessionFactory(database_url)
Base.metadata.create_all(self.factory.engine)
def tearDown(self):
Base.metadata.drop_all(self.factory.engine)
def test_mixin_ok(self):
class GoodHandler(SessionMixin):
def __init__(h_self):
h_self.application = Mock()
h_self.application.session_factory = self.factory
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertEqual(GoodHandler().run(), 0)
def test_mixin_no_session_factory(self):
class BadHandler(SessionMixin):
def __init__(h_self):
h_self.application = None
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertRaises(MissingFactoryError, BadHandler().run)
| from unittest import mock, TestCase
from tornado_sqlalchemy import (declarative_base, MissingFactoryError,
SessionFactory, SessionMixin)
from sqlalchemy import Column, BigInteger, String
database_url = 'postgres://postgres:@localhost/tornado_sqlalchemy'
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(BigInteger, primary_key=True)
username = Column(String(255), unique=True)
def __init__(self, username):
self.username = username
class SessionFactoryTestCase(TestCase):
def setUp(self):
self.factory = SessionFactory(database_url)
Base.metadata.create_all(self.factory.engine)
def tearDown(self):
Base.metadata.drop_all(self.factory.engine)
def test_make_session(self):
session = self.factory.make_session()
self.assertTrue(session)
self.assertEqual(session.query(User).count(), 0)
session.close()
class SessionMixinTestCase(TestCase):
def setUp(self):
self.factory = SessionFactory(database_url)
Base.metadata.create_all(self.factory.engine)
def tearDown(self):
Base.metadata.drop_all(self.factory.engine)
def test_mixin_ok(self):
class GoodHandler(SessionMixin):
def __init__(h_self):
h_self.application = mock.Mock()
h_self.application.session_factory = self.factory
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertEqual(GoodHandler().run(), 0)
def test_mixin_no_session_factory(self):
class BadHandler(SessionMixin):
def __init__(h_self):
h_self.application = None
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertRaises(MissingFactoryError, BadHandler().run)
| mit | Python |
8fc35cbc732ee3f9c21e80d7290ccd905b2817cb | work around different sql math in sqlite vs. mysql | therewillbecode/ichnaea,mozilla/ichnaea,mozilla/ichnaea,mozilla/ichnaea,therewillbecode/ichnaea,therewillbecode/ichnaea,mozilla/ichnaea | ichnaea/map_stats.py | ichnaea/map_stats.py | import csv
from cStringIO import StringIO
from ichnaea.db import Measure
def map_stats_request(request):
session = request.database.session()
query = session.query(Measure.lat, Measure.lon)
rows = StringIO()
csvwriter = csv.writer(rows)
csvwriter.writerow(('lat', 'lon'))
for lat, lon in query:
csvwriter.writerow(((lat // 10000) / 1000.0, (lon // 10000) / 1000.0))
return rows.getvalue()
| import csv
from cStringIO import StringIO
from ichnaea.db import Measure
def map_stats_request(request):
session = request.database.session()
query = session.query(Measure.lat / 10000, Measure.lon / 10000)
rows = StringIO()
csvwriter = csv.writer(rows)
csvwriter.writerow(('lat', 'lon'))
for lat, lon in query:
csvwriter.writerow((lat / 1000.0, lon / 1000.0))
return rows.getvalue()
| apache-2.0 | Python |
ba374b4b4d5eadf8c3ba7be4e9ac7e544a06ff12 | Change the get_service to name rather then label (#251) | ONSdigital/ras-frontstage,ONSdigital/ras-frontstage,ONSdigital/ras-frontstage | frontstage/cloud/cloudfoundry.py | frontstage/cloud/cloudfoundry.py | import cfenv
class ONSCloudFoundry(object):
def __init__(self):
self._cf_env = cfenv.AppEnv()
@property
def detected(self):
return self._cf_env.app
@property
def redis(self):
return self._cf_env.get_service(name='rm-redis')
| import cfenv
class ONSCloudFoundry(object):
def __init__(self):
self._cf_env = cfenv.AppEnv()
@property
def detected(self):
return self._cf_env.app
@property
def redis(self):
return self._cf_env.get_service(label='elasticache-broker')
| mit | Python |
a8e6c67bda11b4eff18d68db3bd85faf4093b9a9 | make sure base and quote are in uppercase | nilgradisnik/coinprice-indicator | coin/exchanges/wazirx.py | coin/exchanges/wazirx.py | # Wazirx
# https://api.wazirx.com/api/v2/tickers
# By Rishabh Rawat <rishabhrawat.rishu@gmail.com>
from exchange import Exchange, CURRENCY
class Wazirx(Exchange):
    """Exchange adapter for the WazirX public ticker API."""

    name = "Wazirx"
    code = "wazirx"
    ticker = "https://api.wazirx.com/api/v2/tickers"
    discovery = "https://api.wazirx.com/api/v2/market-status"
    default_label = "last"

    @classmethod
    def _get_discovery_url(cls):
        # Endpoint listing every market currently traded on the exchange.
        return cls.discovery

    def _get_ticker_url(self):
        # Per-pair ticker endpoint; ``self.pair`` is provided by the base
        # class — presumably the raw concatenated pair code (TODO confirm).
        return self.ticker + '/' + self.pair

    @staticmethod
    def _parse_discovery(result):
        """Convert the market-status payload into asset-pair descriptors."""
        asset_pairs = []
        assets = result.get('markets')
        for asset in assets:
            base = asset.get('baseMarket')
            quote = asset.get('quoteMarket')
            asset_pair = {
                # Raw (exchange-cased) code used to build the ticker URL.
                'pair': base+quote,
                # Display symbols are upper-cased for consistency with the
                # lower-cased 'currency' field below.
                'base': base.upper(),
                'quote': quote.upper(),
                'name': base + ' to ' + quote,
                'currency': quote.lower(),
                'volumecurrency': base
            }
            asset_pairs.append(asset_pair)
        return asset_pairs

    def _parse_ticker(self, asset):
        """Extract the standard price fields from one ticker response."""
        asset = asset.get('ticker')
        cur = asset.get('last')
        bid = asset.get('buy')
        high = asset.get('high')
        low = asset.get('low')
        ask = asset.get('sell')
        vol = asset.get('vol')
        return {
            'cur': cur,
            'bid': bid,
            'high': high,
            'low': low,
            'ask': ask,
            'vol': vol
        }
| # Wazirx
# https://api.wazirx.com/api/v2/tickers
# By Rishabh Rawat <rishabhrawat.rishu@gmail.com>
from exchange import Exchange, CURRENCY
class Wazirx(Exchange):
    """Exchange adapter for the WazirX public ticker API."""

    name = "Wazirx"
    code = "wazirx"
    ticker = "https://api.wazirx.com/api/v2/tickers"
    discovery = "https://api.wazirx.com/api/v2/market-status"
    default_label = "last"

    @classmethod
    def _get_discovery_url(cls):
        return cls.discovery

    def _get_ticker_url(self):
        return self.ticker + '/' + self.pair

    @staticmethod
    def _parse_discovery(result):
        """Convert the market-status payload into asset-pair descriptors."""
        asset_pairs = []
        assets = result.get('markets')
        for asset in assets:
            base = asset.get('baseMarket')
            quote = asset.get('quoteMarket')
            asset_pair = {
                # Raw (exchange-cased) code used to build the ticker URL.
                'pair': base+quote,
                # Fix: display symbols are upper-cased, consistent with the
                # explicitly lower-cased 'currency' field below; previously
                # the exchange's raw casing leaked into the UI fields.
                'base': base.upper(),
                'quote': quote.upper(),
                'name': base + ' to ' + quote,
                'currency': quote.lower(),
                'volumecurrency': base
            }
            asset_pairs.append(asset_pair)
        return asset_pairs

    def _parse_ticker(self, asset):
        """Extract the standard price fields from one ticker response."""
        asset = asset.get('ticker')
        cur = asset.get('last')
        bid = asset.get('buy')
        high = asset.get('high')
        low = asset.get('low')
        ask = asset.get('sell')
        vol = asset.get('vol')
        return {
            'cur': cur,
            'bid': bid,
            'high': high,
            'low': low,
            'ask': ask,
            'vol': vol
        }
| mit | Python |
0e50da41eb93c54ad6942d6efe6e775c317b526d | Fix JSON as abstract mapping, but support list too | spiral-project/daybed,spiral-project/daybed | daybed/schemas/json.py | daybed/schemas/json.py | from __future__ import absolute_import
import re
import json
from pyramid.i18n import TranslationString as _
import six
from colander import Sequence, null, Invalid, List, Mapping
from .base import registry, TypeField
__all__ = ['JSONField']
def parse_json(node, cstruct):
    """Decode ``cstruct`` from JSON when it is a string.

    colander's ``null`` and already-parsed (non-string) values are passed
    through untouched; a malformed JSON string is reported as a colander
    ``Invalid`` error attached to *node*.
    """
    if cstruct is null:
        return cstruct
    if not isinstance(cstruct, six.string_types):
        return cstruct
    try:
        return json.loads(cstruct)
    except ValueError as e:
        raise Invalid(node, six.text_type(e), cstruct)
class JSONType(Mapping):
    """A simple node type for JSON content."""

    def __init__(self, *args, **kwargs):
        # 'preserve' keeps unknown keys in the deserialized dict instead of
        # colander's default of silently dropping them.
        kwargs['unknown'] = 'preserve'
        super(JSONType, self).__init__(*args, **kwargs)

    def deserialize(self, node, cstruct=null):
        appstruct = parse_json(node, cstruct)
        if not isinstance(appstruct, dict):
            # If JSON is not a dict, bypass ``Mapping``
            return appstruct
        return super(JSONType, self).deserialize(node, appstruct)
@registry.add('json')
class JSONField(TypeField):
    """Model field holding an arbitrary JSON value."""

    node = JSONType  # colander node type used to (de)serialize values
    hint = _('A JSON value')  # translatable UI hint
class JSONSequence(Sequence):
    """A sequence of items in JSON-like format"""

    def deserialize(self, node, cstruct, **kwargs):
        # Accept either a serialized JSON string or an already-parsed list.
        appstruct = parse_json(node, cstruct)
        return super(JSONSequence, self).deserialize(node, appstruct, **kwargs)
class JSONList(List):
    """Pure JSON or string, as serialized JSON or comma-separated values"""

    def deserialize(self, node, cstruct, **kwargs):
        try:
            appstruct = parse_json(node, cstruct)
        except Invalid:
            # Not valid JSON: fall back to a comma-separated list, first
            # stripping one optional pair of surrounding square brackets.
            cstruct = re.sub(r'^\s*\[(.*)\]\s*', r'\1', cstruct)
            appstruct = re.split(r'\s*,\s*', cstruct)
        return super(JSONList, self).deserialize(node, appstruct, **kwargs)
| from __future__ import absolute_import
import re
import json
from pyramid.i18n import TranslationString as _
import six
from colander import Sequence, null, Invalid, List, Mapping
from .base import registry, TypeField
__all__ = ['JSONField']
def parse_json(node, cstruct):
if cstruct is null:
return cstruct
try:
appstruct = cstruct
if isinstance(cstruct, six.string_types):
appstruct = json.loads(cstruct)
except ValueError as e:
raise Invalid(node, six.text_type(e), cstruct)
return appstruct
class JSONType(Mapping):
    """A simple node type for JSON content.

    Fixes two shortcomings of relying on the stock ``Mapping`` behavior:
    with the default ``unknown='ignore'`` every unknown key was silently
    dropped, and non-dict JSON (lists, scalars) crashed
    ``Mapping.deserialize``.
    """

    def __init__(self, *args, **kwargs):
        # Keep unknown keys instead of dropping them.
        kwargs['unknown'] = 'preserve'
        super(JSONType, self).__init__(*args, **kwargs)

    def deserialize(self, node, cstruct=null):
        appstruct = parse_json(node, cstruct)
        if not isinstance(appstruct, dict):
            # Non-dict JSON: bypass ``Mapping`` entirely.
            return appstruct
        return super(JSONType, self).deserialize(node, appstruct)
@registry.add('json')
class JSONField(TypeField):
node = JSONType
hint = _('A JSON value')
class JSONSequence(Sequence):
"""A sequence of items in JSON-like format"""
def deserialize(self, node, cstruct, **kwargs):
appstruct = parse_json(node, cstruct)
return super(JSONSequence, self).deserialize(node, appstruct, **kwargs)
class JSONList(List):
"""Pure JSON or string, as serialized JSON or comma-separated values"""
def deserialize(self, node, cstruct, **kwargs):
try:
appstruct = parse_json(node, cstruct)
except Invalid:
cstruct = re.sub(r'^\s*\[(.*)\]\s*', r'\1', cstruct)
appstruct = re.split(r'\s*,\s*', cstruct)
return super(JSONList, self).deserialize(node, appstruct, **kwargs)
| bsd-3-clause | Python |
a6f0713c39ea9c86cb1bfab3918fc5a450c35d93 | Change log level on reconcile_message() logging. | closeio/nylas,PriviPK/privipk-sync-engine,Eagles2F/sync-engine,Eagles2F/sync-engine,EthanBlackburn/sync-engine,jobscore/sync-engine,EthanBlackburn/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,closeio/nylas,wakermahmud/sync-engine,jobscore/sync-engine,Eagles2F/sync-engine,EthanBlackburn/sync-engine,closeio/nylas,wakermahmud/sync-engine,PriviPK/privipk-sync-engine,PriviPK/privipk-sync-engine,EthanBlackburn/sync-engine,gale320/sync-engine,wakermahmud/sync-engine,wakermahmud/sync-engine,gale320/sync-engine,jobscore/sync-engine,ErinCall/sync-engine,gale320/sync-engine,jobscore/sync-engine,ErinCall/sync-engine,wakermahmud/sync-engine,ErinCall/sync-engine,nylas/sync-engine,ErinCall/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,nylas/sync-engine,PriviPK/privipk-sync-engine,closeio/nylas,gale320/sync-engine,EthanBlackburn/sync-engine,gale320/sync-engine,Eagles2F/sync-engine,nylas/sync-engine | inbox/models/util.py | inbox/models/util.py | from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from inbox.models.message import Message
from inbox.models.thread import Thread
from inbox.models.folder import Folder, FolderItem
from inbox.util.file import Lock
from inbox.log import get_logger
log = get_logger()
def reconcile_message(db_session, log, inbox_uid, new_msg):
    """
    Identify a `Sent Mail` (or corresponding) message synced from the
    remote backend as one we sent and reconcile it with the message we
    created and stored in the local data store at the time of sending.

    Parameters
    ----------
    db_session : SQLAlchemy session used for the lookup.
    log : logger for reporting reconciliation anomalies.
    inbox_uid : public id taken from the synced message's inbox-sent header.
    new_msg : the freshly synced message to link to the local original.

    Returns
    -------
    The locally created Message when found, otherwise None.

    Notes
    -----
    Our current reconciliation strategy is to keep both messages i.e.
    the one we sent and the one we synced.
    """
    try:
        message = db_session.query(Message).filter(
            Message.public_id == inbox_uid).one()
        # Sanity check: only messages created by us should carry this uid.
        assert message.is_created
        message.resolved_message = new_msg
        return message
    # Don't raise here because message is an Inbox created message but
    # not by this client i.e. the Inbox created version is not present in the
    # local data store.
    except NoResultFound:
        log.warning('NoResultFound for this message, even though '
                    'it has the inbox-sent header: {0}'.format(inbox_uid))
    except MultipleResultsFound:
        log.error('MultipleResultsFound when reconciling message with '
                  'inbox-sent header: {0}'.format(inbox_uid))
        raise
# Namespace Utils
def _db_write_lockfile_name(account_id):
return "/var/lock/inbox_datastore/{0}.lock".format(account_id)
def db_write_lock(namespace_id):
    """ Protect updating this namespace's Inbox datastore data.

    Note that you should also use this to wrap any code that _figures
    out_ what to update the datastore with, because outside the lock
    you can't guarantee no one is updating the data behind your back.
    """
    # Blocking file lock. NOTE(review): the helper's parameter is named
    # account_id while a namespace_id is passed here — TODO confirm the two
    # id spaces are interchangeable.
    return Lock(_db_write_lockfile_name(namespace_id), block=True)
def threads_for_folder(namespace_id, session, folder_name):
    """ NOTE: Does not work for shared folders. """
    # Join through FolderItem so only threads filed in the named folder
    # (within the given namespace) are returned; result is a lazy query.
    return session.query(Thread).join(FolderItem).join(Folder).filter(
        Thread.namespace_id == namespace_id,
        Folder.name == folder_name)
| from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from inbox.models.message import Message
from inbox.models.thread import Thread
from inbox.models.folder import Folder, FolderItem
from inbox.util.file import Lock
from inbox.log import get_logger
log = get_logger()
def reconcile_message(db_session, log, inbox_uid, new_msg):
    """
    Identify a `Sent Mail` (or corresponding) message synced from the
    remote backend as one we sent and reconcile it with the message we
    created and stored in the local data store at the time of sending.

    Returns the locally created Message when found, otherwise None.

    Notes
    -----
    Our current reconciliation strategy is to keep both messages i.e.
    the one we sent and the one we synced.
    """
    try:
        message = db_session.query(Message).filter(
            Message.public_id == inbox_uid).one()
        assert message.is_created
        message.resolved_message = new_msg
        return message
    # Don't raise here because message is an Inbox created message but
    # not by this client i.e. the Inbox created version is not present in the
    # local data store.
    except NoResultFound:
        # This is an expected condition (see the comment above), so log it
        # at warning level rather than error.
        log.warning('NoResultFound for this message, even though '
                    'it has the inbox-sent header: {0}'.format(inbox_uid))
    except MultipleResultsFound:
        log.error('MultipleResultsFound when reconciling message with '
                  'inbox-sent header: {0}'.format(inbox_uid))
        raise
# Namespace Utils
def _db_write_lockfile_name(account_id):
return "/var/lock/inbox_datastore/{0}.lock".format(account_id)
def db_write_lock(namespace_id):
""" Protect updating this namespace's Inbox datastore data.
Note that you should also use this to wrap any code that _figures
out_ what to update the datastore with, because outside the lock
you can't guarantee no one is updating the data behind your back.
"""
return Lock(_db_write_lockfile_name(namespace_id), block=True)
def threads_for_folder(namespace_id, session, folder_name):
""" NOTE: Does not work for shared folders. """
return session.query(Thread).join(FolderItem).join(Folder).filter(
Thread.namespace_id == namespace_id,
Folder.name == folder_name)
| agpl-3.0 | Python |
077fcc44f5b3f960b9ae8d246c9815180328e973 | Add response status and content to DiamondashApiErrors. | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | go/dashboard/client.py | go/dashboard/client.py | import json
import requests
from django.conf import settings
class DiamondashApiError(Exception):
    """
    Raised when something goes wrong while trying to interact with the
    diamondash api.

    Carries the HTTP status ``code`` and raw response ``content`` alongside
    the human-readable message.
    """

    def __init__(self, code, content, message):
        # Stash the status and body for callers that want to inspect them;
        # the base Exception keeps only the message.
        self.code = code
        self.content = content
        super(DiamondashApiError, self).__init__(message)
class DiamondashApiClient(object):
    """Small HTTP client for the diamondash dashboard API."""

    def make_api_url(self, path):
        # Join the configured base url and path without doubling or
        # dropping slashes.
        return '/'.join(
            p.strip('/')
            for p in [settings.DIAMONDASH_API_URL, path])

    def get_api_auth(self):
        """Return (username, password) for basic auth, or None when unset."""
        username = getattr(settings, 'DIAMONDASH_API_USERNAME', None)
        password = getattr(settings, 'DIAMONDASH_API_PASSWORD', None)
        if username is not None and password is not None:
            auth = (username, password)
        else:
            auth = None
        return auth

    def raw_request(self, method, path, content=""):
        """Perform an HTTP request; return ``{'code', 'content'}``.

        Raises DiamondashApiError (carrying the status code and raw body)
        on any non-2xx response.
        """
        resp = requests.request(
            method,
            data=content,
            url=self.make_api_url(path),
            auth=self.get_api_auth())
        try:
            resp.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise DiamondashApiError(
                resp.status_code, resp.content,
                "%s: %s" % (e, resp.content))
        return {
            'code': resp.status_code,
            'content': resp.content
        }

    def request(self, method, path, data=None):
        """JSON helper: serialize ``data``, unwrap the response's 'data' key."""
        resp = self.raw_request(method, path, content=json.dumps(data))
        resp_data = json.loads(resp['content'])
        return resp_data['data']

    def replace_dashboard(self, config):
        # Idempotent PUT of the full dashboard config.
        return self.request('put', 'dashboards', config)
def get_diamondash_api():
    """Factory for the default diamondash API client."""
    return DiamondashApiClient()
| import json
import requests
from django.conf import settings
class DiamondashApiError(Exception):
"""
Raised when we something goes wrong while trying to interact with
diamondash api.
"""
class DiamondashApiClient(object):
def make_api_url(self, path):
return '/'.join(
p.strip('/')
for p in [settings.DIAMONDASH_API_URL, path])
def get_api_auth(self):
username = getattr(settings, 'DIAMONDASH_API_USERNAME', None)
password = getattr(settings, 'DIAMONDASH_API_PASSWORD', None)
if username is not None and password is not None:
auth = (username, password)
else:
auth = None
return auth
def raw_request(self, method, path, content=""):
resp = requests.request(
method,
data=content,
url=self.make_api_url(path),
auth=self.get_api_auth())
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
raise DiamondashApiError(
"%s: %s" % (e, resp.content))
return {
'code': resp.status_code,
'content': resp.content
}
def request(self, method, path, data=None):
resp = self.raw_request(method, path, content=json.dumps(data))
resp_data = json.loads(resp['content'])
return resp_data['data']
def replace_dashboard(self, config):
return self.request('put', 'dashboards', config)
def get_diamondash_api():
return DiamondashApiClient()
| bsd-3-clause | Python |
8f8eef878a5753fe7c6adf0871188f9adcf842a3 | Simplify DiamondashApiClient.get_api_auth() | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | go/dashboard/client.py | go/dashboard/client.py | import json
import requests
from django.conf import settings
class DiamondashApiError(Exception):
"""
Raised when we something goes wrong while trying to interact with
diamondash api.
"""
class DiamondashApiClient(object):
def make_api_url(self, path):
return '/'.join(
p.strip('/')
for p in [settings.DIAMONDASH_API_URL, path])
def get_api_auth(self):
username = getattr(settings, 'DIAMONDASH_API_USERNAME', None)
password = getattr(settings, 'DIAMONDASH_API_PASSWORD', None)
if username is not None and password is not None:
auth = (username, password)
else:
auth = None
return auth
def raw_request(self, method, path, content=""):
resp = requests.request(
method,
data=content,
url=self.make_api_url(path),
auth=self.get_api_auth())
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
raise DiamondashApiError(
"%s: %s" % (e, resp.content))
return {
'code': resp.status_code,
'content': resp.content
}
def request(self, method, path, data=None):
resp = self.raw_request(method, path, content=json.dumps(data))
resp_data = json.loads(resp['content'])
return resp_data['data']
def replace_dashboard(self, config):
return self.request('put', 'dashboards', config)
def get_diamondash_api():
return DiamondashApiClient()
| import json
import requests
from django.conf import settings
class DiamondashApiError(Exception):
"""
Raised when we something goes wrong while trying to interact with
diamondash api.
"""
class DiamondashApiClient(object):
def make_api_url(self, path):
return '/'.join(
p.strip('/')
for p in [settings.DIAMONDASH_API_URL, path])
def get_api_auth(self):
username = None
password = None
if hasattr(settings, 'DIAMONDASH_API_USERNAME'):
username = settings.DIAMONDASH_API_USERNAME
if hasattr(settings, 'DIAMONDASH_API_PASSWORD'):
password = settings.DIAMONDASH_API_PASSWORD
if username is not None and password is not None:
auth = (username, password)
else:
auth = None
return auth
def raw_request(self, method, path, content=""):
resp = requests.request(
method,
data=content,
url=self.make_api_url(path),
auth=self.get_api_auth())
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
raise DiamondashApiError(
"%s: %s" % (e, resp.content))
return {
'code': resp.status_code,
'content': resp.content
}
def request(self, method, path, data=None):
resp = self.raw_request(method, path, content=json.dumps(data))
resp_data = json.loads(resp['content'])
return resp_data['data']
def replace_dashboard(self, config):
return self.request('put', 'dashboards', config)
def get_diamondash_api():
return DiamondashApiClient()
| bsd-3-clause | Python |
3f34777ba55b104b5adc8fc0194e4408f2828a6a | call outmonitor in more command | melmothx/jsonbot,melmothx/jsonbot,melmothx/jsonbot | gozerlib/plugs/more.py | gozerlib/plugs/more.py | # plugs/more.py
#
#
""" access the output cache. """
from gozerlib.commands import cmnds
from gozerlib.examples import examples
def handle_morestatus(bot, ievent):
    """ show how many cached output entries remain for this channel. """
    ievent.reply("%s more entries available" % len(ievent.chan.data.outcache))
cmnds.add('more-status', handle_morestatus, ['USER', 'OPER', 'GUEST'])
examples.add('more-status', "show nr op more items available", 'more-status')
def handle_more(bot, ievent):
    """ pop message from the output cache. """
    try:
        txt = ievent.chan.data.outcache.pop(0)  # oldest chunk first (FIFO)
    except IndexError:
        txt = None
    if not txt:
        ievent.reply('no more data available for %s' % ievent.channel)
        return
    ievent.chan.save()  # persist the shrunken cache
    nritems = len(ievent.chan.data.outcache)
    if nritems:
        # Tell the user how many chunks are still queued after this one.
        txt += "<b> - %s more</b>" % str(nritems)
    ievent.write(txt)
    # Also feed the text to output monitors (e.g. relay/logging bots).
    bot.outmonitor(ievent.userhost, ievent.channel, txt)
cmnds.add('more', handle_more, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('more', 'return txt from output cache', 'more')
def handle_clear(bot, ievent):
    """ clear messages from the output cache. """
    ievent.chan.data.outcache = []  # drop all pending 'more' chunks
    ievent.chan.save()  # persist the emptied cache
    ievent.done()  # acknowledge the command to the user
cmnds.add('clear', handle_clear, ['USER', 'GUEST'], threaded=True)
examples.add('clear', 'clear the outputcache', 'clear')
| # plugs/more.py
#
#
""" access the output cache. """
from gozerlib.commands import cmnds
from gozerlib.examples import examples
def handle_morestatus(bot, ievent):
ievent.reply("%s more entries available" % len(ievent.chan.data.outcache))
cmnds.add('more-status', handle_morestatus, ['USER', 'OPER', 'GUEST'])
examples.add('more-status', "show nr op more items available", 'more-status')
def handle_more(bot, ievent):
""" pop message from the output cache. """
try:
txt = ievent.chan.data.outcache.pop(0)
except IndexError:
txt = None
if not txt:
ievent.reply('no more data available for %s' % ievent.channel)
return
ievent.chan.save()
nritems = len(ievent.chan.data.outcache)
if nritems:
txt += "<b> - %s more</b>" % str(nritems)
ievent.write(txt)
cmnds.add('more', handle_more, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('more', 'return txt from output cache', 'more')
def handle_clear(bot, ievent):
""" clear messages from the output cache. """
ievent.chan.data.outcache = []
ievent.chan.save()
ievent.done()
cmnds.add('clear', handle_clear, ['USER', 'GUEST'], threaded=True)
examples.add('clear', 'clear the outputcache', 'clear')
| mit | Python |
85558a8fba824fdef70e26a8cc035f6c1351a450 | test improvements and refactoring | lionleaf/dwitter,lionleaf/dwitter,lionleaf/dwitter | dwitter/tests/dweet/test_dweet_views.py | dwitter/tests/dweet/test_dweet_views.py | from django.test import TransactionTestCase, Client
from django.contrib.auth.models import User
from dwitter.models import Dweet
from dwitter.dweet.views import fullscreen_dweet, blank_dweet
from django.utils import timezone
def wrap_content(content):
    """Embed *content* as the body of the dweet's ``u(t)`` function."""
    return 'function u(t) {{\n {0}\n }}'.format(content)
def assertResponse(self, response, **kwargs):
    """Assert a response's resolved view, status code and template names."""
    rendered_templates = [template.name for template in response.templates]
    self.assertEqual(response.resolver_match.func, kwargs['view'])
    self.assertEqual(response.status_code, kwargs['status_code'])
    self.assertEqual(rendered_templates, kwargs['templates'])
class DweetTestCase(TransactionTestCase):
    """Integration tests for the fullscreen and blank dweet views."""

    def setUp(self):
        # Requests target the dweet.* subdomain, which routes to the dweet app.
        self.client = Client(HTTP_HOST='dweet.example.com')
        self.user = User.objects.create(username="user", password="")
        self.dweet = Dweet.objects.create(id=1,
                                          code="dweet code",
                                          posted=timezone.now(),
                                          _author=self.user)

    def test_fullscreen_dweet_returns_404_if_dweet_does_not_exist(self):
        response = self.client.get('/id/2')
        assertResponse(self, response,
                       view=fullscreen_dweet,
                       status_code=404,
                       templates=['404_dweet.html'])
        # NOTE(review): the three assertions below duplicate assertResponse
        # above and could be dropped.
        self.assertEqual(response.resolver_match.func, fullscreen_dweet)
        self.assertEqual(response.status_code, 404)
        self.assertEqual([template.name for template in response.templates],
                         ['404_dweet.html'])

    def test_fullscreen_dweet_returns_dweet_with_correct_code(self):
        response = self.client.get('/id/1')
        assertResponse(self, response,
                       view=fullscreen_dweet,
                       status_code=200,
                       templates=['dweet/dweet.html'])
        # The stored code must appear wrapped in the u(t) function body.
        self.assertIn(wrap_content(self.dweet.code), response.content)

    def test_blank_dweet_renders_with_correct_template(self):
        response = self.client.get('/blank')
        assertResponse(self, response,
                       view=blank_dweet,
                       status_code=200,
                       templates=['dweet/dweet.html'])
        self.assertIn(wrap_content(response.context['code']), response.content)
| from django.test import TransactionTestCase, Client
from django.contrib.auth.models import User
from dwitter.models import Dweet
from django.utils import timezone
class DweetTestCase(TransactionTestCase):
def setUp(self):
self.client = Client(HTTP_HOST='dweet.example.com')
self.user = User.objects.create(username="user", password="")
self.dweet = Dweet.objects.create(id=1,
code="dweet code",
posted=timezone.now(),
_author=self.user)
def test_fullscreen_dweet_returns_404_if_dweet_does_not_exist(self):
response = self.client.get('/id/2')
self.assertEqual(response.status_code, 404)
with open('dwitter/templates/404_dweet.html') as f:
self.assertEqual(response.content, f.read())
def test_fullscreen_dweet_returns_dweet_with_correct_code(self):
response = self.client.get('/id/1')
self.assertEqual(response.status_code, 200)
self.assertIn(self.dweet.code, response.content)
def test_blank_dweet_renders_with_correct_template(self):
response = self.client.get('/blank')
self.assertEqual(response.status_code, 200)
self.assertIn(response.context['code'], response.content)
| apache-2.0 | Python |
96807c87bf17406169c24d07406678fdcf5e7549 | use the default visualization model | manuelli/director,mithrandir123/director,edowson/director,rdeits/director,mitdrc/director,mithrandir123/director,rdeits/director,RussTedrake/director,mitdrc/director,manuelli/director,empireryan/director,gizatt/director,manuelli/director,patmarion/director,openhumanoids/director,mithrandir123/director,RussTedrake/director,empireryan/director,manuelli/director,gizatt/director,mitdrc/director,mitdrc/director,openhumanoids/director,edowson/director,gizatt/director,RobotLocomotion/director,RobotLocomotion/director,mithrandir123/director,RussTedrake/director,rdeits/director,RussTedrake/director,RobotLocomotion/director,mithrandir123/director,edowson/director,mitdrc/director,manuelli/director,patmarion/director,openhumanoids/director,gizatt/director,patmarion/director,edowson/director,empireryan/director,openhumanoids/director,RobotLocomotion/director,gizatt/director,empireryan/director,RobotLocomotion/director,RussTedrake/director,openhumanoids/director,rdeits/director,patmarion/director,edowson/director,rdeits/director,empireryan/director,patmarion/director | src/python/ddapp/footstepsdriverpanel.py | src/python/ddapp/footstepsdriverpanel.py | import PythonQt
from PythonQt import QtCore, QtGui
from ddapp import lcmUtils
from ddapp import applogic as app
from ddapp.utime import getUtime
from ddapp import objectmodel as om
from ddapp.timercallback import TimerCallback
import numpy as np
import math
def _makeButton(text, func):
    """Create a QPushButton labelled *text* that invokes *func* on click."""
    b = QtGui.QPushButton(text)
    b.connect('clicked()', func)
    return b
def getDefaultRobotModel():
    """Return the 'robot state model' item from the object-model tree."""
    return om.findObjectByName('robot state model')
class FootstepsPanel(object):
    """Dockable panel exposing footstep-planning actions for *driver*."""

    def __init__(self, driver):
        self.driver = driver
        self.widget = QtGui.QWidget()
        self.widget.setWindowTitle('Footsteps Panel')
        l = QtGui.QVBoxLayout(self.widget)
        l.addWidget(_makeButton('new walking goal', self.onNewWalkingGoal))
        l.addWidget(QtGui.QLabel(''))
        l.addWidget(_makeButton('goal steps', self.onGoalSteps))
        l.addWidget(QtGui.QLabel(''))
        l.addWidget(_makeButton('execute footstep plan', self.onExecute))
        l.addWidget(QtGui.QLabel(''))
        l.addWidget(_makeButton('stop walking', self.onStop))
        l.addStretch()

    def onNewWalkingGoal(self):
        # Plan relative to the default (robot state) model.
        model = getDefaultRobotModel()
        self.driver.createWalkingGoal(model)

    def onGoalSteps(self):
        model = getDefaultRobotModel()
        self.driver.createGoalSteps(model)

    def onExecute(self):
        # Commit the most recently received footstep plan.
        self.driver.commitFootstepPlan(self.driver.lastFootstepPlan)

    def onStop(self):
        self.driver.sendStopWalking()
def toggleWidgetShow():
    """Toggle visibility of the footsteps dock widget."""
    if dock.isVisible():
        dock.hide()
    else:
        dock.show()
def init(driver):
    """Create the panel, dock it (hidden), and hook up its toolbar action."""
    global panel
    global dock
    panel = FootstepsPanel(driver)
    dock = app.addWidgetToDock(panel.widget)
    dock.hide()
    actionName = 'ActionFootstepPanel'
    action = app.getToolBarActions()[actionName]
    action.triggered.connect(toggleWidgetShow)
    return panel
| import PythonQt
from PythonQt import QtCore, QtGui
from ddapp import lcmUtils
from ddapp import applogic as app
from ddapp.utime import getUtime
from ddapp import objectmodel as om
from ddapp.timercallback import TimerCallback
import numpy as np
import math
def _makeButton(text, func):
b = QtGui.QPushButton(text)
b.connect('clicked()', func)
return b
def getVisibleRobotModel():
for obj in om.objects.values():
if isinstance(obj, om.RobotModelItem) and obj.getProperty('Visible'):
return obj
class FootstepsPanel(object):
def __init__(self, driver):
self.driver = driver
self.widget = QtGui.QWidget()
self.widget.setWindowTitle('Footsteps Panel')
l = QtGui.QVBoxLayout(self.widget)
l.addWidget(_makeButton('new walking goal', self.onNewWalkingGoal))
l.addWidget(QtGui.QLabel(''))
l.addWidget(_makeButton('goal steps', self.onGoalSteps))
l.addWidget(QtGui.QLabel(''))
l.addWidget(_makeButton('execute footstep plan', self.onExecute))
l.addWidget(QtGui.QLabel(''))
l.addWidget(_makeButton('stop walking', self.onStop))
l.addStretch()
def onNewWalkingGoal(self):
model = getVisibleRobotModel()
self.driver.createWalkingGoal(model)
def onGoalSteps(self):
model = getVisibleRobotModel()
self.driver.createGoalSteps(model)
def onExecute(self):
self.driver.commitFootstepPlan(self.driver.lastFootstepPlan)
def onStop(self):
self.driver.sendStopWalking()
def toggleWidgetShow():
if dock.isVisible():
dock.hide()
else:
dock.show()
def init(driver):
global panel
global dock
panel = FootstepsPanel(driver)
dock = app.addWidgetToDock(panel.widget)
dock.hide()
actionName = 'ActionFootstepPanel'
action = app.getToolBarActions()[actionName]
action.triggered.connect(toggleWidgetShow)
return panel
| bsd-3-clause | Python |
cdefedaf5a7f8f2affbb8e691dd3eceb93e1708c | Update organizers.py | pyconca/2017-web,pyconca/2017-web,pyconca/2017-web,pyconca/2017-web | config/organizers.py | config/organizers.py |
# Set of (name, url) tuples; display order is produced by sorting on name.
ORGANIZERS = {
    ('Francis Deslauriers', 'https://twitter.com/frdeso_'),
    ('Myles Braithwaite', 'https://mylesb.ca/'),
    ('Peter McCormick', 'https://twitter.com/pdmccormick'),
    ('Terry Yanchynskyy', 'https://github.com/onebit0fme'),
    ('Ryan Wilson-Perkin', 'https://github.com/ryanwilsonperkin'),
    ('Annaelle Duff', 'http://annaelleduff.info/'),
    ('Mathieu Leduc-Hamel', 'https://twitter.com/mlhamel'),
    ('Anarosa Paredes', 'https://twitter.com/aarosered'),
    # Add your name and url here ^ and submit a pull request
    # Order does not matter, final result is sorted by name
}
|
ORGANIZERS = {
('Francis Deslauriers', 'https://twitter.com/frdeso_'),
('Myles Braithwaite', 'https://mylesb.ca/'),
('Peter McCormick', 'https://twitter.com/pdmccormick'),
('Terry Yanchynskyy', 'https://github.com/onebit0fme'),
('Ryan Wilson-Perkin', 'https://github.com/ryanwilsonperkin'),
('Annaelle Duff', 'http://annaelleduff.info/'),
('Mathieu Leduc-Hamel', 'https://twitter.com/mlhamel'),
('Anarosa Paredes', 'https://twitter.com/aarosered'),
# Add you name and url here ^ and submit a pull request
# Order does not matter, final result is sorted by name
}
| mit | Python |
e024c2ddc8a94e6c597c9d73d91dc6d0de23c19d | Update aws_security_test.py | mikhailadvani/cis-aws-automation,mikhailadvani/cis-aws-automation | aws_security_test.py | aws_security_test.py | import argparse
import boto3
import unittest
import yaml
from third_party_modules import HTMLTestRunner
from tests.iam import IamAudit
from tests.networking import NetworkingAudit
from tests.log import LoggingAudit
from tests.monitoring import MonitoringAudit
# Build one unittest suite from the YAML config: each top-level key names a
# test category (Iam/Networking/Logging/Monitoring) mapping test names to an
# enabled flag.
suite = unittest.TestSuite()
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, help="Selects the config file")
parser.add_argument("-p", "--profile", type=str, help="Specifies the boto profile to choose from ~/.aws/config")
parser.add_argument("--report", default='html', help="Prints test execution on the console rather than generating a HTML report", choices=['text', 'html'])
args = parser.parse_args()
if args.profile :
    boto3.setup_default_session(profile_name=args.profile)
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# tags; prefer yaml.safe_load if the config is not fully trusted. The file
# handle opened here is also never closed.
testConfig = yaml.load(open(args.config, 'r'))
# for testCategory, levelConfig in testConfig.iteritems():
for testCategory, tests in testConfig.iteritems():
    for test, enabled in tests.iteritems():
        if enabled:
            # NOTE(review): eval() maps a config string to a class; a dict of
            # the four known *Audit classes would be safer.
            suite.addTest(eval(testCategory+"Audit")(test))
runner = ''
if args.report == 'text':
    runner = unittest.TextTestRunner(verbosity=2)
elif args.report == 'html':
    reportFile = open("test_results.html", "w")
    runner = HTMLTestRunner.HTMLTestRunner(
        stream=reportFile,
        title='aws-security-test - Report',
        verbosity=2
    )
else:
    # Unreachable while argparse enforces choices=['text', 'html']; kept as a
    # defensive guard.
    print 'Invalid report type'
    exit(1)
testExecution = runner.run(suite)
# Non-zero exit status when anything failed or errored (CI-friendly).
exit(len(testExecution.failures) + len(testExecution.errors))
| import argparse
import boto3
import unittest
import yaml
from third_party_modules import HTMLTestRunner
from tests.iam import IamAudit
from tests.networking import NetworkingAudit
from tests.log import LoggingAudit
from tests.monitoring import MonitoringAudit
suite = unittest.TestSuite()
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, help="Selects the config file")
parser.add_argument("-p", "--profile", type=str, help="Specifies the boto profile to choose from ~/.aws/config")
parser.add_argument("--report", default='html', help="Prints test execution on the console rather than generating a HTML report")
args = parser.parse_args()
if args.profile :
boto3.setup_default_session(profile_name=args.profile)
testConfig = yaml.load(open(args.config, 'r'))
# for testCategory, levelConfig in testConfig.iteritems():
for testCategory, tests in testConfig.iteritems():
for test, enabled in tests.iteritems():
if enabled:
suite.addTest(eval(testCategory+"Audit")(test))
runner = ''
if args.report == 'text':
runner = unittest.TextTestRunner(verbosity=2)
elif args.report == 'html':
reportFile = open("test_results.html", "w")
runner = HTMLTestRunner.HTMLTestRunner(
stream=reportFile,
title='aws-security-test - Report',
verbosity=2
)
else:
print 'Invalid report type'
exit(1)
testExecution = runner.run(suite)
exit(len(testExecution.failures) + len(testExecution.errors)) | apache-2.0 | Python |
18175d41a9ea136a5c65f503367e633b0c4da6b0 | print success | PepSalehi/boulder,PepSalehi/boulder | backup/some_tests.py | backup/some_tests.py | layer = qgis.utils.iface.activeLayer()
for feature in layer.getFeatures():
    numberOfLane = feature['NUMLANE']  # unused here — TODO confirm / remove
    lts = feature ['_lts12']
    islLTS_1 = feature['_isl_lts1']
    islLTS_2 = feature['_isl_lts2']
    islLTS_3 = feature['_isl_lts3']
    islLTS_4 = feature['_isl_lts4']
    # Each feature's LTS level must agree with its per-level columns:
    # the matching level is positive and all lower levels are zero.
    if lts ==1 : assert islLTS_1 > 0
    elif lts ==2 :
        assert islLTS_1 == 0
        assert islLTS_2 > 0
    elif lts ==3 :
        assert islLTS_1 == islLTS_2 == 0
        assert islLTS_3 > 0
    elif lts ==4 :
        assert islLTS_1 == islLTS_2 == islLTS_3 == 0
        assert islLTS_4 > 0
    else:
        # NOTE(review): this else belongs to the if/elif chain, so the message
        # prints once per feature whose lts falls outside 1-4; a for/else
        # (printing once after the loop finishes cleanly) was likely intended.
        print "Yoohoo, no errors!"
for feature in layer.getFeatures():
numberOfLane = feature['NUMLANE']
lts = feature ['_lts12']
islLTS_1 = feature['_isl_lts1']
islLTS_2 = feature['_isl_lts2']
islLTS_3 = feature['_isl_lts3']
islLTS_4 = feature['_isl_lts4']
if lts ==1 : assert islLTS_1 > 0
elif lts ==2 :
assert islLTS_1 == 0
assert islLTS_2 > 0
elif lts ==3 :
assert islLTS_1 == islLTS_2 == 0
assert islLTS_3 > 0
elif lts ==4 :
assert islLTS_1 == islLTS_2 == islLTS_3 == 0
assert islLTS_4 > 0 | mit | Python |
a610e4a053481afc4eb26015aefcde18e9ccb119 | Add definitions and precedence for operators | lnishan/SQLGitHub | components/definition.py | components/definition.py | """Language definitions for SQLGitHub."""
EXIT_TOKENS = [u"exit", u"q"]
COMMAND_TOKENS = [u"select", u"from", u"where", u"group", u"order"]
OPERATOR_TOKENS = [u"interval",
u"binary", u"collate",
u"!",
u"-", u"~",
u"^",
u"*", u"/", u"div", u"%", u"mod",
u"-", u"+",
u"<<", u">>",
u"&",
u"|",
u"=", u"<=>", u">=", u">", u"<=", u"<", u"<>", u"!=", u"is", u"like", u"regexp", u"in",
u"between", u"case", u"when", u"then", u"else",
u"not",
u"and", u"&&",
u"xor",
u"or", u"||",
u"=", u":="]
ALL_TOKENS = EXIT_TOKENS + COMMAND_TOKENS + OPERATOR_TOKENS
PRECEDENCE = {
u"interval": 17,
u"binary": 16, u"collate": 16,
u"!": 15,
u"--": 14, u"~": 14, # -- = - (unary minus)
u"^": 13,
u"*": 12, u"/": 12, u"div": 12, u"%": 12, u"mod": 12,
u"-": 11, u"+": 11,
u"<<": 10, u">>": 10,
u"&": 9,
u"|": 8,
u"==": 7, u"<=>": 7, u">=": 7, u">": 7, u"<": 7, u"<>": 7, u"!=": 7, u"is": 7, u"like": 7, u"regexp": 7, u"in": 7, # == = = (comparison)
u"between": 6, u"case": 6, u"when": 6, u"then": 6, u"else": 6,
u"not": 5,
u"and": 4, u"&&": 4,
u"xor": 3,
u"or": 2, u"||": 2,
u"=": 1, u":=": 1,
u",": -1,
u"(": -2,
u")": -3}
| """These are the reserved tokens for SQLGitHub."""
EXIT_TOKENS = [u"exit", u"q"]
COMMAND_TOKENS = [u"select", u"from", u"where", u"group", u"order"]
ALL_TOKENS = EXIT_TOKENS + COMMAND_TOKENS
| mit | Python |
9e09087606f6bd16939a5647a152507e74ae1a37 | update numpy in utils | dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy | disaggregator/utils.py | disaggregator/utils.py | import appliance
import pandas as pd
import numpy as np
def concatenate_traces(traces, metadata=None, how="strict"):
'''
Given a list of appliance traces, returns a single concatenated
trace. With how="strict" option, must be sampled at the same rate and
consecutive, without overlapping datapoints.
'''
if not metadata:
metadata = traces[0].metadata
if how == "strict":
# require ordered list of consecutive, similarly sampled traces with no
# missing data.
return ApplianceTrace(concat([t.series for t in traces],metadata))
else:
raise NotImplementedError
def aggregate_traces(traces, metadata, how="strict"):
'''
Given a list of temporally aligned traces, aggregate them into a single
signal.
'''
if how == "strict":
# require that traces are exactly aligned
summed_series = traces[0].series
for trace in traces[1:]:
summed_series += trace.series
return ApplianceTrace(summed_series,metadata)
else:
return NotImplementedError
def aggregate_instances(instances,how="strict"):
'''
Given a list of temporally aligned instances, aggregate them into a single
signal.
'''
if how == "strict":
traces = [instance.traces for instance in instances]
traces = [list(t) for t in zip(*traces)] # transpose
traces = [ aggregate_traces(t,{}) for t in traces]
# TODO how to aggregate metadata?
return ApplianceInstance(traces)
else:
return NotImplementedError
def order_traces(traces):
'''
Given a set of traces, orders them chronologically and catches
overlapping traces.
'''
order = np.argsort([t.series[0] for t in traces])
new_traces = [traces[i] for i in order]
return new_traces
| import appliance
import pandas
def concatenate_traces(traces, metadata=None, how="strict"):
'''
Given a list of appliance traces, returns a single concatenated
trace. With how="strict" option, must be sampled at the same rate and
consecutive, without overlapping datapoints.
'''
if not metadata:
metadata = traces[0].metadata
if how == "strict":
# require ordered list of consecutive, similarly sampled traces with no
# missing data.
return ApplianceTrace(concat([t.series for t in traces],metadata))
else:
raise NotImplementedError
def aggregate_traces(traces, metadata, how="strict"):
'''
Given a list of temporally aligned traces, aggregate them into a single
signal.
'''
if how == "strict":
# require that traces are exactly aligned
summed_series = traces[0].series
for trace in traces[1:]:
summed_series += trace.series
return ApplianceTrace(summed_series,metadata)
else:
return NotImplementedError
def aggregate_instances(instances,how="strict"):
'''
Given a list of temporally aligned instances, aggregate them into a single
signal.
'''
if how == "strict":
traces = [instance.traces for instance in instances]
traces = [list(t) for t in zip(*traces)] # transpose
traces = [ aggregate_traces(t,{}) for t in traces]
# TODO how to aggregate metadata?
return ApplianceInstance(traces)
else:
return NotImplementedError
def order_traces(traces):
'''
Given a set of traces, orders them chronologically and catches
overlapping traces.
'''
order = np.argsort([t.series[0] for t in traces])
new_traces = [traces[i] for i in order]
return new_traces
| mit | Python |
21b4c1da359996d6e7820792e68a3304ae1490fb | Update errors.py | Beanstream-DRWP/beanstream-python | beanstream/errors.py | beanstream/errors.py | '''
Copyright 2012 Upverter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class Error(Exception):
pass
# define a superclass BeanstreamApiException
# Author: Haggai Liu
class BeanstreamApiException(Error):
pass
class ConfigurationException(BeanstreamApiException):
pass
class ValidationException(BeanstreamApiException): #parameters to a request were incorrect
pass
class RedirectionException(BeanstreamApiException):#HTTP status code 302
pass
class InvalidRequestException(BeanstreamApiException):#HTTP status code 400,405,415
pass
class UnAuthorizedException(BeanstreamApiException):#HTTP status code 401
pass
class BusinessRuleException(BeanstreamApiException):#HTTP status code 402
pass
class ForbiddenException(BeanstreamApiException):#HTTP status code 403
pass
class InternalServerException(BeanstreamApiException):#default
pass
def getMappedException(httpstatuscode):
code=str(httpstatuscode)
if code=='302':
return RedirectionException
if code[0]=='4':
code=code[1:]
if code in ['00','05','15']:
return InvalidRequestException
if code[0]=='0':
code=code[1:]
error_dict={
'1':UnAuthorizedException,
'2':BusinessRuleException,
'3':ForbiddenException
}
if code in error_dict:
return error_dict[code]
return InternalServerException
class TestErrorGenerator(object):
def __init__(self, error):
self.exception = error
def generateError(self):
return self.exception
| '''
Copyright 2012 Upverter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class Error(Exception):
pass
# define a superclass BeanstreamApiException
# Author: Haggai Liu
class BeanstreamApiException(Error):
pass
class ConfigurationException(BeanstreamApiException):
pass
class ValidationException(BeanstreamApiException): #parameters to a request were incorrect
pass
class RedirectionException(BeanstreamApiException):#HTTP status code 302
pass
class InvalidRequestException(BeanstreamApiException):#HTTP status code 400,405,415
pass
class UnAuthorizedException(BeanstreamApiException):#HTTP status code 401
pass
class BusinessRuleException(BeanstreamApiException):#HTTP status code 402
pass
class ForbiddenException(BeanstreamApiException):#HTTP status code 403
pass
class InternalServerException(BeanstreamApiException):#default
pass
def getMappedException(httpstatuscode):
code=str(httpstatuscode)
if code=='302':
return RedirectionError
if code[0]=='4':
code=code[1:]
if code in ['00','05','15']:
return InvalidRequestException
if code[0]=='0':
code=code[1:]
error_dict={
'1':UnAuthorizedException,
'2':BusinessRuleException,
'3':ForbiddenException
}
if code in error_dict:
return error_dict[code]
return InternalServerException
class TestErrorGenerator(object):
def __init__(self, error):
self.exception = error
def generateError(self):
return self.exception
| apache-2.0 | Python |
a0e133ae5d769c03b121b15de63d23ef3eae975e | Allow unicode in rewrite items | lengtche/beets,tima/beets,Andypsamp/CODjunit,shamangeorge/beets,beetbox/beets,andremiller/beets,PierreRust/beets,jackwilsdon/beets,YetAnotherNerd/beets,ruippeixotog/beets,krig/beets,LordSputnik/beets,randybias/beets,bj-yinyan/beets,ttsda/beets,Dishwishy/beets,untitaker/beets,Freso/beets,marcuskrahl/beets,LordSputnik/beets,multikatt/beets,jackwilsdon/beets,pkess/beets,m-urban/beets,Andypsamp/CODfinalJUNIT,SusannaMaria/beets,jmwatte/beets,dfc/beets,parapente/beets,madmouser1/beets,kelvinhammond/beets,tima/beets,SusannaMaria/beets,PierreRust/beets,shamangeorge/beets,xsteadfastx/beets,swt30/beets,jmwatte/beets,YetAnotherNerd/beets,sadatay/beets,LordSputnik/beets,shanemikel/beets,jcoady9/beets,swt30/beets,lengtche/beets,Andypsamp/CODjunit,multikatt/beets,jcoady9/beets,Andypsamp/CODjunit,Andypsamp/CODjunit,jcoady9/beets,xsteadfastx/beets,multikatt/beets,lightwang1/beets,diego-plan9/beets,madmouser1/beets,ibmibmibm/beets,MyTunesFreeMusic/privacy-policy,moodboom/beets,ruippeixotog/beets,MyTunesFreeMusic/privacy-policy,randybias/beets,arabenjamin/beets,diego-plan9/beets,asteven/beets,sadatay/beets,bj-yinyan/beets,madmouser1/beets,ruippeixotog/beets,YetAnotherNerd/beets,lightwang1/beets,jmwatte/beets,sampsyo/beets,artemutin/beets,bj-yinyan/beets,parapente/beets,asteven/beets,m-urban/beets,jbaiter/beets,Kraymer/beets,shamangeorge/beets,pkess/beets,imsparsh/beets,shanemikel/beets,kelvinhammond/beets,aspidites/beets,lengtche/beets,ibmibmibm/beets,shanemikel/beets,xsteadfastx/beets,marcuskrahl/beets,moodboom/beets,arabenjamin/beets,jbaiter/beets,andremiller/beets,lengtche/beets,sadatay/beets,arabenjamin/beets,mathstuf/beets,sampsyo/beets,pkess/beets,sampsyo/beets,gabrielaraujof/beets,Kraymer/beets,bj-yinyan/beets,untitaker/beets,moodboom/beets,imsparsh/beets,Freso/beets,tima/beets,drm00/beets,ruippeixotog/beets,Andypsamp/CODfinalJUNIT,LordSputnik/beets,Andypsamp/CODfinalJUNIT,untitaker/beets,parapente/bee
ts,lightwang1/beets,jcoady9/beets,krig/beets,shamangeorge/beets,Andypsamp/CODfinalJUNIT,Dishwishy/beets,Andypsamp/CODjunit,mried/beets,xsteadfastx/beets,artemutin/beets,multikatt/beets,imsparsh/beets,PierreRust/beets,imsparsh/beets,ttsda/beets,kareemallen/beets,kareemallen/beets,beetbox/beets,dfc/beets,beetbox/beets,dfc/beets,jayme-github/beets,mathstuf/beets,ttsda/beets,Freso/beets,Wen777/beets,YetAnotherNerd/beets,pdf/beets,Kraymer/beets,tima/beets,dfc/beets,drm00/beets,sadatay/beets,mosesfistos1/beetbox,Andypsamp/CODfinalJUNIT,artemutin/beets,mosesfistos1/beetbox,arabenjamin/beets,kareemallen/beets,SusannaMaria/beets,Wen777/beets,SusannaMaria/beets,mried/beets,mathstuf/beets,sampsyo/beets,Dishwishy/beets,parapente/beets,Wen777/beets,mathstuf/beets,ibmibmibm/beets,pdf/beets,asteven/beets,artemutin/beets,kareemallen/beets,moodboom/beets,aspidites/beets,swt30/beets,asteven/beets,gabrielaraujof/beets,MyTunesFreeMusic/privacy-policy,drm00/beets,gabrielaraujof/beets,jayme-github/beets,mosesfistos1/beetbox,marcuskrahl/beets,ttsda/beets,mried/beets,Dishwishy/beets,mosesfistos1/beetbox,lightwang1/beets,m-urban/beets,shanemikel/beets,kelvinhammond/beets,m-urban/beets,ibmibmibm/beets,beetbox/beets,marcuskrahl/beets,untitaker/beets,randybias/beets,kelvinhammond/beets,drm00/beets,MyTunesFreeMusic/privacy-policy,mried/beets,diego-plan9/beets,Kraymer/beets,andremiller/beets,PierreRust/beets,randybias/beets,Freso/beets,jackwilsdon/beets,pkess/beets,swt30/beets,krig/beets,gabrielaraujof/beets,diego-plan9/beets,jackwilsdon/beets,madmouser1/beets,jmwatte/beets | beetsplug/rewrite.py | beetsplug/rewrite.py | # This file is part of beets.
# Copyright 2012, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses user-specified rewriting rules to canonicalize names for path
formats.
"""
import re
import logging
from collections import defaultdict
from beets.plugins import BeetsPlugin
from beets import ui
from beets import library
log = logging.getLogger('beets')
def rewriter(field, rules):
"""Create a template field function that rewrites the given field
with the given rewriting rules. ``rules`` must be a list of
(pattern, replacement) pairs.
"""
def fieldfunc(item):
value = getattr(item, field)
for pattern, replacement in rules:
if pattern.match(value.lower()):
# Rewrite activated.
return replacement
# Not activated; return original value.
return value
return fieldfunc
class RewritePlugin(BeetsPlugin):
template_fields = {}
def configure(self, config):
cls = type(self)
# Gather all the rewrite rules for each field.
rules = defaultdict(list)
if not config.has_section('rewrite'):
return
for key, value in config.items('rewrite', True):
try:
fieldname, pattern = key.split(None, 1)
except ValueError:
raise ui.UserError("invalid rewrite specification")
if fieldname not in library.ITEM_KEYS:
raise ui.UserError("invalid field name (%s) in rewriter" %
fieldname)
log.debug(u'adding template field %s' % key)
pattern = re.compile(pattern.lower())
rules[fieldname].append((pattern, value))
if fieldname == 'artist':
# Special case for the artist field: apply the same
# rewrite for "albumartist" as well.
rules['albumartist'].append((pattern, value))
# Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.iteritems():
cls.template_fields[fieldname] = rewriter(fieldname, fieldrules)
| # This file is part of beets.
# Copyright 2012, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses user-specified rewriting rules to canonicalize names for path
formats.
"""
import re
import logging
from collections import defaultdict
from beets.plugins import BeetsPlugin
from beets import ui
from beets import library
log = logging.getLogger('beets')
def rewriter(field, rules):
"""Create a template field function that rewrites the given field
with the given rewriting rules. ``rules`` must be a list of
(pattern, replacement) pairs.
"""
def fieldfunc(item):
value = getattr(item, field)
for pattern, replacement in rules:
if pattern.match(value):
# Rewrite activated.
return replacement
# Not activated; return original value.
return value
return fieldfunc
class RewritePlugin(BeetsPlugin):
template_fields = {}
def configure(self, config):
cls = type(self)
# Gather all the rewrite rules for each field.
rules = defaultdict(list)
if not config.has_section('rewrite'):
return
for key, value in config.items('rewrite', True):
try:
fieldname, pattern = key.split(None, 1)
except ValueError:
raise ui.UserError("invalid rewrite specification")
if fieldname not in library.ITEM_KEYS:
raise ui.UserError("invalid field name (%s) in rewriter" %
fieldname)
log.debug(u'adding template field %s' % key)
pattern = re.compile(pattern, re.I)
rules[fieldname].append((pattern, value))
if fieldname == 'artist':
# Special case for the artist field: apply the same
# rewrite for "albumartist" as well.
rules['albumartist'].append((pattern, value))
# Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.iteritems():
cls.template_fields[fieldname] = rewriter(fieldname, fieldrules)
| mit | Python |
cd7ec0fce4cc73954d2c3769adff0373b9ef62b2 | Update control/passivity.py | python-control/python-control | control/passivity.py | control/passivity.py | '''
Author: Mark Yeatman
Date: May 15, 2022
'''
import numpy as np
try:
import cvxopt as cvx
except ImportError as e:
cvx = None
def is_passive(sys):
'''
Indicates if a linear time invarient system is passive
Constructs a linear matrix inequality and a feasibility optimization
such that is a solution exists, the system is passive.
The source for the algorithm is:
McCourt, Michael J., and Panos J. Antsaklis.
"Demonstrating passivity and dissipativity using computational methods." ISIS 8 (2013).
'''
if cvx is None:
print("cvxopt required for passivity module")
raise ModuleNotFoundError("cvxopt required for passivity module")
A = sys.A
B = sys.B
C = sys.C
D = sys.D
def make_LMI_matrix(P):
V = np.vstack((
np.hstack((A.T @ P + P@A, P@B)),
np.hstack((B.T@P, np.zeros_like(D))))
)
return V
matrix_list = []
state_space_size = sys.nstates
for i in range(0, state_space_size):
for j in range(0, state_space_size):
if j <= i:
P = np.zeros_like(A)
P[i, j] = 1.0
P[j, i] = 1.0
matrix_list.append(make_LMI_matrix(P).flatten())
coefficents = np.vstack(matrix_list).T
constants = -np.vstack((
np.hstack((np.zeros_like(A), - C.T)),
np.hstack((- C, -D - D.T)))
)
number_of_opt_vars = int(
(state_space_size**2-state_space_size)/2 + state_space_size)
c = cvx.matrix(0.0, (number_of_opt_vars, 1))
# crunch feasibility solution
sol = cvx.solvers.sdp(c,
Gs=[cvx.matrix(coefficents)],
hs=[cvx.matrix(constants)])
return (sol["x"] is not None)
| '''
Author: Mark Yeatman
Date: May 15, 2022
'''
import numpy as np
try:
import cvxopt as cvx
except ImportError as e:
cvx = None
def is_passive(sys):
'''
Indicates if a linear time invarient system is passive
Constructs a linear matrix inequality and a feasibility optimization
such that is a solution exists, the system is passive.
The source for the algorithm is:
McCourt, Michael J., and Panos J. Antsaklis.
"Demonstrating passivity and dissipativity using computational methods." ISIS 8 (2013).
'''
if cvx is None:
print("cvxopt required for passivity module")
raise ModuleNotFoundError
A = sys.A
B = sys.B
C = sys.C
D = sys.D
def make_LMI_matrix(P):
V = np.vstack((
np.hstack((A.T @ P + P@A, P@B)),
np.hstack((B.T@P, np.zeros_like(D))))
)
return V
matrix_list = []
state_space_size = sys.nstates
for i in range(0, state_space_size):
for j in range(0, state_space_size):
if j <= i:
P = np.zeros_like(A)
P[i, j] = 1.0
P[j, i] = 1.0
matrix_list.append(make_LMI_matrix(P).flatten())
coefficents = np.vstack(matrix_list).T
constants = -np.vstack((
np.hstack((np.zeros_like(A), - C.T)),
np.hstack((- C, -D - D.T)))
)
number_of_opt_vars = int(
(state_space_size**2-state_space_size)/2 + state_space_size)
c = cvx.matrix(0.0, (number_of_opt_vars, 1))
# crunch feasibility solution
sol = cvx.solvers.sdp(c,
Gs=[cvx.matrix(coefficents)],
hs=[cvx.matrix(constants)])
return (sol["x"] is not None)
| bsd-3-clause | Python |
e13761e1bc4d225d377b357b08722eb3cfad6048 | Update package info | TamiaLab/PySkCode | skcode/__init__.py | skcode/__init__.py | """
PySkCode, Python implementation of a full-featured BBCode syntax parser library.
"""
# Package information
__author__ = "Fabien Batteix (@skywodd)"
__copyright__ = "Copyright 2016, TamiaLab"
__credits__ = ["Fabien Batteix", "TamiaLab"]
__license__ = "GPLv3"
__version__ = "1.0.7"
__maintainer__ = "Fabien Batteix"
__email__ = "fabien.batteix@tamialab.fr"
__status__ = "Development" # "Production"
# User friendly imports
from .treebuilder import parse_skcode
from .render import (render_to_html,
render_to_skcode,
render_to_text)
| """
SkCode (Python implementation of BBcode syntax) parser library.
"""
# Package information
__author__ = "Fabien Batteix (@skywodd)"
__copyright__ = "Copyright 2015, TamiaLab"
__credits__ = ["Fabien Batteix", "TamiaLab"]
__license__ = "GPLv3"
__version__ = "1.0.7"
__maintainer__ = "Fabien Batteix"
__email__ = "fabien.batteix@tamialab.fr"
__status__ = "Development" # "Production"
# User friendly imports
from .treebuilder import parse_skcode
from .render import (render_to_html,
render_to_skcode,
render_to_text)
| agpl-3.0 | Python |
16292bd16caf38f073a4a1a782798eb9f3c13c1b | fix name of test | Nic30/hwtLib,Nic30/hwtLib | hwtLib/mem/cam_test.py | hwtLib/mem/cam_test.py | import unittest
from hdl_toolkit.bitmask import Bitmask
from hdl_toolkit.hdlObjects.specialValues import Time, NOP
from hdl_toolkit.simulator.agentConnector import valuesToInts
from hdl_toolkit.simulator.shortcuts import simUnitVcd, simPrepare
from hwtLib.mem.cam import Cam
class CamTC(unittest.TestCase):
def test_writeAndMatchTest(self):
u, model, procs = simPrepare(Cam())
u.write._ag.data = [(0, 1, -1),
(1, 3, -1),
(7, 11, -1)]
u.match._ag.data = [NOP, NOP, NOP, 1, 2, 3, 5, 11, 12]
simUnitVcd(model, procs,
"tmp/cam_simple.vcd", time=160 * Time.ns)
self.assertSequenceEqual(valuesToInts(u.out._ag.data),
[1, 0, 2, 0, 128, 0])
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(TwoCntrsTC('test_withStops'))
suite.addTest(unittest.makeSuite(CamTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| import unittest
from hdl_toolkit.bitmask import Bitmask
from hdl_toolkit.hdlObjects.specialValues import Time, NOP
from hdl_toolkit.simulator.agentConnector import valuesToInts
from hdl_toolkit.simulator.shortcuts import simUnitVcd, simPrepare
from hwtLib.mem.cam import Cam
class CamTC(unittest.TestCase):
def test_writeAndRead(self):
u, model, procs = simPrepare(Cam())
u.write._ag.data = [(0, 1, -1),
(1, 3, -1),
(7, 11, -1)]
u.match._ag.data = [NOP, NOP, NOP, 1, 2, 3, 5, 11, 12]
simUnitVcd(model, procs,
"tmp/cam_simple.vcd", time=160 * Time.ns)
self.assertSequenceEqual(valuesToInts(u.out._ag.data),
[1, 0, 2, 0, 128, 0])
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(TwoCntrsTC('test_withStops'))
suite.addTest(unittest.makeSuite(CamTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| mit | Python |
eee725353bee314a999c6c8d9d06959ac9f8b476 | fix edit redirect | tracon/infotv-tracon | infotv_tracon/views.py | infotv_tracon/views.py | from django.contrib.admin.views.decorators import staff_member_required
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import redirect
@staff_member_required(login_url=settings.LOGIN_URL)
def infotv_edit_redirect_view(self, event):
return redirect(reverse('infotv_view', args=(event,)) + '?edit=1')
| from django.contrib.admin.views.decorators import staff_member_required
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
@staff_member_required
def infotv_edit_redirect_view(self, event):
return redirect(reverse('infotv_view', args=(event,)) + '?edit=1')
| mit | Python |
2fd2ff02401339990029cc7270f912db32026230 | allow msg comparison and use repr instead of str | bndl/bndl,bndl/bndl | bndl/net/messages.py | bndl/net/messages.py | _msgtypes = {}
class Field(object):
pass
class MessageType(type):
def __new__(cls, name, parents, dct):
dct['__slots__'] = schema = [k for k, v in dct.items() if isinstance(v, Field)]
for k in schema:
dct.pop(k)
_msgtypes[name] = msgtype = super().__new__(cls, name, parents, dct)
return msgtype
class Message(metaclass=MessageType):
def __init__(self, **kwargs):
for k in self.__slots__:
setattr(self, k, kwargs.get(k))
def __repr__(self):
return (self.__class__.__name__ + '(' +
', '.join(k + '=' + str(getattr(self, k)) for k in self.__slots__)
+ ')')
def __msgdict__(self):
d = {k:getattr(self, k) for k in self.__slots__}
return (type(self).__name__, d)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
for k in self.__slots__:
if getattr(self, k, None) != getattr(other, k, None):
return False
return True
@staticmethod
def load(msg):
return _msgtypes[msg[0]](**msg[1])
class Hello(Message):
# str, name of node
name = Field()
# str, cluster of node
cluster = Field()
# str, type of node
node_type = Field()
# list or set of str, addresses at which the node can be reached
addresses = Field()
class Discovered(Message):
# list of name, addresses tuples
peers = Field()
class Disconnect(Message):
# str for debug perposes
reason = Field()
class Ping(Message):
pass
class Pong(Message):
pass
| _msgtypes = {}
class Field(object):
pass
class MessageType(type):
def __new__(cls, name, parents, dct):
dct['__slots__'] = schema = [k for k, v in dct.items() if isinstance(v, Field)]
for k in schema:
dct.pop(k)
_msgtypes[name] = msgtype = super().__new__(cls, name, parents, dct)
return msgtype
class Message(metaclass=MessageType):
def __init__(self, **kwargs):
for k in self.__slots__:
setattr(self, k, kwargs.get(k))
def __str__(self):
return (self.__class__.__name__ + '(' +
', '.join(k + '=' + str(getattr(self, k)) for k in self.__slots__)
+ ')')
def __msgdict__(self):
d = {k:getattr(self, k) for k in self.__slots__}
return (type(self).__name__, d)
@staticmethod
def load(msg):
return _msgtypes[msg[0]](**msg[1])
class Hello(Message):
# str, name of node
name = Field()
# str, cluster of node
cluster = Field()
# str, type of node
node_type = Field()
# list or set of str, addresses at which the node can be reached
addresses = Field()
class Discovered(Message):
# list of name, addresses tuples
peers = Field()
class Disconnect(Message):
# str for debug perposes
reason = Field()
class Ping(Message):
pass
class Pong(Message):
pass
| apache-2.0 | Python |
2f0c68f6a3fb311da9966008c0bb0e74b3e84e1b | increment patch version | qtux/instmatcher | instmatcher/version.py | instmatcher/version.py | # Copyright 2016 Matthias Gazzari
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''the library version'''
__version__ = '0.5.1'
| # Copyright 2016 Matthias Gazzari
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''the library version'''
__version__ = '0.5.0'
| apache-2.0 | Python |
53a79a15e7de62d30beedf90bef23427d554e469 | Bump to 1.1 | benjaoming/django-nyt,benjaoming/django-nyt | django_nyt/__init__.py | django_nyt/__init__.py | _disable_notifications = False
__version__ = "1.1"
default_app_config = "django_nyt.apps.DjangoNytConfig"
def notify(*args, **kwargs):
"""
DEPRECATED - please access django_nyt.utils.notify
"""
from django_nyt.utils import notify
return notify(*args, **kwargs)
| _disable_notifications = False
__version__ = "1.1b2"
default_app_config = "django_nyt.apps.DjangoNytConfig"
def notify(*args, **kwargs):
"""
DEPRECATED - please access django_nyt.utils.notify
"""
from django_nyt.utils import notify
return notify(*args, **kwargs)
| apache-2.0 | Python |
452db2d2a1bc5c5ae06a44e4fff3df6f7e5b804c | add CustomGroupAdmin, PermissionAdmin and UserPermissionListAdmin | sunils34/djangotoolbox,Knotis/djangotoolbox,potatolondon/djangotoolbox-1-4,kavdev/djangotoolbox,brstrat/djangotoolbox | djangotoolbox/admin.py | djangotoolbox/admin.py | from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.models import User, Group, Permission
from djangotoolbox.auth.models import UserPermissionList
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email', 'first_name', 'last_name', 'is_active',
'is_staff', 'is_superuser')
class CustomUserAdmin(UserAdmin):
fieldsets = None
form = UserForm
class GroupForm(forms.ModelForm):
class Meta:
model = Group
exclude = ('permissions')
class CustomGroupAdmin(GroupAdmin):
fieldsets = None
form = GroupForm
class PermissionAdmin(admin.ModelAdmin):
ordering = ('codename',)
class UserPermissionListAdmin(admin.ModelAdmin):
pass
admin.site.register(UserPermissionList, UserPermissionListAdmin)
admin.site.register(Permission, PermissionAdmin)
admin.site.unregister(User)
admin.site.unregister(Group)
admin.site.register(User, CustomUserAdmin)
admin.site.register(Group, CustomGroupAdmin)
| from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User, Group
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email', 'first_name', 'last_name', 'is_active',
'is_staff', 'is_superuser')
class CustomUserAdmin(UserAdmin):
fieldsets = None
form = UserForm
admin.site.unregister(User)
admin.site.unregister(Group)
admin.site.register(User, CustomUserAdmin)
| bsd-3-clause | Python |
5425e10a39ce66d3c50e730d1e7b7878e8e88eb0 | Allow /dev/urandom for PHP 7 | DMOJ/judge,DMOJ/judge,DMOJ/judge | dmoj/executors/PHP7.py | dmoj/executors/PHP7.py | from .php_executor import PHPExecutor
from dmoj.judgeenv import env
class Executor(PHPExecutor):
name = 'PHP7'
command = env['runtime'].get('php7')
fs = ['.*\.so', '/etc/localtime$', '.*\.ini$', '/dev/urandom$']
initialize = Executor.initialize
| from .php_executor import PHPExecutor
from dmoj.judgeenv import env
class Executor(PHPExecutor):
name = 'PHP7'
command = env['runtime'].get('php7')
fs = ['.*\.so', '/etc/localtime$', '.*\.ini$']
initialize = Executor.initialize
| agpl-3.0 | Python |
c26fd0f22a7c3de3e41f4d20379d877eaab84468 | Use xyz when autodetecting bonds with Open Babel | OpenChemistry/mongochemserver | girder/molecules/molecules/openbabel.py | girder/molecules/molecules/openbabel.py | import json
import requests
from girder.models.setting import Setting
from molecules.avogadro import convert_str as avo_convert_str
from molecules.constants import PluginSettings
from molecules.utilities.has_3d_coords import cjson_has_3d_coords
def openbabel_base_url():
base_url = Setting().get(PluginSettings.OPENBABEL_BASE_URL)
if base_url is None:
base_url = 'http://localhost:5000'
return base_url
def convert_str(data_str, input_format, output_format, extra_options=None):
if extra_options is None:
extra_options = {}
base_url = openbabel_base_url()
path = 'convert'
url = '/'.join([base_url, path, output_format])
data = {
'format': input_format,
'data': data_str,
}
data.update(extra_options)
r = requests.post(url, json=data)
if r.headers and 'content-type' in r.headers:
mimetype = r.headers['content-type']
else:
mimetype = None
return r.text, mimetype
def to_inchi(data_str, input_format):
result, mime = convert_str(data_str, input_format, 'inchi')
result = json.loads(result)
return result.get('inchi'), result.get('inchikey')
def to_smiles(data_str, input_format):
result, mime = convert_str(data_str, input_format, 'smi')
return result
def gen_sdf_no_3d(data_str, input_format, add_hydrogens=True):
extra_options = {
'addHydrogens': add_hydrogens
}
return convert_str(data_str, input_format, 'sdf', extra_options)
def properties(data_str, input_format, add_hydrogens=True):
base_url = openbabel_base_url()
path = 'properties'
url = '/'.join([base_url, path])
data = {
'format': input_format,
'data': data_str,
'addHydrogens': add_hydrogens
}
r = requests.post(url, json=data)
return r.json()
def autodetect_bonds(cjson):
# This function drops all bonding info and autodetects bonds
# using Open Babel.
# Only autodetect bonds if we have 3D coordinates
if not cjson_has_3d_coords(cjson):
return cjson
cjson_str = json.dumps(cjson)
xyz_str = avo_convert_str(cjson_str, 'cjson', 'xyz')
extra_options = {
'perceiveBonds': True
}
sdf_str, mime = convert_str(xyz_str, 'xyz', 'sdf', extra_options)
cjson_str = avo_convert_str(sdf_str, 'sdf', 'cjson')
return json.loads(cjson_str)
| import json
import requests
from girder.models.setting import Setting
from molecules.avogadro import convert_str as avo_convert_str
from molecules.constants import PluginSettings
from molecules.utilities.has_3d_coords import cjson_has_3d_coords
def openbabel_base_url():
base_url = Setting().get(PluginSettings.OPENBABEL_BASE_URL)
if base_url is None:
base_url = 'http://localhost:5000'
return base_url
def convert_str(data_str, input_format, output_format, extra_options=None):
if extra_options is None:
extra_options = {}
base_url = openbabel_base_url()
path = 'convert'
url = '/'.join([base_url, path, output_format])
data = {
'format': input_format,
'data': data_str,
}
data.update(extra_options)
r = requests.post(url, json=data)
if r.headers and 'content-type' in r.headers:
mimetype = r.headers['content-type']
else:
mimetype = None
return r.text, mimetype
def to_inchi(data_str, input_format):
result, mime = convert_str(data_str, input_format, 'inchi')
result = json.loads(result)
return result.get('inchi'), result.get('inchikey')
def to_smiles(data_str, input_format):
result, mime = convert_str(data_str, input_format, 'smi')
return result
def gen_sdf_no_3d(data_str, input_format, add_hydrogens=True):
extra_options = {
'addHydrogens': add_hydrogens
}
return convert_str(data_str, input_format, 'sdf', extra_options)
def properties(data_str, input_format, add_hydrogens=True):
base_url = openbabel_base_url()
path = 'properties'
url = '/'.join([base_url, path])
data = {
'format': input_format,
'data': data_str,
'addHydrogens': add_hydrogens
}
r = requests.post(url, json=data)
return r.json()
def autodetect_bonds(cjson):
# Only autodetect bonds if we have 3D coordinates
if not cjson_has_3d_coords(cjson):
return cjson
cjson_str = json.dumps(cjson)
sdf_str = avo_convert_str(cjson_str, 'cjson', 'sdf')
extra_options = {
'perceiveBonds': True
}
sdf_str, mime = convert_str(sdf_str, 'sdf', 'sdf', extra_options)
cjson_str = avo_convert_str(sdf_str, 'sdf', 'cjson')
return json.loads(cjson_str)
| bsd-3-clause | Python |
5e966cb38fd60dddf7d7abc7636e39aabfc81783 | increase version to 1.6 | Chris7/cutadapt,marcelm/cutadapt | cutadapt/__init__.py | cutadapt/__init__.py | from __future__ import print_function
import sys
__version__ = '1.6'
def check_importability():
try:
import cutadapt._align
except ImportError as e:
if 'undefined symbol' in str(e):
print("""
ERROR: A required extension module could not be imported because it is
incompatible with your system. A quick fix is to recompile the extension
modules with the following command:
{} setup.py build_ext -i
See the documentation for alternative ways of installing the program.
The original error message follows.
""".format(sys.executable))
raise
| from __future__ import print_function
import sys
__version__ = '1.6dev'
def check_importability():
try:
import cutadapt._align
except ImportError as e:
if 'undefined symbol' in str(e):
print("""
ERROR: A required extension module could not be imported because it is
incompatible with your system. A quick fix is to recompile the extension
modules with the following command:
{} setup.py build_ext -i
See the documentation for alternative ways of installing the program.
The original error message follows.
""".format(sys.executable))
raise
| mit | Python |
3a88ec452327312a1e8d799856782acaa1854fb6 | Add initial code to support non-schemad files | andrewgross/json2parquet | json2parquet/client.py | json2parquet/client.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import pyarrow as pa
import pyarrow.parquet as pq
def ingest_data(data, schema=None):
"""
Takes an array of dictionary objects, and a pyarrow schema with column names and types.
Outputs a pyarrow Batch of the data
"""
if isinstance(schema, list):
return _convert_data_with_column_names(data, schema)
elif isinstance(schema, pa.Schema):
return _convert_data_with_schema(data, schema)
else:
return _convert_data_without_schema(data)
def _convert_data_without_schema(data):
pass
def _convert_data_with_column_names(data, schema):
pass
def _convert_data_with_schema(data, schema):
column_data = {}
array_data = []
for row in data:
for column in schema.names:
_col = column_data.get(column, [])
_col.append(row.get(column))
column_data[column] = _col
for column in schema:
_col = column_data.get(column.name)
array_data.append(pa.array(_col, type=column.type))
return pa.RecordBatch.from_arrays(array_data, schema.names)
def load_json(filename, schema):
"""
Simple but inefficient way to load data from a newline delineated json file
"""
json_data = []
with open(filename, "r") as f:
for line in f.readlines():
if line:
json_data.append(json.loads(line))
return ingest_data(json_data, schema)
def write_parquet(data, destination, **kwargs):
"""
Takes a PyArrow record batch and writes it as a parquet file to the gives destination
"""
try:
table = pa.Table.from_batches(data)
except TypeError:
table = pa.Table.from_batches([data])
pq.write_table(table, destination, **kwargs)
def convert_json(input, output, schema, **kwargs):
data = load_json(input, schema)
write_parquet(data, output, **kwargs)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import pyarrow as pa
import pyarrow.parquet as pq
def ingest_data(data, schema):
"""
Takes an array of dictionary objects, and a pyarrow schema with column names and types.
Outputs a pyarrow Batch of the data
"""
column_data = {}
array_data = []
for row in data:
for column in schema.names:
_col = column_data.get(column, [])
_col.append(row.get(column))
column_data[column] = _col
for column in schema:
_col = column_data.get(column.name)
array_data.append(pa.array(_col, type=column.type))
return pa.RecordBatch.from_arrays(array_data, schema.names)
def load_json(filename, schema):
"""
Simple but inefficient way to load data from a newline delineated json file
"""
json_data = []
with open(filename, "r") as f:
for line in f.readlines():
if line:
json_data.append(json.loads(line))
return ingest_data(json_data, schema)
def write_parquet(data, destination, **kwargs):
"""
Takes a PyArrow record batch and writes it as a parquet file to the gives destination
"""
try:
table = pa.Table.from_batches(data)
except TypeError:
table = pa.Table.from_batches([data])
pq.write_table(table, destination, **kwargs)
def convert_json(input, output, schema, **kwargs):
data = load_json(input, schema)
write_parquet(data, output, **kwargs)
| mit | Python |
2c16c858f9be1f00065146b09b6cb39f3851081e | Add license | taigaio/taiga-back,coopsource/taiga-back,seanchen/taiga-back,xdevelsistemas/taiga-back-community,gauravjns/taiga-back,CMLL/taiga-back,dycodedev/taiga-back,gam-phon/taiga-back,joshisa/taiga-back,EvgeneOskin/taiga-back,crr0004/taiga-back,forging2012/taiga-back,astagi/taiga-back,astagi/taiga-back,gam-phon/taiga-back,gauravjns/taiga-back,joshisa/taiga-back,crr0004/taiga-back,Tigerwhit4/taiga-back,obimod/taiga-back,jeffdwyatt/taiga-back,dayatz/taiga-back,taigaio/taiga-back,Rademade/taiga-back,seanchen/taiga-back,joshisa/taiga-back,dycodedev/taiga-back,jeffdwyatt/taiga-back,seanchen/taiga-back,seanchen/taiga-back,EvgeneOskin/taiga-back,rajiteh/taiga-back,rajiteh/taiga-back,Tigerwhit4/taiga-back,Tigerwhit4/taiga-back,xdevelsistemas/taiga-back-community,Rademade/taiga-back,EvgeneOskin/taiga-back,CMLL/taiga-back,WALR/taiga-back,WALR/taiga-back,CMLL/taiga-back,bdang2012/taiga-back-casting,coopsource/taiga-back,forging2012/taiga-back,CoolCloud/taiga-back,Rademade/taiga-back,dycodedev/taiga-back,forging2012/taiga-back,WALR/taiga-back,Tigerwhit4/taiga-back,coopsource/taiga-back,dayatz/taiga-back,CMLL/taiga-back,astagi/taiga-back,Rademade/taiga-back,astronaut1712/taiga-back,WALR/taiga-back,astronaut1712/taiga-back,bdang2012/taiga-back-casting,bdang2012/taiga-back-casting,joshisa/taiga-back,Rademade/taiga-back,forging2012/taiga-back,rajiteh/taiga-back,rajiteh/taiga-back,crr0004/taiga-back,gauravjns/taiga-back,CoolCloud/taiga-back,obimod/taiga-back,gam-phon/taiga-back,astronaut1712/taiga-back,astagi/taiga-back,crr0004/taiga-back,obimod/taiga-back,bdang2012/taiga-back-casting,obimod/taiga-back,CoolCloud/taiga-back,dycodedev/taiga-back,jeffdwyatt/taiga-back,coopsource/taiga-back,astronaut1712/taiga-back,gam-phon/taiga-back,dayatz/taiga-back,CoolCloud/taiga-back,xdevelsistemas/taiga-back-community,jeffdwyatt/taiga-back,taigaio/taiga-back,gauravjns/taiga-back,EvgeneOskin/taiga-back | taiga/projects/userstories/validators.py | 
taiga/projects/userstories/validators.py | # Copyright (C) 2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from . import models
class UserStoryExistsValidator:
def validate_us_id(self, attrs, source):
value = attrs[source]
if not models.UserStory.objects.filter(pk=value).exists():
msg = _("There's no user story with that id")
raise serializers.ValidationError(msg)
return attrs
| from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from . import models
class UserStoryExistsValidator:
def validate_us_id(self, attrs, source):
value = attrs[source]
if not models.UserStory.objects.filter(pk=value).exists():
msg = _("There's no user story with that id")
raise serializers.ValidationError(msg)
return attrs
| agpl-3.0 | Python |
60957d5bd4753e3f7e36366d3101d69d23dfb4a0 | Update MinimumJoystick.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/brotherbrown831/MinimumJoystick.py | home/brotherbrown831/MinimumJoystick.py | from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Runtime
from time import sleep
#---------------------------------Create Services----------------------
joystick = Runtime.createAndStart("joystick","Joystick")
#----------------------Connect Peripherals-----------------------------------
joystick.setController(0); #PC only - Pi needs new
joystick.addInputListener(python)
# Tell the joystick to turn on
joystick.startPolling()
#----------------------Define callback function for Joystick-----------
def onJoystickInput(data):
global float(ryValue)
if (data.id == 'A' and float(data.value) == 1.0):
print "Attatch MotorLeft"
if (data.id == 'B' and float(data.value) == 1.0):
print "Detach MotorLeft"
if (data.id == 'ry'):
ryValue = float(data.value)
def print "the value of ry is" (ryValue)# this number could easily be used in other speed control functions
'''
example use
def motorLeft.move(255*(ryValue):
| from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Runtime
from time import sleep
#---------------------------------Create Services----------------------
joystick = Runtime.createAndStart("joystick","Joystick")
#----------------------Define callback function for Joystick-----------
def onJoystickInput(data):
print data
if (data.id == '0' and float(data.value) == 1.0):
print "A Button has been pushed"
#----------------------Connect Peripherals-----------------------------------
joystick.setController(2); #PC only - Pi needs new
joystick.addInputListener(python)
# Tell the joystick to turn on
joystick.startPolling()
| apache-2.0 | Python |
43f05bb600a8975c2cb406560acd1f03dc237374 | Use pytest caplog | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/pytests/unit/client/test_netapi.py | tests/pytests/unit/client/test_netapi.py | import logging
import salt.client.netapi
import salt.config
from tests.support.mock import Mock, patch
def test_run_log(caplog):
"""
test salt.client.netapi logs correct message
"""
opts = salt.config.DEFAULT_MASTER_OPTS.copy()
opts["rest_cherrypy"] = {"port": 8000}
mock_process = Mock()
mock_process.add_process.return_value = True
patch_process = patch.object(salt.utils.process, "ProcessManager", mock_process)
with caplog.at_level(logging.INFO):
with patch_process:
netapi = salt.client.netapi.NetapiClient(opts)
netapi.run()
assert "Starting RunNetapi(salt.loaded.int.netapi.rest_cherrypy)" in caplog.text
| import pytest
import salt.client.netapi
import salt.config
from tests.support.helpers import TstSuiteLoggingHandler
from tests.support.mock import Mock, patch
def test_run_log():
"""
test salt.client.netapi logs correct message
"""
opts = salt.config.DEFAULT_MASTER_OPTS.copy()
opts["rest_cherrypy"] = {"port": 8000}
mock_process = Mock()
mock_process.add_process.return_value = True
patch_process = patch.object(salt.utils.process, "ProcessManager", mock_process)
exp_msg = "INFO:Starting RunNetapi(salt.loaded.int.netapi.rest_cherrypy)"
found = False
with TstSuiteLoggingHandler() as handler:
with patch_process:
netapi = salt.client.netapi.NetapiClient(opts)
netapi.run()
for message in handler.messages:
if "RunNetapi" in message:
assert exp_msg == message
found = True
break
if not found:
pytest.fail("Log message not found: {}".format(exp_msg))
| apache-2.0 | Python |
51afb5757ae7715b28df4e8991d1d3cebe9df07c | Remove pdb | michaelkuty/feincms-elephantblog,feincms/feincms-elephantblog,michaelkuty/feincms-elephantblog,matthiask/feincms-elephantblog,sbaechler/feincms-elephantblog,feincms/feincms-elephantblog,matthiask/feincms-elephantblog,sbaechler/feincms-elephantblog,michaelkuty/feincms-elephantblog,matthiask/feincms-elephantblog,sbaechler/feincms-elephantblog | tests/testapp/tests/test_templatetags.py | tests/testapp/tests/test_templatetags.py | # coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.template.loader import render_to_string
from django.test import TransactionTestCase
from django.test.utils import override_settings
from .factories import EntryFactory, create_entries, create_category
class TemplateTagsTest(TransactionTestCase):
def test_templatetags(self):
entries = create_entries(EntryFactory)
category = create_category(title='Category 1')
create_category(title='Category 2')
entries[0].categories.add(category)
entries[1].is_featured = True
entries[1].save()
self.methods()
def methods(self):
html = render_to_string('test_templatetags.html', {})
self.assertIn(
'<p>categories:Category 1,</p>',
html)
self.assertIn(
'<p>categories+empty:Category 1,Category 2,</p>',
html)
self.assertIn(
'<p>months:10.12,08.12,</p>',
html)
self.assert_months(html)
self.assertIn(
'<p>entries:Eintrag 1,Entry 1,</p>',
html)
self.assertIn(
'<p>entries+featured:Eintrag 1,</p>',
html)
self.assertIn(
'<p>entries+category0:Entry 1,</p>',
html)
self.assertIn(
'<p>entries+category1:</p>',
html)
self.assertIn(
'<p>entries+limit:Eintrag 1,</p>',
html)
def assert_months(self, html):
self.assertIn(
'<p>months:10.12,08.12,</p>',
html)
@override_settings(USE_TZ=True, TIME_ZONE='America/Chicago')
class TimezoneTemplateTagsTest(TemplateTagsTest):
def test_templatetags(self):
entries = create_entries(EntryFactory)
category = create_category(title='Category 1')
create_category(title='Category 2')
entries[0].categories.add(category)
entries[1].is_featured = True
entries[1].save()
self.methods()
| # coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.template.loader import render_to_string
from django.test import TransactionTestCase
from django.test.utils import override_settings
from .factories import EntryFactory, create_entries, create_category
class TemplateTagsTest(TransactionTestCase):
def test_templatetags(self):
entries = create_entries(EntryFactory)
category = create_category(title='Category 1')
create_category(title='Category 2')
entries[0].categories.add(category)
entries[1].is_featured = True
entries[1].save()
self.methods()
def methods(self):
html = render_to_string('test_templatetags.html', {})
self.assertIn(
'<p>categories:Category 1,</p>',
html)
self.assertIn(
'<p>categories+empty:Category 1,Category 2,</p>',
html)
self.assertIn(
'<p>months:10.12,08.12,</p>',
html)
self.assert_months(html)
self.assertIn(
'<p>entries:Eintrag 1,Entry 1,</p>',
html)
self.assertIn(
'<p>entries+featured:Eintrag 1,</p>',
html)
self.assertIn(
'<p>entries+category0:Entry 1,</p>',
html)
self.assertIn(
'<p>entries+category1:</p>',
html)
self.assertIn(
'<p>entries+limit:Eintrag 1,</p>',
html)
def assert_months(self, html):
self.assertIn(
'<p>months:10.12,08.12,</p>',
html)
@override_settings(USE_TZ=True, TIME_ZONE='America/Chicago')
class TimezoneTemplateTagsTest(TemplateTagsTest):
def test_templatetags(self):
entries = create_entries(EntryFactory)
category = create_category(title='Category 1')
create_category(title='Category 2')
entries[0].categories.add(category)
entries[1].is_featured = True
entries[1].save()
import pdb; pdb.set_trace()
self.methods()
| bsd-3-clause | Python |
a59a06fbb1e7771a0372ba67dd9ca2af55fa5fb7 | update issuer in supplier invoice | thinkopensolutions/tkobr-addons,thinkopensolutions/tkobr-addons,thinkopensolutions/tkobr-addons,thinkopensolutions/tkobr-addons | tko_coexiste_purchase/models/purchase.py | tko_coexiste_purchase/models/purchase.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
@api.multi
def _prepare_invoice(self):
result = super(PurchaseOrder, self)._prepare_invoice()
result.update({'reference_coexiste': self.partner_ref,
'issuer': '0'})
return result
@api.multi
def action_view_invoice(self):
result = super(PurchaseOrder, self).action_view_invoice()
if not self.invoice_ids:
result['context'].update({'default_fiscal_position_id': self.fiscal_position_id.id,
'default_reference_coexiste': self.partner_ref,
'default_payment_term_id': self.payment_term_id.id,
})
return result
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
@api.multi
def _prepare_invoice(self):
result = super(PurchaseOrder, self)._prepare_invoice()
result.update({'reference_coexiste': self.partner_ref})
return result
@api.multi
def action_view_invoice(self):
result = super(PurchaseOrder, self).action_view_invoice()
if not self.invoice_ids:
result['context'].update({'default_fiscal_position_id': self.fiscal_position_id.id,
'default_reference_coexiste': self.partner_ref,
'default_payment_term_id': self.payment_term_id.id,
})
return result
| agpl-3.0 | Python |
e0b14d9f1a89b446f78bc6af8eb47dffb3cc1e6a | Revise class name | bowen0701/algorithms_data_structures | lc0046_permutations.py | lc0046_permutations.py | """Leetcode 46. Permutations
Medium
URL: https://leetcode.com/problems/permutations/
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
"""
class SolutionBacktrack(object):
def _backtrack(self, result, temp, nums):
if len(temp) == len(nums):
# Once a permutation is completed, shallow copy it to result.
result.append(temp[:])
return None
for i in range(len(nums)):
# If num[i] was used, skip it.
if nums[i] in temp:
continue
temp.append(nums[i])
self._backtrack(result, temp, nums)
# Pop for backtracking.
temp.pop()
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
Time complexity: O(n*n!), where
- n is the length of nums for copying temp,
- n! is for permutation.
Space complexity: O(n*n!).
"""
# Apply backtracking.
result = []
temp = []
self._backtrack(result, temp, nums)
return result
def main():
nums = [1, 2, 3]
print SolutionBacktrack().permute(nums)
if __name__ == '__main__':
main()
| """Leetcode 46. Permutations
Medium
URL: https://leetcode.com/problems/permutations/
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
"""
class Solution(object):
def _backtrack(self, result, temp, nums):
if len(temp) == len(nums):
# Once a permutation is completed, shallow copy it to result.
result.append(temp[:])
return None
for i in range(len(nums)):
# If num[i] was used, skip it.
if nums[i] in temp:
continue
temp.append(nums[i])
self._backtrack(result, temp, nums)
# Pop for backtracking.
temp.pop()
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
Time complexity: O(n*n!), where
- n is the length of nums for copying temp,
- n! is for permutation.
Space complexity: O(n*n!).
"""
# Apply backtracking.
result = []
temp = []
self._backtrack(result, temp, nums)
return result
def main():
nums = [1, 2, 3]
print Solution().permute(nums)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
ec8d7b035617f9239a0a52be346d8611cf77cb6f | Add few oc wrappers for future resiliency testing | jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common | integration-tests/features/src/utils.py | integration-tests/features/src/utils.py | """Unsorted utility functions used in integration tests."""
import requests
import subprocess
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
def oc_login(url, username, password, tls_verify=True):
"""Wrapper around `oc login`.
:param url: str, OpenShift URL
:param username: str, username
:param password: str, password
:param tls_verify: bool, verify server's certificate?; default: True
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'login', url, '--username', username, '--password', password]
if not tls_verify:
command.extend(['--insecure-skip-tls-verify=true'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
# replace password with '***' so somebody will not accidentally leak it in CI logs
e.cmd = [x if x != password else '***' for x in e.cmd]
raise e
def oc_delete_pods(selector, namespace=None):
"""Wrapper around `oc delete`.
Selector determines which pods will be deleted.
More on selectors:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
Note k8s/OpenShift will immediately restart deleted pods,
to match desired number of replicas for given deployment.
The expectation is that the user is already logged in
and has permissions to delete pods.
Example usage:
oc_delete_pods('service=bayesian-pgbouncer'
:param selector: str, selector identifying pods that will be deleted
:param namespace: str, namespace in which `oc delete` command should be executed,
default: currently selected namespace
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'delete', 'pods', '--selector=', selector]
if namespace:
command.extend(['--namespace', namespace])
subprocess.check_call(command)
| """Unsorted utility functions used in integration tests."""
import requests
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
| apache-2.0 | Python |
c77518e19f061c70b9ee9b087ca8f792d3c67021 | Bump the version to 0.1.2 | othieno/geotagx-tool-validator | src/__init__.py | src/__init__.py | # -*- coding: utf-8 -*-
#
# This module is part of the GeoTag-X project validator tool.
#
# Author: Jeremy Othieno (j.othieno@gmail.com)
#
# Copyright (c) 2016-2017 UNITAR/UNOSAT
#
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = "0.1.2"
| # -*- coding: utf-8 -*-
#
# This module is part of the GeoTag-X project validator tool.
#
# Author: Jeremy Othieno (j.othieno@gmail.com)
#
# Copyright (c) 2016-2017 UNITAR/UNOSAT
#
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = "0.1.1"
| mit | Python |
096d1a64a3829607ae74d57c2acb4a5df4b65023 | swap a .sort() for a sorted() | StoDevX/course-data-tools,StoDevX/course-data-tools | lib/calculate_terms.py | lib/calculate_terms.py | from .flattened import flatten
from datetime import datetime
def year_plus_term(year, term):
return int(str(year) + str(term))
def find_terms_for_year(year):
now = datetime.now()
current_month = now.month
current_year = now.year
all_terms = [1, 2, 3, 4, 5]
limited_terms = [1, 2, 3]
# St. Olaf publishes initial Fall, Interim, and Spring data in April
# of each year. Full data is published by August.
if year == current_year:
if current_month < 3:
return []
elif current_month <= 7:
return [year_plus_term(year, term) for term in limited_terms]
else:
return [year_plus_term(year, term) for term in all_terms]
elif year > current_year:
return []
else:
return [year_plus_term(year, term) for term in all_terms]
def find_terms(start_year=None, end_year=None, this_year=False):
now = datetime.now()
start_year = start_year or 1994
end_year = end_year or now.year
current_year = end_year if end_year is not start_year else end_year + 1
current_month = now.month
if this_year:
start_year = current_year - 1 if current_month <= 7 else current_year
most_years = range(start_year, current_year)
term_list = [find_terms_for_year(year) for year in most_years]
# Sort the list of terms to 20081, 20082, 20091
# (instead of 20081, 20091, 20082)
term_list = sorted(term_list)
return term_list
def get_years_and_terms(terms_and_years):
terms_and_years = flatten([item.split(' ')
if type(item) is str
else item
for item in terms_and_years])
years, terms = [], []
for item in terms_and_years:
str_item = str(item)
if len(str_item) is 4:
years.append(item)
elif len(str_item) is 5:
terms.append(item)
return years, terms
def calculate_terms(terms_and_years):
years, terms = get_years_and_terms(terms_and_years)
if (not terms) and (not years):
calculated_terms = find_terms()
elif 0 in years:
calculated_terms = find_terms(this_year=True)
else:
calculated_terms = terms + \
[find_terms(start_year=year, end_year=year) for year in years]
return flatten(calculated_terms)
| from .flattened import flatten
from datetime import datetime
def year_plus_term(year, term):
return int(str(year) + str(term))
def find_terms_for_year(year):
now = datetime.now()
current_month = now.month
current_year = now.year
all_terms = [1, 2, 3, 4, 5]
limited_terms = [1, 2, 3]
# St. Olaf publishes initial Fall, Interim, and Spring data in April
# of each year. Full data is published by August.
if year == current_year:
if current_month < 3:
return []
elif current_month <= 7:
return [year_plus_term(year, term) for term in limited_terms]
else:
return [year_plus_term(year, term) for term in all_terms]
elif year > current_year:
return []
else:
return [year_plus_term(year, term) for term in all_terms]
def find_terms(start_year=None, end_year=None, this_year=False):
now = datetime.now()
start_year = start_year or 1994
end_year = end_year or now.year
current_year = end_year if end_year is not start_year else end_year + 1
current_month = now.month
if this_year:
start_year = current_year - 1 if current_month <= 7 else current_year
most_years = range(start_year, current_year)
term_list = [find_terms_for_year(year) for year in most_years]
# Sort the list of terms to 20081, 20082, 20091
# (instead of 20081, 20091, 20082) (sorts in-place)
term_list.sort()
return term_list
def get_years_and_terms(terms_and_years):
terms_and_years = flatten([item.split(' ')
if type(item) is str
else item
for item in terms_and_years])
years, terms = [], []
for item in terms_and_years:
str_item = str(item)
if len(str_item) is 4:
years.append(item)
elif len(str_item) is 5:
terms.append(item)
return years, terms
def calculate_terms(terms_and_years):
years, terms = get_years_and_terms(terms_and_years)
if (not terms) and (not years):
calculated_terms = find_terms()
elif 0 in years:
calculated_terms = find_terms(this_year=True)
else:
calculated_terms = terms + \
[find_terms(start_year=year, end_year=year) for year in years]
return flatten(calculated_terms)
| mit | Python |
b9996fa9f697c436ae2bf829b440340b9ddceaa7 | Fix type error in `StashMixin` | divmain/GitSavvy,divmain/GitSavvy,divmain/GitSavvy | core/git_mixins/stash.py | core/git_mixins/stash.py | from collections import namedtuple
import re
from GitSavvy.core.git_command import mixin_base
Stash = namedtuple("Stash", ("id", "description"))
class StashMixin(mixin_base):
    """Git stash commands, mixed into a class that provides ``self.git``."""
    def get_stashes(self):
        """
        Return a list of stashes in the repo.
        """
        stdout = self.git("stash", "list")
        stashes = []
        for entry in stdout.split("\n"):
            if not entry:
                continue
            # A list entry typically looks like
            #   "stash@{0}: WIP on master: <description>"
            # Groups: (1) numeric id, (2) optional "...: " prefix, (3) text.
            match = re.match("^stash@\\{(\\d+)}: (.*?: )?(.*)", entry)
            assert match
            num, _, description = match.groups()
            stashes.append(Stash(num, description))
        return stashes
    def show_stash(self, id):
        """Return the patch (diff) of the stash with the provided id."""
        stash_name = "stash@{{{}}}".format(id)
        return self.git("stash", "show", "--no-color", "-p", stash_name)
    def apply_stash(self, id):
        """
        Apply stash with provided id.
        """
        self.git("stash", "apply", "stash@{{{}}}".format(id))
    def pop_stash(self, id):
        """
        Pop stash with provided id.
        """
        self.git("stash", "pop", "stash@{{{}}}".format(id))
    def create_stash(self, description, include_untracked=False):
        """
        Create stash with provided description from working files.
        """
        # -k keeps the index intact; -u additionally stashes untracked files.
        self.git("stash", "save", "-k", "-u" if include_untracked else None, description)
    def drop_stash(self, id):
        """
        Drop stash with provided id.
        """
        return self.git("stash", "drop", "stash@{{{}}}".format(id))
| from collections import namedtuple
import re
from GitSavvy.core.git_command import mixin_base
Stash = namedtuple("Stash", ("id", "description"))
class StashMixin(mixin_base):
def get_stashes(self):
"""
Return a list of stashes in the repo.
"""
stdout = self.git("stash", "list")
stashes = []
for entry in stdout.split("\n"):
if not entry:
continue
num, _, description = re.match("^stash@\\{(\\d+)}: (.*?: )?(.*)", entry).groups()
stashes.append(Stash(num, description))
return stashes
def show_stash(self, id):
stash_name = "stash@{{{}}}".format(id)
return self.git("stash", "show", "--no-color", "-p", stash_name)
def apply_stash(self, id):
"""
Apply stash with provided id.
"""
self.git("stash", "apply", "stash@{{{}}}".format(id))
def pop_stash(self, id):
"""
Pop stash with provided id.
"""
self.git("stash", "pop", "stash@{{{}}}".format(id))
def create_stash(self, description, include_untracked=False):
"""
Create stash with provided description from working files.
"""
self.git("stash", "save", "-k", "-u" if include_untracked else None, description)
def drop_stash(self, id):
"""
Drop stash with provided id.
"""
return self.git("stash", "drop", "stash@{{{}}}".format(id))
| mit | Python |
fd5711c7acabea1ed304a3e2113b907de556e645 | Set Docker API version in the DockerOperator params | opentrials/opentrials-airflow,opentrials/opentrials-airflow | dags/cochrane_reviews.py | dags/cochrane_reviews.py | from datetime import datetime
from airflow.operators.docker_operator import DockerOperator
from airflow.models import DAG, Variable
import utils.helpers as helpers
import os
args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.strptime('Dec 1 2016', '%b %d %Y'),
'retries': 1,
}
dag = DAG(dag_id='cochrane_reviews',
default_args=args,
max_active_runs=1,
schedule_interval='@monthly')
collector_task = DockerOperator(
task_id='cochrane_reviews_collector',
dag=dag,
image='okibot/collectors:latest',
force_pull=True,
api_version='1.23',
environment={
'WAREHOUSE_URL': helpers.get_postgres_uri('warehouse_db'),
'COCHRANE_ARCHIVE_URL': Variable.get('COCHRANE_ARCHIVE_URL'),
'LOGGING_URL': Variable.get('LOGGING_URL'),
'PYTHON_ENV': Variable.get('ENV'),
'FERNET_KEY': os.environ['FERNET_KEY'],
},
command='make start cochrane_reviews'
)
processor_task = DockerOperator(
task_id='cochrane_reviews_processor',
dag=dag,
image='okibot/processors:latest',
force_pull=True,
api_version='1.23',
environment={
'WAREHOUSE_URL': helpers.get_postgres_uri('warehouse_db'),
'DATABASE_URL': helpers.get_postgres_uri('api_db'),
'EXPLORERDB_URL': helpers.get_postgres_uri('explorer_db'),
'LOGGING_URL': Variable.get('LOGGING_URL'),
'FERNET_KEY': os.environ['FERNET_KEY'],
},
command='make start cochrane_reviews'
)
processor_task.set_upstream(collector_task)
| from datetime import datetime
from airflow.operators.docker_operator import DockerOperator
from airflow.models import DAG, Variable
import utils.helpers as helpers
import os
args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.strptime('Dec 1 2016', '%b %d %Y'),
'retries': 1,
}
dag = DAG(dag_id='cochrane_reviews',
default_args=args,
max_active_runs=1,
schedule_interval='@monthly')
collector_task = DockerOperator(
task_id='cochrane_reviews_collector',
dag=dag,
image='okibot/collectors:latest',
force_pull=True,
environment={
'WAREHOUSE_URL': helpers.get_postgres_uri('warehouse_db'),
'COCHRANE_ARCHIVE_URL': Variable.get('COCHRANE_ARCHIVE_URL'),
'LOGGING_URL': Variable.get('LOGGING_URL'),
'PYTHON_ENV': Variable.get('ENV'),
'FERNET_KEY': os.environ['FERNET_KEY'],
'DOCKER_API_VERSION': Variable.get('DOCKER_API_VERSION'),
},
command='make start cochrane_reviews'
)
processor_task = DockerOperator(
task_id='cochrane_reviews_processor',
dag=dag,
image='okibot/processors:latest',
force_pull=True,
environment={
'WAREHOUSE_URL': helpers.get_postgres_uri('warehouse_db'),
'DATABASE_URL': helpers.get_postgres_uri('api_db'),
'EXPLORERDB_URL': helpers.get_postgres_uri('explorer_db'),
'LOGGING_URL': Variable.get('LOGGING_URL'),
'FERNET_KEY': os.environ['FERNET_KEY'],
'DOCKER_API_VERSION': Variable.get('DOCKER_API_VERSION'),
},
command='make start cochrane_reviews'
)
processor_task.set_upstream(collector_task)
| mpl-2.0 | Python |
545f45bda1c7f40e4fd70fd7286479d5e1cf3bed | Add optional dtype argument to _get_ang_freq_grid | dask-image/dask-ndfourier | dask_ndfourier/_utils.py | dask_ndfourier/_utils.py | # -*- coding: utf-8 -*-
import collections
import itertools
import numbers
import numpy
import dask.array
from dask_ndfourier import _compat
try:
from itertools import imap
except ImportError:
imap = map
try:
irange = xrange
except NameError:
irange = range
def _get_freq_grid(shape, chunks, dtype=float):
    """Build a dask array of FFT sample frequencies for an N-D grid.

    Returns an array of shape ``(ndim,) + shape`` whose i-th slice holds
    the fftfreq values along axis i, broadcast over the other axes.
    """
    assert len(shape) == len(chunks)
    shape = tuple(shape)
    dtype = numpy.dtype(dtype).type
    # Frequencies only make sense as non-integral real values.
    assert (issubclass(dtype, numbers.Real) and
            not issubclass(dtype, numbers.Integral))
    ndim = len(shape)
    freq_grid = []
    for i in irange(ndim):
        # Shape the 1-D frequency vector so it lies along axis i
        # (None inserts a length-1 axis everywhere else).
        sl = ndim * [None]
        sl[i] = slice(None)
        sl = tuple(sl)
        freq_grid_i = _compat._fftfreq(shape[i],
                                       chunks=chunks[i]).astype(dtype)[sl]
        # Materialize the broadcast over every other dimension.
        for j in itertools.chain(range(i), range(i + 1, ndim)):
            freq_grid_i = freq_grid_i.repeat(shape[j], axis=j)
        freq_grid.append(freq_grid_i)
    freq_grid = dask.array.stack(freq_grid)
    return freq_grid
def _get_ang_freq_grid(shape, chunks, dtype=float):
    """Return the angular-frequency grid: 2*pi times the frequency grid."""
    dtype = numpy.dtype(dtype).type
    # Angular frequencies require a non-integral real dtype.
    assert issubclass(dtype, numbers.Real)
    assert not issubclass(dtype, numbers.Integral)
    pi = dtype(numpy.pi)
    return 2 * pi * _get_freq_grid(shape, chunks, dtype=dtype)
def _norm_args(a, s, n=-1, axis=-1):
    """Validate/coerce the ``(a, s, n, axis)`` arguments shared by the filters.

    Promotes integer arrays to float, broadcasts a scalar ``s`` to one
    value per dimension, and rejects non-real or wrongly-shaped ``s``.
    ``n`` other than -1 is not implemented.  Returns ``(a, s, n, axis)``.
    """
    # Integer data would truncate later arithmetic; promote to float.
    if issubclass(a.dtype.type, numbers.Integral):
        a = a.astype(float)
    # A scalar s applies to every axis; other non-dask values are wrapped
    # as NumPy arrays so the dtype/shape checks below apply uniformly.
    if isinstance(s, numbers.Number):
        s = numpy.array(a.ndim * [s])
    elif not isinstance(s, dask.array.Array):
        s = numpy.array(s)
    if not issubclass(s.dtype.type, numbers.Real):
        raise TypeError("The `s` must contain real value(s).")
    if s.shape != (a.ndim,):
        raise RuntimeError(
            "Shape of `s` must be 1-D and equal to the input's rank."
        )
    if n != -1:
        raise NotImplementedError(
            "Currently `n` other than -1 is unsupported."
        )
    return (a, s, n, axis)
| # -*- coding: utf-8 -*-
import collections
import itertools
import numbers
import numpy
import dask.array
from dask_ndfourier import _compat
try:
from itertools import imap
except ImportError:
imap = map
try:
irange = xrange
except NameError:
irange = range
def _get_freq_grid(shape, chunks, dtype=float):
assert len(shape) == len(chunks)
shape = tuple(shape)
dtype = numpy.dtype(dtype).type
assert (issubclass(dtype, numbers.Real) and
not issubclass(dtype, numbers.Integral))
ndim = len(shape)
freq_grid = []
for i in irange(ndim):
sl = ndim * [None]
sl[i] = slice(None)
sl = tuple(sl)
freq_grid_i = _compat._fftfreq(shape[i],
chunks=chunks[i]).astype(dtype)[sl]
for j in itertools.chain(range(i), range(i + 1, ndim)):
freq_grid_i = freq_grid_i.repeat(shape[j], axis=j)
freq_grid.append(freq_grid_i)
freq_grid = dask.array.stack(freq_grid)
return freq_grid
def _get_ang_freq_grid(shape, chunks):
freq_grid = _get_freq_grid(shape, chunks)
ang_freq_grid = 2 * numpy.pi * freq_grid
return ang_freq_grid
def _norm_args(a, s, n=-1, axis=-1):
if issubclass(a.dtype.type, numbers.Integral):
a = a.astype(float)
if isinstance(s, numbers.Number):
s = numpy.array(a.ndim * [s])
elif not isinstance(s, dask.array.Array):
s = numpy.array(s)
if not issubclass(s.dtype.type, numbers.Real):
raise TypeError("The `s` must contain real value(s).")
if s.shape != (a.ndim,):
raise RuntimeError(
"Shape of `s` must be 1-D and equal to the input's rank."
)
if n != -1:
raise NotImplementedError(
"Currently `n` other than -1 is unsupported."
)
return (a, s, n, axis)
| bsd-3-clause | Python |
6d0c99026f8382822d49ad636b06f884f96351eb | Set version to 0.19.2 final | emory-libraries/eulexistdb,emory-libraries/eulexistdb,emory-libraries/eulexistdb | eulexistdb/__init__.py | eulexistdb/__init__.py | # file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
__version_info__ = (0, 19, 2, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| # file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
__version_info__ = (0, 20, 0, 'dev')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| apache-2.0 | Python |
e125fc909e569a6612bf2f967fffd86ce18f7e08 | Update example url | willmcgugan/rich | examples/downloader.py | examples/downloader.py | """
A rudimentary URL downloader (like wget or curl) to demonstrate Rich progress bars.
"""
import os.path
import sys
from concurrent.futures import as_completed, ThreadPoolExecutor
import signal
from functools import partial
from threading import Event
from typing import Iterable
from urllib.request import urlopen
from rich.progress import (
BarColumn,
DownloadColumn,
Progress,
TaskID,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
progress = Progress(
TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
DownloadColumn(),
"•",
TransferSpeedColumn(),
"•",
TimeRemainingColumn(),
)
done_event = Event()
def handle_sigint(signum, frame):
done_event.set()
signal.signal(signal.SIGINT, handle_sigint)
def copy_url(task_id: TaskID, url: str, path: str) -> None:
    """Copy data from a url to a local file.

    Streams the response in 32 KiB chunks, advancing the progress bar as
    it goes; returns early (leaving a partial file on disk) once
    ``done_event`` is set by the SIGINT handler.
    """
    progress.console.log(f"Requesting {url}")
    response = urlopen(url)
    # This will break if the response doesn't contain content length
    progress.update(task_id, total=int(response.info()["Content-length"]))
    with open(path, "wb") as dest_file:
        progress.start_task(task_id)
        # iter() with a b"" sentinel reads until response.read() is exhausted.
        for data in iter(partial(response.read, 32768), b""):
            dest_file.write(data)
            progress.update(task_id, advance=len(data))
            if done_event.is_set():
                # Ctrl+C was pressed; abort this download mid-stream.
                return
    progress.console.log(f"Downloaded {path}")
def download(urls: Iterable[str], dest_dir: str):
    """Download multiple files to the given directory.

    Each URL is fetched on a worker thread (at most 4 concurrently); the
    destination filename is the last path component of the URL.
    """
    with progress:
        with ThreadPoolExecutor(max_workers=4) as pool:
            for url in urls:
                filename = url.split("/")[-1]
                dest_path = os.path.join(dest_dir, filename)
                # start=False: copy_url starts the task once the size is known.
                task_id = progress.add_task("download", filename=filename, start=False)
                pool.submit(copy_url, task_id, url, dest_path)
if __name__ == "__main__":
# Try with https://releases.ubuntu.com/20.04/ubuntu-20.04.3-desktop-amd64.iso
if sys.argv[1:]:
download(sys.argv[1:], "./")
else:
print("Usage:\n\tpython downloader.py URL1 URL2 URL3 (etc)")
| """
A rudimentary URL downloader (like wget or curl) to demonstrate Rich progress bars.
"""
import os.path
import sys
from concurrent.futures import as_completed, ThreadPoolExecutor
import signal
from functools import partial
from threading import Event
from typing import Iterable
from urllib.request import urlopen
from rich.progress import (
BarColumn,
DownloadColumn,
Progress,
TaskID,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
progress = Progress(
TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
DownloadColumn(),
"•",
TransferSpeedColumn(),
"•",
TimeRemainingColumn(),
)
done_event = Event()
def handle_sigint(signum, frame):
done_event.set()
signal.signal(signal.SIGINT, handle_sigint)
def copy_url(task_id: TaskID, url: str, path: str) -> None:
"""Copy data from a url to a local file."""
progress.console.log(f"Requesting {url}")
response = urlopen(url)
# This will break if the response doesn't contain content length
progress.update(task_id, total=int(response.info()["Content-length"]))
with open(path, "wb") as dest_file:
progress.start_task(task_id)
for data in iter(partial(response.read, 32768), b""):
dest_file.write(data)
progress.update(task_id, advance=len(data))
if done_event.is_set():
return
progress.console.log(f"Downloaded {path}")
def download(urls: Iterable[str], dest_dir: str):
"""Download multuple files to the given directory."""
with progress:
with ThreadPoolExecutor(max_workers=4) as pool:
for url in urls:
filename = url.split("/")[-1]
dest_path = os.path.join(dest_dir, filename)
task_id = progress.add_task("download", filename=filename, start=False)
pool.submit(copy_url, task_id, url, dest_path)
if __name__ == "__main__":
# Try with https://releases.ubuntu.com/20.04/ubuntu-20.04.2.0-desktop-amd64.iso
if sys.argv[1:]:
download(sys.argv[1:], "./")
else:
print("Usage:\n\tpython downloader.py URL1 URL2 URL3 (etc)")
| mit | Python |
d21da3716e34576c02cc296cac4844c525fc84a5 | Fix typo | jodal/pyspotify,felix1m/pyspotify,mopidy/pyspotify,jodal/pyspotify,kotamat/pyspotify,jodal/pyspotify,mopidy/pyspotify,kotamat/pyspotify,felix1m/pyspotify,felix1m/pyspotify,kotamat/pyspotify | examples/play_track.py | examples/play_track.py | #!/usr/bin/env python
"""
This is an example of playing music from Spotify using pyspotify.
The example use the :class:`spotify.sink.AlsaSink`, and will thus only work on
systems with an ALSA sound subsystem, which means most Linux systems.
You can either run this file directly without arguments to play a default
track::
python play_track.py
Or, give the script a Spotify track URI to play::
python play_track.py spotify:track:3iFjScPoAC21CT5cbAFZ7b
"""
from __future__ import unicode_literals
import sys
import threading
import spotify
import spotify.sink
if sys.argv[1:]:
track_uri = sys.argv[1]
else:
track_uri = 'spotify:track:6xZtSE6xaBxmRozKA0F6TA'
# Assuming a spotify_appkey.key in the current dir
session = spotify.Session()
# Process events in the background
loop = spotify.EventLoop(session)
loop.start()
# Connect an audio sink
audio = spotify.sink.AlsaSink(session)
# Events for coordination
logged_in = threading.Event()
end_of_track = threading.Event()
def on_logged_in(session, error_type):
assert error_type == spotify.ErrorType.OK, 'Login failed'
logged_in.set()
def on_end_of_track(self):
end_of_track.set()
# Register event listeners
session.on(spotify.SessionEvent.LOGGED_IN, on_logged_in)
session.on(spotify.SessionEvent.END_OF_TRACK, on_end_of_track)
# Assuming a previous login with remember_me=True and a proper logout
session.relogin()
logged_in.wait()
# TODO Get rid of this sleep
import time
time.sleep(1)
# Play a track
track = session.get_track(track_uri).load()
session.player.load(track)
session.player.play()
# Wait for playback to complete or Ctrl+C
try:
while not end_of_track.wait(0.1):
pass
except KeyboardInterrupt:
pass
| #!/usr/bin/env python
"""
This is an example of playing music from Spotify using pyspotify.
The example use the :class:`spotify.sink.AlsaSink`, and will thus only work on
systems with an ALSA sound subsystem, which means most Linux systems.
You can either run this file directly without arguments to play a default
track::
python play_track.py
Or, give the script a Spotify track URI to play::
python play_track.py spotify:track:3iFjScPoAC21CT5cbAFZ7b
"""
from __future__ import unicode_literals
import sys
import threading
import spotify
import spotify.sink
if sys.argv[1:]:
track_uri = sys.argv[1]
else:
track_uri = 'spotify:track:6xZtSE6xaBxmRozKA0F6TA'
# Assuming a spotify_appkey.key in the current dir
session = spotify.Session()
# Process events in the background
loop = spotify.EventLoop(session)
loop.start()
# Connect an audio sink
audio = spotify.sink.AlsaSink(session)
# EVents for coordination
logged_in = threading.Event()
end_of_track = threading.Event()
def on_logged_in(session, error_type):
assert error_type == spotify.ErrorType.OK, 'Login failed'
logged_in.set()
def on_end_of_track(self):
end_of_track.set()
# Register event listeners
session.on(spotify.SessionEvent.LOGGED_IN, on_logged_in)
session.on(spotify.SessionEvent.END_OF_TRACK, on_end_of_track)
# Assuming a previous login with remember_me=True and a proper logout
session.relogin()
logged_in.wait()
# TODO Get rid of this sleep
import time
time.sleep(1)
# Play a track
track = session.get_track(track_uri).load()
session.player.load(track)
session.player.play()
# Wait for playback to complete or Ctrl+C
try:
while not end_of_track.wait(0.1):
pass
except KeyboardInterrupt:
pass
| apache-2.0 | Python |
ee9532a9b1baa39fa8e674ff3b88b0847177c53a | Make simExample importable. | Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions,hosseinsadeghi/ultracold-ions,Tech-XCorp/ultracold-ions | examples/simExample.py | examples/simExample.py | import uci.Sim as Sim
import uci.TrapConfiguration as TrapConfig
import numpy as np
def run_simulation():
    """Configure a trap simulation with hard-coded physical parameters
    and spin it up.
    """
    t = TrapConfig.TrapConfiguration()
    # Axial magnetic field (presumably tesla) -- confirm units.
    t.Bz = 4.5
    #V0 in V/m^2
    t.kz = 2.0 * 1.167e6
    delta = 0.010
    t.kx = -(0.5 + delta) * t.kz
    t.ky = -(0.5 - delta) * t.kz
    t.theta = 0
    t.omega = 2.0 * np.pi * 43.0e3
    # Elementary charge in coulombs.
    fundcharge = 1.602176565e-19
    # Ion mass in kg (8.9465 x 1.673e-27 kg; looks like ~9 amu -- confirm species).
    ionMass = 8.9465 * 1.673e-27
    s = Sim.Sim()
    s.ptcls.set_nptcls(300)
    s.ptcls.sigma = 2.0e-4
    s.ptcls.init_ptcls(charge = fundcharge, mass = ionMass)
    axialFrictionCoeff = 1.0e7
    angularFrictionCoeff = 1.0e7
    s.init_sim(t, axialFrictionCoeff, angularFrictionCoeff,
            recoilVelocity = 0.01, scatterRate = 1.0e6)
    s.updater.peakCoolingRate = 1.0e4
    s.updater.peakDiffusionConstant = 0.1 * np.sqrt(1.0e4)
    s.updater.width = 30.0e-6
    s.updater.offset = 0.0
    s.spin_up()
def main(argv=None):
    """Run the simulation; return 0 on success, 1 on any failure.

    ``argv`` is unused and kept only for a conventional entry-point
    signature.
    """
    try:
        run_simulation()
    except Exception:
        # Swallow the error and report failure via the exit status only.
        return 1
    return 0
if __name__ == '__main__':
    # BUG FIX: `sys` was used here without being imported anywhere in
    # this module, so running the script raised NameError.
    import sys
    sys.exit(main())
| import uci.Sim as Sim
import uci.TrapConfiguration as TrapConfig
import numpy as np
t = TrapConfig.TrapConfiguration()
t.Bz = 4.5
#V0 in V/m^2
t.kz = 2.0 * 1.167e6
delta = 0.010
t.kx = -(0.5 + delta) * t.kz
t.ky = -(0.5 - delta) * t.kz
t.theta = 0
t.omega = 2.0 * np.pi * 43.0e3
fundcharge = 1.602176565e-19
ionMass = 8.9465 * 1.673e-27
s = Sim.Sim()
s.ptcls.set_nptcls(300)
s.ptcls.sigma = 2.0e-4
s.ptcls.init_ptcls(charge = fundcharge, mass = ionMass)
axialFrictionCoeff = 1.0e7
angularFrictionCoeff = 1.0e7
s.init_sim(t, axialFrictionCoeff, angularFrictionCoeff,
recoilVelocity = 0.01, scatterRate = 1.0e6)
s.updater.peakCoolingRate = 1.0e4
s.updater.peakDiffusionConstant = 0.1 * np.sqrt(1.0e4)
s.updater.width = 30.0e-6
s.updater.offset = 0.0
s.spin_up()
| mit | Python |
65266813c6438eeb67002e17dd4cd70a00e84b5d | Clean the code with autopen8 | daweiwu/meta-iotqa-1,ostroproject/meta-iotqa,ostroproject/meta-iotqa,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1 | meta-iotqa/lib/oeqa/runtime/boottime.py | meta-iotqa/lib/oeqa/runtime/boottime.py | #[PROTEXCAT]
#\License: ALL RIGHTS RESERVED
"""System boot time"""
import os
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.helper import collect_pnp_log
class BootTimeTest(oeRuntimeTest):
def _setup(self):
(status, output) = self.target.copy_to(
os.path.join(os.path.dirname(__file__), 'files',
'systemd-analyze'), "/tmp/systemd-analyze")
self.assertEqual(
status,
0,
msg="systemd-analyze could not be copied. Output: %s" %
output)
(status, output) = self.target.run(" ls -la /tmp/systemd-analyze")
self.assertEqual(
status,
0,
msg="Failed to find systemd-analyze command")
def test_boot_time(self):
self._setup()
filename = os.path.basename(__file__)
casename = os.path.splitext(filename)[0]
(status, output) = self.target.run("/tmp/systemd-analyze time"
" | awk -F '=' '{print $2}'")
collect_pnp_log(casename, output)
print "\n%s:%s\n" % (casename, output)
self.assertEqual(status, 0, output)
| #[PROTEXCAT]
#\License: ALL RIGHTS RESERVED
"""System boot time"""
import os
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.helper import collect_pnp_log
class BootTimeTest(oeRuntimeTest):
def _setup(self):
(status,output) = self.target.copy_to(os.path.join(os.path.dirname(__file__), 'files','systemd-analyze'),"/tmp/systemd-analyze")
self.assertEqual(status, 0, msg="systemd-analyze could not be copied. Output: %s" % output)
(status,output) = self.target.run(" ls -la /tmp/systemd-analyze")
self.assertEqual(status, 0, msg="Failed to find systemd-analyze command")
def test_boot_time(self):
self._setup()
filename=os.path.basename(__file__)
casename=os.path.splitext(filename)[0]
(status,output) = self.target.run("/tmp/systemd-analyze time | awk -F '=' '{print $2}'")
collect_pnp_log(casename, output)
print "\n%s:%s\n" %(casename, output)
self.assertEqual(status, 0, output)
| mit | Python |
d11596b7511cdc9e6164f46436fdf66661edff3a | change way of columns' redefining | CSchool/SchoolSite,CSchool/SchoolSite,CSchool/SchoolSite,CSchool/SchoolSite | CSchoolSite/userprofile/tables.py | CSchoolSite/userprofile/tables.py | from django_datatables_view.base_datatable_view import BaseDatatableView
from django.utils.translation import ugettext_lazy as _
from userprofile.models import Relationship, User
class PossibleRelativesTable(BaseDatatableView):
    """Server-side datatable of users that could still be linked to the
    requesting user as relatives.

    Excludes users already related to the requester (in either
    direction) and the requester themselves.
    """
    max_display_length = 50
    # The columns must be declared so the datatable can render custom
    # data for them (including action buttons).
    columns = ['username', 'first_name', 'birthday']
    order_columns = ['username', 'first_name', 'birthday']

    def get_initial_queryset(self):
        """Return users not yet related to (and not equal to) the requester."""
        user = self.request.user
        # Check whether the user acts as a parent or as a student and
        # collect the ids of users already related to them.
        # BUG FIX: the original assigned a values() QuerySet here and then
        # called .append() on it, which raises AttributeError for any user
        # in the Parents/Students group; build a plain list of ids instead.
        excluded_id_list = []
        if user.groups.filter(name=_('Parents')).exists():
            excluded_id_list = list(
                Relationship.objects.filter(relative=user.id)
                .values_list('child', flat=True))
        elif user.groups.filter(name=_('Students')).exists():
            excluded_id_list = list(
                Relationship.objects.filter(child=user.id)
                .values_list('relative', flat=True))
        excluded_id_list.append(user.id)
        return User.objects.exclude(id__in=excluded_id_list)

    def render_column(self, row, column):
        """Render a cell; 'first_name' shows the user's initials instead."""
        if column == 'first_name':
            return '{}'.format(row.get_initials())
        return super(PossibleRelativesTable, self).render_column(row, column)
| from django_datatables_view.base_datatable_view import BaseDatatableView
from django.utils.translation import ugettext_lazy as _
from userprofile.models import Relationship, User
class PossibleRelativesTable(BaseDatatableView):
max_display_length = 50
# yep, we need to redefine existed column for custom data (even for buttons!)
columns = ['username', 'first_name', 'birthday']
order_columns = ['username', 'first_name', 'birthday']
# get data
def get_initial_queryset(self):
user = self.request.user
# check is user parent or child
excluded_id_list = []
if user.groups.filter(name=_('Parents')).exists():
excluded_id_list = Relationship.objects.filter(relative=user.id).values('child')
elif user.groups.filter(name=_('Students')).exists():
excluded_id_list = Relationship.objects.filter(child=user.id).values('relative')
excluded_id_list.append(user.id)
# exclude id from users
return User.objects.exclude(id__in=excluded_id_list)
# change some columns (here redefinition comes to play)
def prepare_results(self, qs):
json_data = []
for item in qs:
full_name = '{} {} {}'.format(item.last_name, item.first_name, item.patronymic).strip()
json_data.append([item.username, full_name, item.birthday])
return json_data
| apache-2.0 | Python |
cc2685f4242fe1e1c30f82b5319edc62c5acaebb | Bump version | markstory/lint-review,markstory/lint-review,markstory/lint-review | lintreview/__init__.py | lintreview/__init__.py | __version__ = '2.2.0'
| __version__ = '2.1.2'
| mit | Python |
0aeb3aad2cd4d3f13f4e99b888fa12f9d6dbaedb | Bump version | markstory/lint-review,markstory/lint-review,markstory/lint-review | lintreview/__init__.py | lintreview/__init__.py | __version__ = '2.34.10'
| __version__ = '2.34.9'
| mit | Python |
1906c2dca105eb7ba9c69689a37c9884683d67e8 | Add License | dpnishant/appmon,dpnishant/appmon,dpnishant/appmon,dpnishant/appmon | database/__init__.py | database/__init__.py | ###
# Copyright (c) 2016 eBay Software Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import dataset, json
from xml.sax.saxutils import escape
def save_to_database(db_path, str_json):
    """Persist one captured API call (a JSON string) into a SQLite database.

    *str_json* must decode to a dict with keys: time, txnType, artifact,
    method, lib.  The artifact is re-serialized to JSON for storage.
    """
    str_json = json.loads(str_json)
    # Single quotes in the path are replaced so they cannot break the
    # sqlite connection URL.
    db = dataset.connect('sqlite:///%s' % (db_path.replace("'", "_")))
    table = db['api_captures']
    table.insert(dict(time=str_json['time'],
                      operation=str_json['txnType'],
                      artifact=json.dumps(str_json['artifact']),
                      method=str_json['method'],
                      module=str_json['lib'],
                      remark=''))
def stringify(data):
    """Return a string form of *data*.

    dicts and lists are JSON-encoded; everything else goes through
    ``str()``.  If ``str()`` itself fails, the object is returned
    unchanged (best-effort, matching the original behaviour).
    """
    # isinstance also covers dict/list subclasses, unlike type(...) == dict.
    if isinstance(data, (dict, list)):
        return json.dumps(data)
    try:
        return str(data)
    except Exception:
        # Best effort: hand back the raw object rather than raising.
        return data
def read_from_database(db_path, index=0):
    """Read captured API calls for the app named *db_path* and return a
    DataTables-style JSON string:
    ``{"data": [[id, time, operation, method, module, remark, artifact_html], ...]}``.

    *index* is currently unused.
    """
    result_set = {}
    parent_holder = []
    db = dataset.connect('sqlite:///./app_dumps/%s.db' % (db_path))
    # GROUP BY artifact collapses captures with identical payloads.
    api_captures = db.query('SELECT * FROM api_captures GROUP BY artifact')
    for capture in api_captures:
        child_holder = []
        child_holder.append(capture['id'])
        child_holder.append(capture['time'])
        child_holder.append(capture['operation'])
        child_holder.append(capture['method'])
        child_holder.append(capture['module'])
        child_holder.append(capture['remark'])
        str_artifact = ''
        artifacts = json.loads(capture['artifact'])
        for artifact in artifacts:
            str_artifact += 'Name: ' + stringify(artifact['name']) + '\n' + stringify(artifact['value']) + '\n\n' # artifact['value'], str(artifact['argSeq'])
        #str_artifact = str_artifact.replace("<", "<").replace(">", ">")
        # Convert the plain-text rendering into simple HTML for the UI.
        str_artifact = str_artifact.replace('\n', '<br/>').replace('Name: ', '<b>Name: </b>')
        #print str_artifact
        child_holder.append(str_artifact)
        parent_holder.append(child_holder)
    result_set['data'] = parent_holder
    return json.dumps(result_set)
| import dataset, json
from xml.sax.saxutils import escape
def save_to_database(db_path, str_json):
str_json = json.loads(str_json)
db = dataset.connect('sqlite:///%s' % (db_path.replace("'", "_")))
table = db['api_captures']
table.insert(dict(time=str_json['time'],
operation=str_json['txnType'],
artifact=json.dumps(str_json['artifact']),
method=str_json['method'],
module=str_json['lib'],
remark=''))
def stringify(data):
str_data = ""
if type(data) == dict or type(data) == list:
return json.dumps(data)
else:
try:
str_data = str(data)
return str_data
except Exception as e:
return data
def read_from_database(db_path, index=0):
result_set = {}
parent_holder = []
db = dataset.connect('sqlite:///./app_dumps/%s.db' % (db_path))
api_captures = db.query('SELECT * FROM api_captures GROUP BY artifact')
for capture in api_captures:
child_holder = []
child_holder.append(capture['id'])
child_holder.append(capture['time'])
child_holder.append(capture['operation'])
child_holder.append(capture['method'])
child_holder.append(capture['module'])
child_holder.append(capture['remark'])
str_artifact = ''
artifacts = json.loads(capture['artifact'])
for artifact in artifacts:
str_artifact += 'Name: ' + stringify(artifact['name']) + '\n' + stringify(artifact['value']) + '\n\n' # artifact['value'], str(artifact['argSeq'])
#str_artifact = str_artifact.replace("<", "<").replace(">", ">")
str_artifact = str_artifact.replace('\n', '<br/>').replace('Name: ', '<b>Name: </b>')
#print str_artifact
child_holder.append(str_artifact)
parent_holder.append(child_holder)
result_set['data'] = parent_holder
return json.dumps(result_set)
| apache-2.0 | Python |
305340f912bd9b05da425100135c1825c5a738fd | Prepare rel. 0.0.15 | SEMAFORInformatik/femagtools,SEMAFORInformatik/femagtools | femagtools/__init__.py | femagtools/__init__.py | # -*- coding: utf-8 -*-
"""
femagtools
~~~~~~~~~~
Python bindings for FEMAG
:copyright: 2016 Semafor Informatik & Energie AG, Basel
:license: BSD, see LICENSE for more details.
"""
__title__ = 'femagtools'
__version__ = '0.0.15'
__author__ = 'Ronald Tanner'
__license__ = 'BSD'
__copyright__ = 'Copyright 2016 SEMAFOR Informatik & Energie AG'
from .bch import Reader
from .model import MachineModel
from .fsl import Builder
from .magnet import Magnet
from .femag import Femag, ZmqFemag
def read_bchfile(filename):
"""Read BCH/BATCH results from file *filename*."""
import io
bchresults = Reader()
with io.open(filename, encoding='latin1', errors='ignore') as f:
bchresults.read(f.readlines())
return bchresults
def create_fsl(machine, operatingconditions=None, magnetmat=None):
"""create FSL command list from model parameters"""
model = MachineModel(machine)
builder = Builder()
if operatingconditions:
if magnetmat:
magnets = Magnet(magnetmat)
return builder.create(model, operatingconditions, magnets)
return builder.create(model, operatingconditions, None)
return builder.create_model(model)
| # -*- coding: utf-8 -*-
"""
femagtools
~~~~~~~~~~
Python bindings for FEMAG
:copyright: 2016 Semafor Informatik & Energie AG, Basel
:license: BSD, see LICENSE for more details.
"""
__title__ = 'femagtools'
__version__ = '0.0.14'
__author__ = 'Ronald Tanner'
__license__ = 'BSD'
__copyright__ = 'Copyright 2016 SEMAFOR Informatik & Energie AG'
from .bch import Reader
from .model import MachineModel
from .fsl import Builder
from .magnet import Magnet
from .femag import Femag, ZmqFemag
def read_bchfile(filename):
"""Read BCH/BATCH results from file *filename*."""
import io
bchresults = Reader()
with io.open(filename, encoding='latin1', errors='ignore') as f:
bchresults.read(f.readlines())
return bchresults
def create_fsl(machine, operatingconditions=None, magnetmat=None):
"""create FSL command list from model parameters"""
model = MachineModel(machine)
builder = Builder()
if operatingconditions:
if magnetmat:
magnets = Magnet(magnetmat)
return builder.create(model, operatingconditions, magnets)
return builder.create(model, operatingconditions, None)
return builder.create_model(model)
| bsd-2-clause | Python |
4d396ccd8364b6635c54be0ef747f019a1d71af6 | Remove unnecessary code from datasift/__init__.py | datasift/datasift-python | datasift/__init__.py | datasift/__init__.py | # -*- coding: utf-8 -*-
"""
The official DataSift API library for Python. This module provides access to
the REST API and also facilitates consuming streams.
Requires Python 2.4+.
To use, 'import datasift' and create a datasift.User object passing in your
username and API key. See the examples folder for reference usage.
Source Code:
https://github.com/datasift/datasift-python
Examples:
https://github.com/datasift/datasift-python/tree/master/examples
DataSift Platform Documentation:
http://dev.datasift.com/docs/
Copyright (C) 2012 MediaSift Ltd. All Rights Reserved.
This software is Open Source. Read the license:
https://github.com/datasift/datasift-python/blob/master/LICENSE
"""
__author__ = "Courtney Robinson <courtney.robinson@datasift.com>"
__status__ = "beta"
__version__ = "1.0.0"
__date__ = "1st Nov 2013"
#-----------------------------------------------------------------------------
# Module constants
#-----------------------------------------------------------------------------
USER_AGENT = 'DataSift Python/%s' % __version__
WEBSOCKET_HOST = 'websocket.datasift.com'
API_HOST = 'api.datasift.com/'
from client import Client as DataSiftClient
from config import Config as DataSiftConfig
| # -*- coding: utf-8 -*-
"""
The official DataSift API library for Python. This module provides access to
the REST API and also facilitates consuming streams.
Requires Python 2.4+.
To use, 'import datasift' and create a datasift.User object passing in your
username and API key. See the examples folder for reference usage.
Source Code:
https://github.com/datasift/datasift-python
Examples:
https://github.com/datasift/datasift-python/tree/master/examples
DataSift Platform Documentation:
http://dev.datasift.com/docs/
Copyright (C) 2012 MediaSift Ltd. All Rights Reserved.
This software is Open Source. Read the license:
https://github.com/datasift/datasift-python/blob/master/LICENSE
"""
import sys
import os
__author__ = "Courtney Robinson <courtney.robinson@datasift.com>"
__status__ = "beta"
__version__ = "1.0.0"
__date__ = "1st Nov 2013"
#-----------------------------------------------------------------------------
# Add this folder to the system path.
#-----------------------------------------------------------------------------
sys.path[0:0] = [os.path.dirname(__file__), ]
#-----------------------------------------------------------------------------
# Module constants
#-----------------------------------------------------------------------------
USER_AGENT = 'DataSift Python/%s' % __version__
WEBSOCKET_HOST = 'websocket.datasift.com'
API_HOST = 'api.datasift.com/'
#-----------------------------------------------------------------------------
# Check for SSL support.
#-----------------------------------------------------------------------------
try:
import ssl
SSL_AVAILABLE = True
except ImportError:
SSL_AVAILABLE = False
from client import Client as DataSiftClient
from config import Config as DataSiftConfig
| mit | Python |
47504100712fb29b5380fcaaf2647fb85443c348 | Implement caching | Ecotrust/forestplanner,Ecotrust/forestplanner,Ecotrust/forestplanner,Ecotrust/forestplanner,Ecotrust/forestplanner,Ecotrust/forestplanner,Ecotrust/forestplanner,Ecotrust/forestplanner | lot/landmapper/urls.py | lot/landmapper/urls.py | from django.urls import include, re_path, path
from django.views.decorators.cache import cache_page
from landmapper.views import *
urlpatterns = [
# What is difference between re_path and path?
# re_path(r'',
# home, name='landmapper-home'),
path('', home, name="home"),
path('identify/', identify, name="identify"),
# path('/report/', report, name="report"),
path('create_property_id/', create_property_id, name='create_property_id'),
path('report/<str:property_id>', cache_page(60 * 60 * 24 * 7)(report), name='report'),
path('get_taxlot_json/', get_taxlot_json, name='get taxlot json'),
]
| from django.urls import include, re_path, path
from landmapper.views import *
urlpatterns = [
# What is difference between re_path and path?
# re_path(r'',
# home, name='landmapper-home'),
path('', home, name="home"),
path('identify/', identify, name="identify"),
# path('/report/', report, name="report"),
path('create_property_id/', create_property_id, name='create_property_id'),
path('report/<str:property_id>', report, name='report'),
path('get_taxlot_json/', get_taxlot_json, name='get taxlot json'),
]
| bsd-3-clause | Python |
de877d7dc98bc6cb227dbdea51df9a95281f10df | Add version to init | e-koch/FilFinder | fil_finder/__init__.py | fil_finder/__init__.py | # Licensed under an MIT open source license - see LICENSE
__version__ = "1.2.2"
from cores import *
from length import *
from pixel_ident import *
from utilities import *
from width import *
from analysis import Analysis
from filfind_class import fil_finder_2D
| # Licensed under an MIT open source license - see LICENSE
from cores import *
from length import *
from pixel_ident import *
from utilities import *
from width import *
from analysis import Analysis
from filfind_class import fil_finder_2D
| mit | Python |
4d4448e23957cc537a9e2d5c2013e4b19b24f836 | Fix imports for py34 | globocom/dbaas-zabbix,globocom/dbaas-zabbix | dbaas_zabbix/__init__.py | dbaas_zabbix/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from dbaas_zabbix.dbaas_api import DatabaseAsAServiceApi
from dbaas_zabbix.provider_factory import ProviderFactory
def factory_for(**kwargs):
databaseinfra = kwargs['databaseinfra']
del kwargs['databaseinfra']
dbaas_api = DatabaseAsAServiceApi(databaseinfra)
return ProviderFactory(dbaas_api, **kwargs)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from dbaas_api import DatabaseAsAServiceApi
from provider_factory import ProviderFactory
def factory_for(**kwargs):
databaseinfra = kwargs['databaseinfra']
del kwargs['databaseinfra']
dbaas_api = DatabaseAsAServiceApi(databaseinfra)
return ProviderFactory(dbaas_api, **kwargs)
| bsd-3-clause | Python |
6fe4f4bc8b4b564e8111aba162c00aaf7a4fc057 | Make is_local=False | Yancey1989/cloud,gongweibao/cloud,PaddlePaddle/cloud,gongweibao/cloud,PaddlePaddle/cloud,gongweibao/cloud,Yancey1989/cloud,gongweibao/cloud,PaddlePaddle/cloud,PaddlePaddle/cloud,Yancey1989/cloud,Yancey1989/cloud,PaddlePaddle/cloud,gongweibao/cloud,Yancey1989/cloud | demo/fit_a_line/train.py | demo/fit_a_line/train.py | import paddle.v2 as paddle
import pcloud.dataset.uci_housing as uci_housing
def main():
# init
paddle.init()
# network config
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
cost = paddle.layer.mse_cost(input=y_predict, label=y)
# create parameters
parameters = paddle.parameters.create(cost)
# create optimizer
optimizer = paddle.optimizer.Momentum(momentum=0)
trainer = paddle.trainer.SGD(
cost=cost, parameters=parameters, update_equation=optimizer, is_local=False)
feeding = {'x': 0, 'y': 1}
# event_handler to print training and testing info
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
print "Pass %d, Batch %d, Cost %f" % (
event.pass_id, event.batch_id, event.cost)
if isinstance(event, paddle.event.EndPass):
result = trainer.test(
reader=paddle.batch(uci_housing.test(), batch_size=2),
feeding=feeding)
print "Test %d, Cost %f" % (event.pass_id, result.cost)
# training
trainer.train(
reader=paddle.batch(
paddle.reader.shuffle(uci_housing.train(), buf_size=500),
batch_size=2),
feeding=feeding,
event_handler=event_handler,
num_passes=30)
if __name__ == '__main__':
main()
| import paddle.v2 as paddle
import pcloud.dataset.uci_housing as uci_housing
def main():
# init
paddle.init()
# network config
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
cost = paddle.layer.mse_cost(input=y_predict, label=y)
# create parameters
parameters = paddle.parameters.create(cost)
# create optimizer
optimizer = paddle.optimizer.Momentum(momentum=0)
trainer = paddle.trainer.SGD(
cost=cost, parameters=parameters, update_equation=optimizer)
feeding = {'x': 0, 'y': 1}
# event_handler to print training and testing info
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
print "Pass %d, Batch %d, Cost %f" % (
event.pass_id, event.batch_id, event.cost)
if isinstance(event, paddle.event.EndPass):
result = trainer.test(
reader=paddle.batch(uci_housing.test(), batch_size=2),
feeding=feeding)
print "Test %d, Cost %f" % (event.pass_id, result.cost)
# training
trainer.train(
reader=paddle.batch(
paddle.reader.shuffle(uci_housing.train(), buf_size=500),
batch_size=2),
feeding=feeding,
event_handler=event_handler,
num_passes=30)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
860e16f506d0a601540847fe21e617d8f7fbf882 | Add pickle convert to json method | John-Lin/malware,John-Lin/malware | malware/pickle_tool.py | malware/pickle_tool.py | import os
import cPickle as pickle
import simplejson as json
def update_pickle(new_cache):
# print('Updated pickle')
pickle.dump(new_cache, open('url_cache.pkl', 'wb'), 2)
def check_pickle():
# print('Checking in pickle')
if not os.path.isfile('url_cache.pkl'):
malicious_url = {}
pickle.dump(malicious_url, open('url_cache.pkl', 'wb'), 2)
cache = pickle.load(open('url_cache.pkl', 'rb'))
return cache
def update_json(data):
with open("url_cache.json", "wb") as fp:
try:
json.dump(data, fp)
except:
print "UnicodeDecodeError: 'utf8' codec can't decode byte 0xb8"
def check_json():
if not os.path.isfile('url_cache.json'):
init = {}
with open("url_cache.json", "wb") as fp:
json.dump(init, fp)
with open("url_cache.json", "rb") as fp:
# cache = json.load(fp)
cache = json.load(fp, "ISO-8859-1")
return cache
def pickle2json(pkl):
if os.path.isfile(pkl):
cache = pickle.load(open(pkl, 'rb'))
with open('pkl2json.json', 'wb') as fp:
json.dump(cache, fp)
else:
print "No such file"
| import os
import cPickle as pickle
import simplejson as json
def update_pickle(new_cache):
# print('Updated pickle')
pickle.dump(new_cache, open('url_cache.pkl', 'wb'), 2)
def check_pickle():
# print('Checking in pickle')
if not os.path.isfile('url_cache.pkl'):
malicious_url = {}
pickle.dump(malicious_url, open('url_cache.pkl', 'wb'), 2)
cache = pickle.load(open('url_cache.pkl', 'rb'))
return cache
def update_json(data):
with open("url_cache.json", "wb") as fp:
try:
json.dump(data, fp)
except:
print "UnicodeDecodeError: 'utf8' codec can't decode byte 0xb8"
def check_json():
if not os.path.isfile('url_cache.json'):
init = {}
with open("url_cache.json", "wb") as fp:
json.dump(init, fp)
with open("url_cache.json", "rb") as fp:
# cache = json.load(fp)
cache = json.load(fp, "ISO-8859-1")
return cache
| apache-2.0 | Python |
de6fea0ead5a8d2c7ffe65e1bb07249ad6823c69 | bump cifparser version to 0.0.2 | msfrank/cifparser | cifparser/version.py | cifparser/version.py | # Copyright 2015 Michael Frank <msfrank@syntaxjockey.com>
#
# This file is part of cifparser. cifparser is BSD-licensed software;
# for copyright information see the LICENSE file.
__version__ = (0, 0, 2)
def versionstring():
"""
Return the version number as a string.
"""
return "%i.%i.%i" % __version__
| # Copyright 2015 Michael Frank <msfrank@syntaxjockey.com>
#
# This file is part of cifparser. cifparser is BSD-licensed software;
# for copyright information see the LICENSE file.
__version__ = (0, 0, 1)
def versionstring():
"""
Return the version number as a string.
"""
return "%i.%i.%i" % __version__
| bsd-2-clause | Python |
11cf5c092cb71adb5713c414c585afca3586ff1a | Bump version - v0.2.0 | dealertrack/flake8-diff,miki725/flake8-diff | flake8diff/__init__.py | flake8diff/__init__.py | __version__ = '0.2.0'
__all__ = ['__version__']
| __version__ = '0.1.2'
__all__ = ['__version__']
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.