commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
15dfc6d1da9d8af2ed9a6f519744fab11a68367e | Fix `xml_sax_parser.py` good/bad naming | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | python/ql/test/experimental/query-tests/Security/CWE-611/xml_sax_make_parser.py | python/ql/test/experimental/query-tests/Security/CWE-611/xml_sax_make_parser.py | from flask import request, Flask
from io import StringIO
import xml.sax
# xml_content = '<?xml version="1.0"?><!DOCTYPE dt [<!ENTITY xxe SYSTEM "file:///etc/passwd">]><test>&xxe;</test>'
app = Flask(__name__)
class MainHandler(xml.sax.ContentHandler):
def __init__(self):
self._result = []
def characters(self, data):
self._result.append(data)
def parse(self, f):
xml.sax.parse(f, self)
return self._result
# GOOD
@app.route("/MainHandler")
def mainHandler():
xml_content = request.args['xml_content']
return MainHandler().parse(StringIO(xml_content))
@app.route("/xml.sax.make_parser()+MainHandler")
def xml_makeparser_MainHandler():
xml_content = request.args['xml_content']
GoodHandler = MainHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(GoodHandler)
parser.parse(StringIO(xml_content))
return GoodHandler._result
@app.route("/xml.sax.make_parser()+MainHandler-xml.sax.handler.feature_external_ges_False")
def xml_makeparser_MainHandler_entitiesFalse():
xml_content = request.args['xml_content']
GoodHandler = MainHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(GoodHandler)
# https://docs.python.org/3/library/xml.sax.handler.html#xml.sax.handler.feature_external_ges
parser.setFeature(xml.sax.handler.feature_external_ges, False)
parser.parse(StringIO(xml_content))
return GoodHandler._result
# BAD
@app.route("/xml.sax.make_parser()+MainHandler-xml.sax.handler.feature_external_ges_True")
def xml_makeparser_MainHandler_entitiesTrue():
xml_content = request.args['xml_content']
BadHandler = MainHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(BadHandler)
parser.setFeature(xml.sax.handler.feature_external_ges, True)
parser.parse(StringIO(xml_content))
return BadHandler._result
@app.route("/xml.sax.make_parser()+xml.dom.minidom.parse-xml.sax.handler.feature_external_ges_True")
def xml_makeparser_minidom_entitiesTrue():
xml_content = request.args['xml_content']
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_external_ges, True)
return xml.dom.minidom.parse(StringIO(xml_content), parser=parser).documentElement.childNodes
| from flask import request, Flask
from io import StringIO
import xml.sax
# xml_content = '<?xml version="1.0"?><!DOCTYPE dt [<!ENTITY xxe SYSTEM "file:///etc/passwd">]><test>&xxe;</test>'
app = Flask(__name__)
class MainHandler(xml.sax.ContentHandler):
def __init__(self):
self._result = []
def characters(self, data):
self._result.append(data)
def parse(self, f):
xml.sax.parse(f, self)
return self._result
# GOOD
@app.route("/MainHandler")
def mainHandler():
xml_content = request.args['xml_content']
return MainHandler().parse(StringIO(xml_content))
@app.route("/xml.sax.make_parser()+MainHandler")
def xml_makeparser_MainHandler():
xml_content = request.args['xml_content']
BadHandler = MainHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(BadHandler)
parser.parse(StringIO(xml_content))
return BadHandler._result
@app.route("/xml.sax.make_parser()+MainHandler-xml.sax.handler.feature_external_ges_False")
def xml_makeparser_MainHandler_entitiesFalse():
xml_content = request.args['xml_content']
BadHandler = MainHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(BadHandler)
# https://docs.python.org/3/library/xml.sax.handler.html#xml.sax.handler.feature_external_ges
parser.setFeature(xml.sax.handler.feature_external_ges, False)
parser.parse(StringIO(xml_content))
return BadHandler._result
# BAD
@app.route("/xml.sax.make_parser()+MainHandler-xml.sax.handler.feature_external_ges_True")
def xml_makeparser_MainHandler_entitiesTrue():
xml_content = request.args['xml_content']
GoodHandler = MainHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(GoodHandler)
parser.setFeature(xml.sax.handler.feature_external_ges, True)
parser.parse(StringIO(xml_content))
return GoodHandler._result
@app.route("/xml.sax.make_parser()+xml.dom.minidom.parse-xml.sax.handler.feature_external_ges_True")
def xml_makeparser_minidom_entitiesTrue():
xml_content = request.args['xml_content']
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_external_ges, True)
return xml.dom.minidom.parse(StringIO(xml_content), parser=parser).documentElement.childNodes
| mit | Python |
fdd66e3f5859e8cbec0c6bb7d3d337ea5c30845d | remove unused import statements | arjunkrishnababu96/jekpost | jekpost/jekpost_create.py | jekpost/jekpost_create.py | #!/usr/bin/env python3
import argparse
import datetime
def generate_post_file(title, location, disqus_name=None):
title_line = "title: {}".format(title)
filename = make_filename(title, get_date_formatted(datetime.date.today()))
with open(filename, mode='x', encoding='utf-8') as actual_file:
print('---', file=actual_file)
print('layout: post', file=actual_file)
print(title_line, file=actual_file)
print('excerpt: Your excerpt goes here', file=actual_file)
print('tags: tag1, tag2', file=actual_file)
print('---', file=actual_file)
print('Your post content goes here', file=actual_file)
print("done!")
return filename
def make_filename(post_title, date_prefix):
title_formatted = post_title.replace(' ', '-')
filename = date_prefix + '-' + title_formatted + '.md'
return filename
def get_date_formatted(date):
"""
Return the date in the format: 'YEAR-MONTH-DAY'
"""
year = str(date.year)
month = str(date.month)
day = str(date.day)
single_digits = range(1, 10)
if date.month in single_digits:
month = '0' + month
if date.day in single_digits:
day = '0' + day
return year + '-' + month + '-' + day
def main():
parser = argparse.ArgumentParser()
parser.add_argument('title', help='Post title')
parser.add_argument('location', help='Destination directory')
parser.add_argument('-dq', '--disqus', help='Disqus shortname')
args = parser.parse_args()
post_title = args.title.strip() # remove whitespaces that may be at
# either ends.
print(" Disqus shortname: ", args.disqus)
print(" Post Title: ", post_title)
try:
filename = generate_post_file(post_title, args.location, args.disqus)
except FileExistsError as err:
print("\n\n", err)
except FileNotFoundError as err:
print("\n\n", err)
except NotADirectoryError as err:
print("\n\n", err)
else:
print(" New post created: ", filename)
print(" Happy blogging!")
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse
import shutil
import os
import datetime
def generate_post_file(title, location, disqus_name=None):
title_line = "title: {}".format(title)
filename = make_filename(title, get_date_formatted(datetime.date.today()))
with open(filename, mode='x', encoding='utf-8') as actual_file:
print('---', file=actual_file)
print('layout: post', file=actual_file)
print(title_line, file=actual_file)
print('excerpt: Your excerpt goes here', file=actual_file)
print('tags: tag1, tag2', file=actual_file)
print('---', file=actual_file)
print('Your post content goes here', file=actual_file)
print("done!")
return filename
def make_filename(post_title, date_prefix):
title_formatted = post_title.replace(' ', '-')
filename = date_prefix + '-' + title_formatted + '.md'
return filename
def get_date_formatted(date):
"""
Return the date in the format: 'YEAR-MONTH-DAY'
"""
year = str(date.year)
month = str(date.month)
day = str(date.day)
single_digits = range(1, 10)
if date.month in single_digits:
month = '0' + month
if date.day in single_digits:
day = '0' + day
return year + '-' + month + '-' + day
def main():
parser = argparse.ArgumentParser()
parser.add_argument('title', help='Post title')
parser.add_argument('location', help='Destination directory')
parser.add_argument('-dq', '--disqus', help='Disqus shortname')
args = parser.parse_args()
post_title = args.title.strip() # remove whitespaces that may be at
# either ends.
print(" Disqus shortname: ", args.disqus)
print(" Post Title: ", post_title)
try:
filename = generate_post_file(post_title, args.location, args.disqus)
except FileExistsError as err:
print("\n\n", err)
except FileNotFoundError as err:
print("\n\n", err)
except NotADirectoryError as err:
print("\n\n", err)
else:
print(" New post created: ", filename)
print(" Happy blogging!")
if __name__ == '__main__':
main()
| mit | Python |
9e7eae0e2f3ec5e818f70a537ee740cd5375f7c3 | make cfitsio explicitly depend on curl (#12016) | iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/cfitsio/package.py | var/spack/repos/builtin/packages/cfitsio/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cfitsio(AutotoolsPackage):
"""CFITSIO is a library of C and Fortran subroutines for reading and writing
data files in FITS (Flexible Image Transport System) data format.
"""
homepage = 'http://heasarc.gsfc.nasa.gov/fitsio/'
url = 'http://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio3450.tar.gz'
version('3.450', 'f470849bb43561d9a9b1925eeb7f7f0d')
version('3.420', '26e5c0dfb85b8d00f536e706305caa13')
version('3.410', '8a4a66fcdd816aae41768baa0b025552')
version('3.370', 'abebd2d02ba5b0503c633581e3bfa116')
variant('bzip2', default=True, description='Enable bzip2 support')
variant('shared', default=True, description='Build shared libraries')
depends_on('curl')
depends_on('bzip2', when='+bzip2')
def url_for_version(self, version):
url = 'http://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio{0}.tar.gz'
return url.format(version.joined)
def configure_args(self):
spec = self.spec
extra_args = []
if '+bzip2' in spec:
extra_args.append('--with-bzip2=%s' % spec['bzip2'].prefix),
return extra_args
@property
def build_targets(self):
targets = ['all']
# Build shared if variant is set.
if '+shared' in self.spec:
targets += ['shared']
return targets
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cfitsio(AutotoolsPackage):
"""CFITSIO is a library of C and Fortran subroutines for reading and writing
data files in FITS (Flexible Image Transport System) data format.
"""
homepage = 'http://heasarc.gsfc.nasa.gov/fitsio/'
url = 'http://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio3450.tar.gz'
version('3.450', 'f470849bb43561d9a9b1925eeb7f7f0d')
version('3.420', '26e5c0dfb85b8d00f536e706305caa13')
version('3.410', '8a4a66fcdd816aae41768baa0b025552')
version('3.370', 'abebd2d02ba5b0503c633581e3bfa116')
variant('bzip2', default=True, description='Enable bzip2 support')
variant('shared', default=True, description='Build shared libraries')
depends_on('bzip2', when='+bzip2')
def url_for_version(self, version):
url = 'http://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio{0}.tar.gz'
return url.format(version.joined)
def configure_args(self):
spec = self.spec
extra_args = []
if '+bzip2' in spec:
extra_args.append('--with-bzip2=%s' % spec['bzip2'].prefix),
return extra_args
@property
def build_targets(self):
targets = ['all']
# Build shared if variant is set.
if '+shared' in self.spec:
targets += ['shared']
return targets
| lgpl-2.1 | Python |
a028ee702583deeb07b3bee38e200bd843479320 | Load default watermark from the environment | jacebrowning/memegen,jacebrowning/memegen | app/settings.py | app/settings.py | import os
from pathlib import Path
ROOT = Path(__file__).parent.parent.resolve()
# Server configuration
PORT = int(os.environ.get("PORT", 5000))
WORKERS = int(os.environ.get("WEB_CONCURRENCY", 1))
if "DOMAIN" in os.environ: # staging / production
SERVER_NAME = os.environ["DOMAIN"]
RELEASE_STAGE = "staging" if "staging" in SERVER_NAME else "production"
SCHEME = "https"
elif "HEROKU_APP_NAME" in os.environ: # review apps
SERVER_NAME = os.environ["HEROKU_APP_NAME"] + ".herokuapp.com"
RELEASE_STAGE = "review"
SCHEME = "https"
else: # localhost
SERVER_NAME = f"localhost:{PORT}"
RELEASE_STAGE = "local"
SCHEME = "http"
DEPLOYED = RELEASE_STAGE != "local" or "WEB_CONCURRENCY" in os.environ
BUGSNAG_API_KEY = os.getenv("BUGSNAG_API_KEY")
DEBUG = bool(os.environ.get("DEBUG", False))
# Fonts
FONTS_DIRECTORY = ROOT / "fonts"
FONT_THIN = FONTS_DIRECTORY / "TitilliumWeb-SemiBold.ttf"
FONT_THICK = FONTS_DIRECTORY / "TitilliumWeb-Black.ttf"
# Image rendering
IMAGES_DIRECTORY = ROOT / "images"
DEFAULT_EXT = "png"
DEFAULT_STYLE = "default"
PREVIEW_SIZE = (300, 300)
DEFAULT_SIZE = (600, 600)
MAXIMUM_PIXELS = 1920 * 1080
# Test images
TEST_IMAGES_DIRECTORY = ROOT / "app" / "tests" / "images"
TEST_IMAGES = [
(
"iw",
["tests code", "in production"],
),
(
"fry",
["a", "b"],
),
(
"fry",
["short line", "longer line of text than the short one"],
),
(
"fry",
["longer line of text than the short one", "short line"],
),
(
"sparta",
["", "this is a wide image!"],
),
(
"ski",
[
"if you try to put a bunch more text than can possibly fit on a meme",
"you're gonna have a bad time",
],
),
(
"ds",
["Push this button.", "Push that button.", "can't decide which is worse"],
),
(
"spongebob",
["You: Stop talking like that", "Me: Stop talking like that"],
),
]
# Analytics
DEFAULT_WATERMARK = os.getenv("WATERMARK_DEFAULT", "")
ALLOWED_WATERMARKS = [DEFAULT_WATERMARK] + os.getenv("WATERMARK_OPTIONS", "").split(",")
REMOTE_TRACKING_URL = os.getenv("REMOTE_TRACKING_URL")
| import os
from pathlib import Path
ROOT = Path(__file__).parent.parent.resolve()
# Server configuration
PORT = int(os.environ.get("PORT", 5000))
WORKERS = int(os.environ.get("WEB_CONCURRENCY", 1))
if "DOMAIN" in os.environ: # staging / production
SERVER_NAME = os.environ["DOMAIN"]
RELEASE_STAGE = "staging" if "staging" in SERVER_NAME else "production"
SCHEME = "https"
elif "HEROKU_APP_NAME" in os.environ: # review apps
SERVER_NAME = os.environ["HEROKU_APP_NAME"] + ".herokuapp.com"
RELEASE_STAGE = "review"
SCHEME = "https"
else: # localhost
SERVER_NAME = f"localhost:{PORT}"
RELEASE_STAGE = "local"
SCHEME = "http"
DEPLOYED = RELEASE_STAGE != "local" or "WEB_CONCURRENCY" in os.environ
BUGSNAG_API_KEY = os.getenv("BUGSNAG_API_KEY")
DEBUG = bool(os.environ.get("DEBUG", False))
# Fonts
FONTS_DIRECTORY = ROOT / "fonts"
FONT_THIN = FONTS_DIRECTORY / "TitilliumWeb-SemiBold.ttf"
FONT_THICK = FONTS_DIRECTORY / "TitilliumWeb-Black.ttf"
# Image rendering
IMAGES_DIRECTORY = ROOT / "images"
DEFAULT_EXT = "png"
DEFAULT_STYLE = "default"
PREVIEW_SIZE = (300, 300)
DEFAULT_SIZE = (600, 600)
MAXIMUM_PIXELS = 1920 * 1080
# Test images
TEST_IMAGES_DIRECTORY = ROOT / "app" / "tests" / "images"
TEST_IMAGES = [
(
"iw",
["tests code", "in production"],
),
(
"fry",
["a", "b"],
),
(
"fry",
["short line", "longer line of text than the short one"],
),
(
"fry",
["longer line of text than the short one", "short line"],
),
(
"sparta",
["", "this is a wide image!"],
),
(
"ski",
[
"if you try to put a bunch more text than can possibly fit on a meme",
"you're gonna have a bad time",
],
),
(
"ds",
["Push this button.", "Push that button.", "can't decide which is worse"],
),
(
"spongebob",
["You: Stop talking like that", "Me: Stop talking like that"],
),
]
# Analytics
DEFAULT_WATERMARK = ""
ALLOWED_WATERMARKS = os.getenv("ALLOWED_WATERMARKS", "").split(",")
REMOTE_TRACKING_URL = os.getenv("REMOTE_TRACKING_URL")
| mit | Python |
1283d02beb8f633e2c31ba29fd43c8eef45b7cdb | Bump micro version number. | laijingtao/landlab,landlab/landlab,cmshobe/landlab,RondaStrauch/landlab,landlab/landlab,landlab/landlab,decvalts/landlab,csherwood-usgs/landlab,SiccarPoint/landlab,cmshobe/landlab,RondaStrauch/landlab,SiccarPoint/landlab,ManuSchmi88/landlab,Carralex/landlab,ManuSchmi88/landlab,decvalts/landlab,csherwood-usgs/landlab,amandersillinois/landlab,Carralex/landlab,RondaStrauch/landlab,ManuSchmi88/landlab,amandersillinois/landlab,cmshobe/landlab,Carralex/landlab,laijingtao/landlab | landlab/__init__.py | landlab/__init__.py | #! /usr/bin/env python
"""
The Landlab
:Package name: TheLandlab
:Version: 0.1.0
:Release date: 2013-03-24
:Authors:
Greg Tucker,
Nicole Gasparini,
Erkan Istanbulluoglu,
Daniel Hobley,
Sai Nudurupati,
Jordan Adams,
Eric Hutton
:URL: http://csdms.colorado.edu/trac/landlab
:License: MIT
"""
from __future__ import absolute_import
__version__ = '0.1.19'
import os
if 'DISPLAY' not in os.environ:
try:
import matplotlib
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
else:
matplotlib.use('Agg')
from .core.model_parameter_dictionary import ModelParameterDictionary
from .core.model_parameter_dictionary import (MissingKeyError,
ParameterValueError)
from .core.model_component import Component
from .framework.collections import Palette, Arena, NoProvidersError
from .framework.decorators import Implements, ImplementsOrRaise
from .framework.framework import Framework
from .field.scalar_data_fields import FieldError
from .grid import *
from .plot import *
from .testing.nosetester import LandlabTester
test = LandlabTester().test
bench = LandlabTester().bench
__all__ = ['ModelParameterDictionary', 'MissingKeyError',
'ParameterValueError', 'Component', 'Palette', 'Arena',
'NoProvidersError', 'Implements', 'ImplementsOrRaise',
'Framework', 'FieldError', 'LandlabTester']
| #! /usr/bin/env python
"""
The Landlab
:Package name: TheLandlab
:Version: 0.1.0
:Release date: 2013-03-24
:Authors:
Greg Tucker,
Nicole Gasparini,
Erkan Istanbulluoglu,
Daniel Hobley,
Sai Nudurupati,
Jordan Adams,
Eric Hutton
:URL: http://csdms.colorado.edu/trac/landlab
:License: MIT
"""
from __future__ import absolute_import
__version__ = '0.1.18'
import os
if 'DISPLAY' not in os.environ:
try:
import matplotlib
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
else:
matplotlib.use('Agg')
from .core.model_parameter_dictionary import ModelParameterDictionary
from .core.model_parameter_dictionary import (MissingKeyError,
ParameterValueError)
from .core.model_component import Component
from .framework.collections import Palette, Arena, NoProvidersError
from .framework.decorators import Implements, ImplementsOrRaise
from .framework.framework import Framework
from .field.scalar_data_fields import FieldError
from .grid import *
from .plot import *
from .testing.nosetester import LandlabTester
test = LandlabTester().test
bench = LandlabTester().bench
__all__ = ['ModelParameterDictionary', 'MissingKeyError',
'ParameterValueError', 'Component', 'Palette', 'Arena',
'NoProvidersError', 'Implements', 'ImplementsOrRaise',
'Framework', 'FieldError', 'LandlabTester']
| mit | Python |
eea8d0364609a53e215e3f056e6315a6dbc3d50d | call the decorator | Endika/OpenUpgrade,Endika/OpenUpgrade,pedrobaeza/OpenUpgrade,hifly/OpenUpgrade,grap/OpenUpgrade,damdam-s/OpenUpgrade,Endika/OpenUpgrade,hifly/OpenUpgrade,blaggacao/OpenUpgrade,blaggacao/OpenUpgrade,sebalix/OpenUpgrade,hifly/OpenUpgrade,damdam-s/OpenUpgrade,hifly/OpenUpgrade,blaggacao/OpenUpgrade,sebalix/OpenUpgrade,OpenUpgrade/OpenUpgrade,pedrobaeza/OpenUpgrade,damdam-s/OpenUpgrade,OpenUpgrade/OpenUpgrade,grap/OpenUpgrade,damdam-s/OpenUpgrade,sebalix/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,sebalix/OpenUpgrade,OpenUpgrade/OpenUpgrade,blaggacao/OpenUpgrade,blaggacao/OpenUpgrade,bwrsandman/OpenUpgrade,blaggacao/OpenUpgrade,sebalix/OpenUpgrade,hifly/OpenUpgrade,bwrsandman/OpenUpgrade,pedrobaeza/OpenUpgrade,grap/OpenUpgrade,pedrobaeza/OpenUpgrade,Endika/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,blaggacao/OpenUpgrade,bwrsandman/OpenUpgrade,damdam-s/OpenUpgrade,Endika/OpenUpgrade,damdam-s/OpenUpgrade,bwrsandman/OpenUpgrade,pedrobaeza/OpenUpgrade,pedrobaeza/OpenUpgrade,OpenUpgrade/OpenUpgrade,grap/OpenUpgrade,grap/OpenUpgrade,sebalix/OpenUpgrade,hifly/OpenUpgrade,damdam-s/OpenUpgrade,bwrsandman/OpenUpgrade,pedrobaeza/OpenUpgrade,grap/OpenUpgrade,sebalix/OpenUpgrade,hifly/OpenUpgrade,bwrsandman/OpenUpgrade,bwrsandman/OpenUpgrade | addons/account_analytic_analysis/migrations/8.0.1.1/pre-migration.py | addons/account_analytic_analysis/migrations/8.0.1.1/pre-migration.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# if account_analytic_analysis_recurring is installed, uninstall it and
# move relevant xmlids to this module
cr.execute(
"update ir_model_data set module='account_analytic_analysis' "
"where name in ('account_analytic_cron_for_invoice') "
"and module='account_analytic_analysis_recurring'")
cr.execute(
"update ir_module_module set state='to remove' "
"where name='account_analytic_analysis_recurring' "
"and state in ('installed', 'to install', 'to upgrade')")
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate
def migrate(cr, version):
# if account_analytic_analysis_recurring is installed, uninstall it and
# move relevant xmlids to this module
cr.execute(
"update ir_model_data set module='account_analytic_analysis' "
"where name in ('account_analytic_cron_for_invoice') "
"and module='account_analytic_analysis_recurring'")
cr.execute(
"update ir_module_module set state='to remove' "
"where name='account_analytic_analysis_recurring' "
"and state in ('installed', 'to install', 'to upgrade')")
| agpl-3.0 | Python |
a368bf67522711d8c9ad31f231af95c307612ca2 | add a getters to exceptions | missionpinball/mpf,missionpinball/mpf | mpf/exceptions/base_error.py | mpf/exceptions/base_error.py | """Error in MPF or MPF-MC."""
from mpf._version import log_url
class BaseError(AssertionError):
"""Error in a config file found."""
# pylint: disable-msg=too-many-arguments
def __init__(self, message, error_no, logger_name, context=None, url_name=None):
"""Initialise exception."""
self._logger_name = logger_name
self._error_no = error_no
self._context = context
self._message = message
if url_name:
self._url_name = url_name
else:
self._url_name = logger_name
super().__init__(message)
def get_error_no(self):
"""Return error no."""
return self._error_no
def get_context(self):
"""Return error no."""
return self._context
def get_logger_name(self):
"""Return error no."""
return self._logger_name
def get_short_name(self):
"""Return short name."""
raise NotImplementedError
def get_long_name(self):
"""Return long name."""
raise NotImplementedError
def extend(self, message):
"""Chain a new message onto an existing error, keeping the original error's logger, context, and error_no."""
self._message = "{} >> {}".format(message, self._message)
super().__init__(self._message)
def __str__(self):
"""Return nice string."""
error_slug = "{}-{}-{}".format(self.get_short_name(), self._url_name.replace(" ", "_"), self._error_no)
error_url = log_url.format(error_slug)
if self._context:
return "{} in {}: {} Context: {} Error Code: {} ({})".format(
self.get_long_name(), self._logger_name, super().__str__(), self._context, error_slug, error_url)
return "{} in {}: {} Error Code: {} ({})".format(
self.get_long_name(), self._logger_name, super().__str__(), error_slug, error_url)
| """Error in MPF or MPF-MC."""
from mpf._version import log_url
class BaseError(AssertionError):
"""Error in a config file found."""
# pylint: disable-msg=too-many-arguments
def __init__(self, message, error_no, logger_name, context=None, url_name=None):
"""Initialise exception."""
self._logger_name = logger_name
self._error_no = error_no
self._context = context
self._message = message
if url_name:
self._url_name = url_name
else:
self._url_name = logger_name
super().__init__(message)
def get_short_name(self):
"""Return short name."""
raise NotImplementedError
def get_long_name(self):
"""Return long name."""
raise NotImplementedError
def extend(self, message):
"""Chain a new message onto an existing error, keeping the original error's logger, context, and error_no."""
self._message = "{} >> {}".format(message, self._message)
super().__init__(self._message)
def __str__(self):
"""Return nice string."""
error_slug = "{}-{}-{}".format(self.get_short_name(), self._url_name.replace(" ", "_"), self._error_no)
error_url = log_url.format(error_slug)
if self._context:
return "{} in {}: {} Context: {} Error Code: {} ({})".format(
self.get_long_name(), self._logger_name, super().__str__(), self._context, error_slug, error_url)
return "{} in {}: {} Error Code: {} ({})".format(
self.get_long_name(), self._logger_name, super().__str__(), error_slug, error_url)
| mit | Python |
a651b979ca869be932a6ad8f9686436ab6a16a6e | Update the py-lxml package (#4090) | iulian787/spack,tmerrick1/spack,lgarren/spack,matthiasdiener/spack,iulian787/spack,LLNL/spack,LLNL/spack,skosukhin/spack,matthiasdiener/spack,krafczyk/spack,tmerrick1/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,krafczyk/spack,mfherbst/spack,mfherbst/spack,skosukhin/spack,EmreAtes/spack,lgarren/spack,EmreAtes/spack,LLNL/spack,lgarren/spack,TheTimmy/spack,TheTimmy/spack,matthiasdiener/spack,TheTimmy/spack,lgarren/spack,matthiasdiener/spack,skosukhin/spack,matthiasdiener/spack,EmreAtes/spack,mfherbst/spack,mfherbst/spack,skosukhin/spack,tmerrick1/spack,EmreAtes/spack,TheTimmy/spack,mfherbst/spack,LLNL/spack,TheTimmy/spack,lgarren/spack,krafczyk/spack,krafczyk/spack,krafczyk/spack,EmreAtes/spack,LLNL/spack,iulian787/spack,skosukhin/spack,iulian787/spack | var/spack/repos/builtin/packages/py-lxml/package.py | var/spack/repos/builtin/packages/py-lxml/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyLxml(PythonPackage):
"""lxml is the most feature-rich and easy-to-use library for processing
XML and HTML in the Python language."""
homepage = "http://lxml.de/"
url = "https://pypi.io/packages/source/l/lxml/lxml-2.3.tar.gz"
version('3.7.3', '075692ce442e69bbd604d44e21c02753')
version('2.3', 'a245a015fd59b63e220005f263e1682a')
depends_on('py-setuptools@0.6c5:', type='build')
depends_on('py-cython@0.20:', type='build')
depends_on('libxml2', type=('build', 'run'))
depends_on('libxslt', type=('build', 'run'))
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyLxml(PythonPackage):
"""lxml is the most feature-rich and easy-to-use library for processing
XML and HTML in the Python language."""
homepage = "http://lxml.de/"
url = "https://pypi.io/packages/source/l/lxml/lxml-2.3.tar.gz"
version('2.3', 'a245a015fd59b63e220005f263e1682a')
depends_on('py-setuptools@0.6c5:', type='build')
| lgpl-2.1 | Python |
40625f2b38525c738486c25d10223fa1b24ce7ec | Use unicode in to_string | translationexchange/tml-python,translationexchange/tml-python | tml/strings.py | tml/strings.py | # encoding: UTF-8
def to_string(text):
""" Safe string conversion
Args:
text (string|unicode): input string
Returns:
str
"""
if type(text) is unicode:
return text
return unicode(text.decode('utf-8'))
| # encoding: UTF-8
def to_string(text):
""" Safe string conversion
Args:
text (string|unicode): input string
Returns:
str
"""
if text is unicode:
return text.encode('utf-8')
return text
| mit | Python |
9660f44a5d17521109397b14c32db71a2fba18ff | Update Conda Version to match MT build number | Esri/military-tools-geoprocessing-toolbox | tools/setup.py | tools/setup.py | # coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
'''
#from distutils.core import setup
from setuptools import setup, find_packages
setup(name='militarytools',
version='3.4.0',
description=r"Military Tools for ArcGIS",
license='Apache-2.0',
url=r'http://solutions.arcgis.com/defense/help/military-tools/',
author=r'Esri Solutions',
author_email=r"support@esri.com",
keywords=r"military tools arcgis arcpy solutions esri",
packages=find_packages(),
package_dir={'militarytools': 'militarytools'},
package_data={'militarytools':
[r'*.*',
r'esri/toolboxes/scripts/*.*',
r'esri/toolboxes/layers/*.*',
r'esri/toolboxes/layers/featuresetsWebMerc.gdb/*.*',
r'esri/toolboxes/layers/featuresetsWebMerc.gdb/*',
r'esri/toolboxes/tooldata/*.*',
r'esri/toolboxes/tooldata/RangeRings.gdb/*.*',
r'esri/toolboxes/tooldata/RangeRings.gdb/*',
r'esri/toolboxes/*.*',
r'esri/*.*',
r'esri/arcpy/*.*', # Not currently used
r'esri/help/*.*', # Not currently used
r'esri/help/gp/*.*', # Not currently used
r'esri/help/gp/toolboxes/*.*', # Not currently used
]
},
)
| # coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
'''
#from distutils.core import setup
from setuptools import setup, find_packages
setup(name='militarytools',
version='3.3.1',
description=r"Military Tools for ArcGIS",
license='Apache-2.0',
url=r'http://solutions.arcgis.com/defense/help/military-tools/',
author=r'Esri Solutions',
author_email=r"support@esri.com",
keywords=r"military tools arcgis arcpy solutions esri",
packages=find_packages(),
package_dir={'militarytools': 'militarytools'},
package_data={'militarytools':
[r'*.*',
r'esri/toolboxes/scripts/*.*',
r'esri/toolboxes/layers/*.*',
r'esri/toolboxes/layers/featuresetsWebMerc.gdb/*.*',
r'esri/toolboxes/layers/featuresetsWebMerc.gdb/*',
r'esri/toolboxes/tooldata/*.*',
r'esri/toolboxes/tooldata/RangeRings.gdb/*.*',
r'esri/toolboxes/tooldata/RangeRings.gdb/*',
r'esri/toolboxes/*.*',
r'esri/*.*',
r'esri/arcpy/*.*', # Not currently used
r'esri/help/*.*', # Not currently used
r'esri/help/gp/*.*', # Not currently used
r'esri/help/gp/toolboxes/*.*', # Not currently used
]
},
)
| apache-2.0 | Python |
b50f72c1aa77e91f4cfcecbcf4dedd7095f1c71e | improve comments for placeholder replacing | b-mueller/mythril,b-mueller/mythril,b-mueller/mythril,b-mueller/mythril | mythril/ether/ethcontract.py | mythril/ether/ethcontract.py | from mythril.disassembler.disassembly import Disassembly
from ethereum import utils
import persistent
import re
class ETHContract(persistent.Persistent):
def __init__(self, code, creation_code="", name="Unknown", enable_online_lookup=True):
# Workaround: We currently do not support compile-time linking.
# Dynamic contract addresses of the format __[contract-name]_____________ are replaced with a generic address
# Apply this for creation_code & code
creation_code = re.sub(r'(_+.*_+)', 'aa' * 20, creation_code)
code = re.sub(r'(_+.*_+)', 'aa' * 20, code)
self.creation_code = creation_code
self.name = name
self.code = code
self.disassembly = Disassembly(code, enable_online_lookup=enable_online_lookup)
self.creation_disassembly = Disassembly(creation_code, enable_online_lookup=enable_online_lookup)
def as_dict(self):
return {
'address': self.address,
'name': self.name,
'code': self.code,
'creation_code': self.creation_code,
'disassembly': self.disassembly
}
def get_easm(self):
return self.disassembly.get_easm()
def matches_expression(self, expression):
str_eval = ''
easm_code = None
tokens = re.split("\s+(and|or|not)\s+", expression, re.IGNORECASE)
for token in tokens:
if token in ("and", "or", "not"):
str_eval += " " + token + " "
continue
m = re.match(r'^code#([a-zA-Z0-9\s,\[\]]+)#', token)
if (m):
if easm_code is None:
easm_code = self.get_easm()
code = m.group(1).replace(",", "\\n")
str_eval += "\"" + code + "\" in easm_code"
continue
m = re.match(r'^func#([a-zA-Z0-9\s_,(\\)\[\]]+)#$', token)
if (m):
sign_hash = "0x" + utils.sha3(m.group(1))[:4].hex()
str_eval += "\"" + sign_hash + "\" in self.disassembly.func_hashes"
continue
return eval(str_eval.strip())
| from mythril.disassembler.disassembly import Disassembly
from ethereum import utils
import persistent
import re
class ETHContract(persistent.Persistent):
def __init__(self, code, creation_code="", name="Unknown", enable_online_lookup=True):
creation_code = re.sub(r'(_+.*_+)', 'aa' * 20, creation_code)
self.creation_code = creation_code
self.name = name
# Workaround: We currently do not support compile-time linking.
# Dynamic contract addresses of the format __[contract-name]_____________ are replaced with a generic address
code = re.sub(r'(_+.*_+)', 'aa' * 20, code)
self.code = code
self.disassembly = Disassembly(code, enable_online_lookup=enable_online_lookup)
self.creation_disassembly = Disassembly(creation_code, enable_online_lookup=enable_online_lookup)
def as_dict(self):
return {
'address': self.address,
'name': self.name,
'code': self.code,
'creation_code': self.creation_code,
'disassembly': self.disassembly
}
def get_easm(self):
return self.disassembly.get_easm()
def matches_expression(self, expression):
str_eval = ''
easm_code = None
tokens = re.split("\s+(and|or|not)\s+", expression, re.IGNORECASE)
for token in tokens:
if token in ("and", "or", "not"):
str_eval += " " + token + " "
continue
m = re.match(r'^code#([a-zA-Z0-9\s,\[\]]+)#', token)
if (m):
if easm_code is None:
easm_code = self.get_easm()
code = m.group(1).replace(",", "\\n")
str_eval += "\"" + code + "\" in easm_code"
continue
m = re.match(r'^func#([a-zA-Z0-9\s_,(\\)\[\]]+)#$', token)
if (m):
sign_hash = "0x" + utils.sha3(m.group(1))[:4].hex()
str_eval += "\"" + sign_hash + "\" in self.disassembly.func_hashes"
continue
return eval(str_eval.strip())
| mit | Python |
09d7f74ad51f468c9e74db0fcce9c5d65ebc919c | Fix import | projectweekend/Pi-Camera-Time-Lapse,projectweekend/Pi-Camera-Time-Lapse | camera/__init__.py | camera/__init__.py | from .camera import ConfigurableCamera
| from .camera import TimeLapseCamera
| mit | Python |
c941414c6ff18d6f3ad46b20cfa431549d8fde20 | Bump version to 0.8.0.rc2 | christiansandberg/canopen,christiansandberg/canopen | canopen/version.py | canopen/version.py |
__version__ = "0.8.0.rc2"
|
__version__ = "0.8.0.rc1"
| mit | Python |
7d22d48e14cdcc24e9f4d1308be4a9bfb443f5df | disable the discussion cache update signal until we get solution for pickling | J861449197/edx-platform,shashank971/edx-platform,bigdatauniversity/edx-platform,eestay/edx-platform,EduPepperPDTesting/pepper2013-testing,pdehaye/theming-edx-platform,jazztpt/edx-platform,leansoft/edx-platform,vismartltd/edx-platform,longmen21/edx-platform,mbareta/edx-platform-ft,vasyarv/edx-platform,devs1991/test_edx_docmode,msegado/edx-platform,LICEF/edx-platform,eemirtekin/edx-platform,motion2015/edx-platform,UOMx/edx-platform,chauhanhardik/populo,itsjeyd/edx-platform,cecep-edu/edx-platform,ahmadiga/min_edx,antoviaque/edx-platform,zofuthan/edx-platform,doismellburning/edx-platform,valtech-mooc/edx-platform,zubair-arbi/edx-platform,hastexo/edx-platform,ak2703/edx-platform,bitifirefly/edx-platform,edx/edx-platform,CourseTalk/edx-platform,Edraak/edraak-platform,marcore/edx-platform,utecuy/edx-platform,auferack08/edx-platform,MakeHer/edx-platform,syjeon/new_edx,hkawasaki/kawasaki-aio8-0,morenopc/edx-platform,abdoosh00/edraak,mjg2203/edx-platform-seas,jjmiranda/edx-platform,DefyVentures/edx-platform,IITBinterns13/edx-platform-dev,leansoft/edx-platform,nagyistoce/edx-platform,WatanabeYasumasa/edx-platform,LearnEra/LearnEraPlaftform,alu042/edx-platform,IndonesiaX/edx-platform,jolyonb/edx-platform,nttks/jenkins-test,TsinghuaX/edx-platform,EDUlib/edx-platform,Endika/edx-platform,wwj718/ANALYSE,jamesblunt/edx-platform,defance/edx-platform,ahmedaljazzar/edx-platform,miptliot/edx-platform,gsehub/edx-platform,pepeportela/edx-platform,unicri/edx-platform,kursitet/edx-platform,doganov/edx-platform,JioEducation/edx-platform,hmcmooc/muddx-platform,4eek/edx-platform,TeachAtTUM/edx-platform,appliedx/edx-platform,J861449197/edx-platform,UOMx/edx-platform,cyanna/edx-platform,vikas1885/test1,edx/edx-platform,RPI-OPENEDX/edx-platform,zerobatu/edx-platform,kalebhartje/schoolboost,appsembler/edx-platform,lduarte1991/edx-platform,vikas1885/test1,kursitet/edx-platform,BehavioralInsightsTeam/edx-platform,morenopc/edx-platform,LearnEra/LearnEraPlaftform,halvertoluke/edx-platform,alu042/edx-platform,philanthropy-u/edx-platform,dkarakats/edx-platform,IndonesiaX/edx-platform,lduarte1991/edx-platform,antoviaque/edx-platform,marcore/edx-platform,vikas1885/test1,ak2703/edx-platform,nttks/jenkins-test,nttks/jenkins-test,shubhdev/edxOnBaadal,itsjeyd/edx-platform,sameetb-cuelogic/edx-platform-test,deepsrijit1105/edx-platform,marcore/edx-platform,cognitiveclass/edx-platform,ahmedaljazzar/edx-platform,dcosentino/edx-platform,kxliugang/edx-platform,PepperPD/edx-pepper-platform,benpatterson/edx-platform,franosincic/edx-platform,rismalrv/edx-platform,mahendra-r/edx-platform,DNFcode/edx-platform,Semi-global/edx-platform,appsembler/edx-platform,kamalx/edx-platform,10clouds/edx-platform,kxliugang/edx-platform,doismellburning/edx-platform,RPI-OPENEDX/edx-platform,pdehaye/theming-edx-platform,Softmotions/edx-platform,torchingloom/edx-platform,antonve/s4-project-mooc,dkarakats/edx-platform,rue89-tech/edx-platform,don-github/edx-platform,dcosentino/edx-platform,torchingloom/edx-platform,RPI-OPENEDX/edx-platform,knehez/edx-platform,hkawasaki/kawasaki-aio8-0,cselis86/edx-platform,pelikanchik/edx-platform,UOMx/edx-platform,nanolearningllc/edx-platform-cypress,hmcmooc/muddx-platform,alexthered/kienhoc-platform,RPI-OPENEDX/edx-platform,AkA84/edx-platform,halvertoluke/edx-platform,sudheerchintala/LearnEraPlatForm,adoosii/edx-platform,shashank971/edx-platform,devs1991/test_edx_docmode,tiagochiavericosta/edx-platform,martynovp/edx-platform,peterm-itr/edx-platform,rationalAgent/edx-platform-custom,jswope00/griffinx,Edraak/edraak-platform,eestay/edx-platform,shubhdev/openedx,nttks/edx-platform,shashank971/edx-platform,chudaol/edx-platform,morenopc/edx-platform,Unow/edx-platform,eemirtekin/edx-platform,beacloudgenius/edx-platform,Semi-global/edx-platform,olexiim/edx-platform,CredoReference/edx-platform,nagyistoce/edx-platform,ak2703/edx-platform,apigee/edx-platform,mushtaqak/edx-platform,chrisndodge/edx-platform,rationalAgent/edx-platform-custom,syjeon/new_edx,shubhdev/edxOnBaadal,hamzehd/edx-platform,IONISx/edx-platform,Unow/edx-platform,yokose-ks/edx-platform,doganov/edx-platform,xingyepei/edx-platform,zadgroup/edx-platform,dsajkl/reqiop,PepperPD/edx-pepper-platform,TsinghuaX/edx-platform,nanolearningllc/edx-platform-cypress,ahmadio/edx-platform,apigee/edx-platform,playm2mboy/edx-platform,vismartltd/edx-platform,CourseTalk/edx-platform,pelikanchik/edx-platform,Stanford-Online/edx-platform,bitifirefly/edx-platform,gsehub/edx-platform,shubhdev/edx-platform,IONISx/edx-platform,naresh21/synergetics-edx-platform,beacloudgenius/edx-platform,wwj718/ANALYSE,xuxiao19910803/edx-platform,zhenzhai/edx-platform,itsjeyd/edx-platform,abdoosh00/edx-rtl-final,jelugbo/tundex,jazztpt/edx-platform,utecuy/edx-platform,appliedx/edx-platform,xinjiguaike/edx-platform,ubc/edx-platform,MSOpenTech/edx-platform,xinjiguaike/edx-platform,hkawasaki/kawasaki-aio8-1,antonve/s4-project-mooc,jjmiranda/edx-platform,DefyVentures/edx-platform,franosincic/edx-platform,ferabra/edx-platform,jruiperezv/ANALYSE,benpatterson/edx-platform,EduPepperPDTesting/pepper2013-testing,synergeticsedx/deployment-wipro,jazkarta/edx-platform,pepeportela/edx-platform,jazkarta/edx-platform-for-isc,rismalrv/edx-platform,utecuy/edx-platform,nikolas/edx-platform,antonve/s4-project-mooc,IndonesiaX/edx-platform,cyanna/edx-platform,philanthropy-u/edx-platform,rhndg/openedx,shashank971/edx-platform,zofuthan/edx-platform,mahendra-r/edx-platform,doismellburning/edx-platform,carsongee/edx-platform,10clouds/edx-platform,alexthered/kienhoc-platform,proversity-org/edx-platform,synergeticsedx/deployment-wipro,a-parhom/edx-platform,inares/edx-platform,shubhdev/edx-platform,wwj718/edx-platform,SivilTaram/edx-platform,romain-li/edx-platform,praveen-pal/edx-platform,raccoongang/edx-platform,jelugbo/tundex,ampax/edx-platform-backup,sudheerchintala/LearnEraPlatForm,DNFcode/edx-platform,ahmadio/edx-platform,jbassen/edx-platform,mjg2203/edx-platform-seas,AkA84/edx-platform,dkarakats/edx-platform,Edraak/edraak-platform,don-github/edx-platform,jonathan-beard/edx-platform,EDUlib/edx-platform,CredoReference/edx-platform,EduPepperPDTesting/pepper2013-testing,jazztpt/edx-platform,xuxiao19910803/edx-platform,bigdatauniversity/edx-platform,TeachAtTUM/edx-platform,chudaol/edx-platform,longmen21/edx-platform,teltek/edx-platform,CourseTalk/edx-platform,abdoosh00/edx-rtl-final,mjirayu/sit_academy,y12uc231/edx-platform,arbrandes/edx-platform,gsehub/edx-platform,edx-solutions/edx-platform,polimediaupv/edx-platform,knehez/edx-platform,UXE/local-edx,praveen-pal/edx-platform,sameetb-cuelogic/edx-platform-test,ahmadiga/min_edx,eestay/edx-platform,solashirai/edx-platform,eduNEXT/edunext-platform,pabloborrego93/edx-platform,devs1991/test_edx_docmode,jamiefolsom/edx-platform,hkawasaki/kawasaki-aio8-0,RPI-OPENEDX/edx-platform,deepsrijit1105/edx-platform,OmarIthawi/edx-platform,hkawasaki/kawasaki-aio8-1,cognitiveclass/edx-platform,hamzehd/edx-platform,abdoosh00/edraak,deepsrijit1105/edx-platform,eduNEXT/edx-platform,unicri/edx-platform,mcgachey/edx-platform,antonve/s4-project-mooc,nikolas/edx-platform,shubhdev/edx-platform,Softmotions/edx-platform,hastexo/edx-platform,fly19890211/edx-platform,jamesblunt/edx-platform,DefyVentures/edx-platform,edx-solutions/edx-platform,cselis86/edx-platform,playm2mboy/edx-platform,antoviaque/edx-platform,gymnasium/edx-platform,doganov/edx-platform,edx/edx-platform,pku9104038/edx-platform,jazkarta/edx-platform-for-isc,Edraak/edx-platform,yokose-ks/edx-platform,martynovp/edx-platform,rhndg/openedx,ahmedaljazzar/edx-platform,Edraak/circleci-edx-platform,LICEF/edx-platform,LICEF/edx-platform,bdero/edx-platform,eduNEXT/edunext-platform,polimediaupv/edx-platform,dsajkl/123,AkA84/edx-platform,mbareta/edx-platform-ft,ovnicraft/edx-platform,valtech-mooc/edx-platform,benpatterson/edx-platform,chand3040/cloud_that,J861449197/edx-platform,chauhanhardik/populo_2,caesar2164/edx-platform,atsolakid/edx-platform,teltek/edx-platform,ahmadiga/min_edx,nanolearning/edx-platform,valtech-mooc/edx-platform,zerobatu/edx-platform,TeachAtTUM/edx-platform,Kalyzee/edx-platform,fly19890211/edx-platform,WatanabeYasumasa/edx-platform,nanolearningllc/edx-platform-cypress-2,knehez/edx-platform,jbassen/edx-platform,mcgachey/edx-platform,philanthropy-u/edx-platform,simbs/edx-platform,torchingloom/edx-platform,martynovp/edx-platform,eduNEXT/edx-platform,bigdatauniversity/edx-platform,inares/edx-platform,tiagochiavericosta/edx-platform,etzhou/edx-platform,JCBarahona/edX,Stanford-Online/edx-platform,DNFcode/edx-platform,jswope00/GAI,jazkarta/edx-platform-for-isc,simbs/edx-platform,mitocw/edx-platform,jolyonb/edx-platform,jamesblunt/edx-platform,jonathan-beard/edx-platform,naresh21/synergetics-edx-platform,ampax/edx-platform,leansoft/edx-platform,arifsetiawan/edx-platform,don-github/edx-platform,4eek/edx-platform,teltek/edx-platform,pdehaye/theming-edx-platform,Stanford-Online/edx-platform,IndonesiaX/edx-platform,utecuy/edx-platform,sameetb-cuelogic/edx-platform-test,dsajkl/reqiop,shurihell/testasia,ampax/edx-platform-backup,pku9104038/edx-platform,ubc/edx-platform,louyihua/edx-platform,EduPepperPDTesting/pepper2013-testing,dsajkl/123,rationalAgent/edx-platform-custom,MakeHer/edx-platform,proversity-org/edx-platform,Semi-global/edx-platform,kalebhartje/schoolboost,romain-li/edx-platform,jazkarta/edx-platform,Unow/edx-platform,mjg2203/edx-platform-seas,shashank971/edx-platform,xuxiao19910803/edx,yokose-ks/edx-platform,Kalyzee/edx-platform,fly19890211/edx-platform,hkawasaki/kawasaki-aio8-0,shurihell/testasia,bdero/edx-platform,mitocw/edx-platform,appliedx/edx-platform,sameetb-cuelogic/edx-platform-test,abdoosh00/edraak,Edraak/edx-platform,nanolearningllc/edx-platform-cypress,ferabra/edx-platform,wwj718/edx-platform,utecuy/edx-platform,xingyepei/edx-platform,polimediaupv/edx-platform,SravanthiSinha/edx-platform,cpennington/edx-platform,iivic/BoiseStateX,edx/edx-platform,procangroup/edx-platform,cecep-edu/edx-platform,nttks/edx-platform,Ayub-Khan/edx-platform,DefyVentures/edx-platform,prarthitm/edxplatform,iivic/BoiseStateX,jamiefolsom/edx-platform,amir-qayyum-khan/edx-platform,openfun/edx-platform,waheedahmed/edx-platform,playm2mboy/edx-platform,nagyistoce/edx-platform,zofuthan/edx-platform,y12uc231/edx-platform,hastexo/edx-platform,SravanthiSinha/edx-platform,mtlchun/edx,hamzehd/edx-platform,franosincic/edx-platform,rue89-tech/edx-platform,shubhdev/edxOnBaadal,shubhdev/edx-platform,dkarakats/edx-platform,msegado/edx-platform,jbzdak/edx-platform,xinjiguaike/edx-platform,jazkarta/edx-platform,zubair-arbi/edx-platform,defance/edx-platform,jswope00/GAI,ESOedX/edx-platform,vismartltd/edx-platform,openfun/edx-platform,kalebhartje/schoolboost,arifsetiawan/edx-platform,motion2015/a3,Edraak/edx-platform,beni55/edx-platform,carsongee/edx-platform,ak2703/edx-platform,JioEducation/edx-platform,amir-qayyum-khan/edx-platform,bdero/edx-platform,eduNEXT/edunext-platform,andyzsf/edx,nanolearningllc/edx-platform-cypress-2,jzoldak/edx-platform,jjmiranda/edx-platform,chauhanhardik/populo_2,cecep-edu/edx-platform,motion2015/edx-platform,y12uc231/edx-platform,Shrhawk/edx-platform,hkawasaki/kawasaki-aio8-2,devs1991/test_edx_docmode,shabab12/edx-platform,ampax/edx-platform,olexiim/edx-platform,proversity-org/edx-platform,ahmadiga/min_edx,CredoReference/edx-platform,B-MOOC/edx-platform,jonathan-beard/edx-platform,MSOpenTech/edx-platform,DefyVentures/edx-platform,mtlchun/edx,syjeon/new_edx,mushtaqak/edx-platform,SivilTaram/edx-platform,zhenzhai/edx-platform,UXE/local-edx,lduarte1991/edx-platform,OmarIthawi/edx-platform,olexiim/edx-platform,mahendra-r/edx-platform,andyzsf/edx,ESOedX/edx-platform,rismalrv/edx-platform,analyseuc3m/ANALYSE-v1,zadgroup/edx-platform,jbassen/edx-platform,pku9104038/edx-platform,Edraak/edx-platform,a-parhom/edx-platform,raccoongang/edx-platform,longmen21/edx-platform,wwj718/ANALYSE,raccoongang/edx-platform,doismellburning/edx-platform,ovnicraft/edx-platform,MSOpenTech/edx-platform,cselis86/edx-platform,eestay/edx-platform,mjirayu/sit_academy,morenopc/edx-platform,morenopc/edx-platform,shurihell/testasia,shurihell/testasia,adoosii/edx-platform,BehavioralInsightsTeam/edx-platform,Kalyzee/edx-platform,playm2mboy/edx-platform,jzoldak/edx-platform,Ayub-Khan/edx-platform,don-github/edx-platform,zhenzhai/edx-platform,gsehub/edx-platform,shubhdev/openedx,chrisndodge/edx-platform,Softmotions/edx-platform,eestay/edx-platform,jelugbo/tundex,cognitiveclass/edx-platform,ubc/edx-platform,EduPepperPDTesting/pepper2013-testing,martynovp/edx-platform,jazztpt/edx-platform,carsongee/edx-platform,jolyonb/edx-platform,kmoocdev2/edx-platform,edx-solutions/edx-platform,mbareta/edx-platform-ft,Livit/Livit.Learn.EdX,mitocw/edx-platform,dkarakats/edx-platform,ahmadio/edx-platform,zerobatu/edx-platform,jzoldak/edx-platform,amir-qayyum-khan/edx-platform,chudaol/edx-platform,wwj718/ANALYSE,nikolas/edx-platform,PepperPD/edx-pepper-platform,rismalrv/edx-platform,antonve/s4-project-mooc,leansoft/edx-platform,SravanthiSinha/edx-platform,adoosii/edx-platform,msegado/edx-platform,unicri/edx-platform,TsinghuaX/edx-platform,ubc/edx-platform,SivilTaram/edx-platform,Endika/edx-platform,solashirai/edx-platform,auferack08/edx-platform,jamesblunt/edx-platform,pku9104038/edx-platform,olexiim/edx-platform,EduPepperPD/pepper2013,rationalAgent/edx-platform-custom,atsolakid/edx-platform,ferabra/edx-platform,nikolas/edx-platform,unicri/edx-platform,MakeHer/edx-platform,arbrandes/edx-platform,shabab12/edx-platform,etzhou/edx-platform,Lektorium-LLC/edx-platform,SravanthiSinha/edx-platform,y12uc231/edx-platform,abdoosh00/edx-rtl-final,chauhanhardik/populo_2,vasyarv/edx-platform,kmoocdev2/edx-platform,cselis86/edx-platform,nanolearningllc/edx-platform-cypress-2,mjirayu/sit_academy,zofuthan/edx-platform,4eek/edx-platform,ZLLab-Mooc/edx-platform,motion2015/a3,franosincic/edx-platform,pabloborrego93/edx-platform,hmcmooc/muddx-platform,chauhanhardik/populo_2,chudaol/edx-platform,xingyepei/edx-platform,stvstnfrd/edx-platform,10clouds/edx-platform,analyseuc3m/ANALYSE-v1,Endika/edx-platform,a-parhom/edx-platform,MakeHer/edx-platform,nanolearning/edx-platform,hastexo/edx-platform,a-parhom/edx-platform,rismalrv/edx-platform,longmen21/edx-platform,jazkarta/edx-platform,ak2703/edx-platform,hkawasaki/kawasaki-aio8-1,nttks/jenkins-test,auferack08/edx-platform,dcosentino/edx-platform,IITBinterns13/edx-platform-dev,kmoocdev2/edx-platform,caesar2164/edx-platform,apigee/edx-platform,y12uc231/edx-platform,alexthered/kienhoc-platform,defance/edx-platform,Edraak/circleci-edx-platform,nanolearningllc/edx-platform-cypress-2,hkawasaki/kawasaki-aio8-2,fintech-circle/edx-platform,rue89-tech/edx-platform,Unow/edx-platform,jbzdak/edx-platform,cpennington/edx-platform,hamzehd/edx-platform,4eek/edx-platform,EduPepperPD/pepper2013,mjirayu/sit_academy,Edraak/circleci-edx-platform,ahmadio/edx-platform,solashirai/edx-platform,EduPepperPD/pepper2013,CredoReference/edx-platform,jswope00/griffinx,adoosii/edx-platform,don-github/edx-platform,jazkarta/edx-platform,waheedahmed/edx-platform,edry/edx-platform,tanmaykm/edx-platform,mtlchun/edx,chudaol/edx-platform,bigdatauniversity/edx-platform,Semi-global/edx-platform,miptliot/edx-platform,cecep-edu/edx-platform,tanmaykm/edx-platform,kalebhartje/schoolboost,B-MOOC/edx-platform,zhenzhai/edx-platform,Kalyzee/edx-platform,jbzdak/edx-platform,bigdatauniversity/edx-platform,devs1991/test_edx_docmode,eduNEXT/edx-platform,jazkarta/edx-platform-for-isc,rationalAgent/edx-platform-custom,pelikanchik/edx-platform,halvertoluke/edx-platform,etzhou/edx-platform,ZLLab-Mooc/edx-platform,Edraak/circleci-edx-platform,jswope00/GAI,B-MOOC/edx-platform,MakeHer/edx-platform,lduarte1991/edx-platform,nanolearningllc/edx-platform-cypress,jamesblunt/edx-platform,jjmiranda/edx-platform,unicri/edx-platform,rue89-tech/edx-platform,naresh21/synergetics-edx-platform,inares/edx-platform,ESOedX/edx-platform,nttks/edx-platform,fly19890211/edx-platform,xuxiao19910803/edx-platform,atsolakid/edx-platform,Livit/Livit.Learn.EdX,Edraak/circleci-edx-platform,nanolearning/edx-platform,analyseuc3m/ANALYSE-v1,zofuthan/edx-platform,hmcmooc/muddx-platform,shubhdev/edxOnBaadal,mtlchun/edx,olexiim/edx-platform,torchingloom/edx-platform,J861449197/edx-platform,knehez/edx-platform,louyihua/edx-platform,jzoldak/edx-platform,polimediaupv/edx-platform,jbzdak/edx-platform,UXE/local-edx,xinjiguaike/edx-platform,dcosentino/edx-platform,LearnEra/LearnEraPlaftform,AkA84/edx-platform,ESOedX/edx-platform,vasyarv/edx-platform,arifsetiawan/edx-platform,wwj718/ANALYSE,angelapper/edx-platform,alu042/edx-platform,ahmadio/edx-platform,gymnasium/edx-platform,kamalx/edx-platform,abdoosh00/edx-rtl-final,kmoocdev/edx-platform,gymnasium/edx-platform,jswope00/griffinx,Lektorium-LLC/edx-platform,PepperPD/edx-pepper-platform,Endika/edx-platform,dsajkl/123,doganov/edx-platform,jbzdak/edx-platform,SravanthiSinha/edx-platform,peterm-itr/edx-platform,appliedx/edx-platform,amir-qayyum-khan/edx-platform,nttks/edx-platform,openfun/edx-platform,solashirai/edx-platform,jamiefolsom/edx-platform,sudheerchintala/LearnEraPlatForm,Shrhawk/edx-platform,chrisndodge/edx-platform,raccoongang/edx-platform,xuxiao19910803/edx-platform,valtech-mooc/edx-platform,zubair-arbi/edx-platform,procangroup/edx-platform,analyseuc3m/ANALYSE-v1,JioEducation/edx-platform,yokose-ks/edx-platform,miptliot/edx-platform,leansoft/edx-platform,eemirtekin/edx-platform,kxliugang/edx-platform,mushtaqak/edx-platform,benpatterson/edx-platform,louyihua/edx-platform,IONISx/edx-platform,J861449197/edx-platform,JCBarahona/edX,antoviaque/edx-platform,gymnasium/edx-platform,andyzsf/edx,Lektorium-LLC/edx-platform,bdero/edx-platform,dsajkl/123,ovnicraft/edx-platform,jonathan-beard/edx-platform,nanolearningllc/edx-platform-cypress,praveen-pal/edx-platform,mushtaqak/edx-platform,motion2015/edx-platform,JCBarahona/edX,jswope00/griffinx,naresh21/synergetics-edx-platform,jazztpt/edx-platform,alexthered/kienhoc-platform,jelugbo/tundex,bitifirefly/edx-platform,JioEducation/edx-platform,pdehaye/theming-edx-platform,louyihua/edx-platform,bitifirefly/edx-platform,motion2015/a3,jruiperezv/ANALYSE,shubhdev/openedx,Livit/Livit.Learn.EdX,arifsetiawan/edx-platform,beacloudgenius/edx-platform,morpheby/levelup-by,IITBinterns13/edx-platform-dev,waheedahmed/edx-platform,valtech-mooc/edx-platform,kursitet/edx-platform,cpennington/edx-platform,Lektorium-LLC/edx-platform,xuxiao19910803/edx,beni55/edx-platform,xingyepei/edx-platform,edry/edx-platform,kmoocdev2/edx-platform,tiagochiavericosta/edx-platform,shabab12/edx-platform,nikolas/edx-platform,zadgroup/edx-platform,mushtaqak/edx-platform,kxliugang/edx-platform,jamiefolsom/edx-platform,ZLLab-Mooc/edx-platform,polimediaupv/edx-platform,pepeportela/edx-platform,EDUlib/edx-platform,zhenzhai/edx-platform,kmoocdev/edx-platform,procangroup/edx-platform,syjeon/new_edx,mcgachey/edx-platform,chand3040/cloud_that,jonathan-beard/edx-platform,EDUlib/edx-platform,devs1991/test_edx_docmode,wwj718/edx-platform,pabloborrego93/edx-platform,iivic/BoiseStateX,EduPepperPD/pepper2013,xingyepei/edx-platform,vismartltd/edx-platform,fintech-circle/edx-platform,kamalx/edx-platform,doganov/edx-platform,yokose-ks/edx-platform,cognitiveclass/edx-platform,appliedx/edx-platform,ZLLab-Mooc/edx-platform,jazkarta/edx-platform-for-isc,jbassen/edx-platform,Softmotions/edx-platform,IndonesiaX/edx-platform,eemirtekin/edx-platform,tanmaykm/edx-platform,eduNEXT/edunext-platform,nttks/edx-platform,chauhanhardik/populo,nttks/jenkins-test,vikas1885/test1,rhndg/openedx,kmoocdev2/edx-platform,simbs/edx-platform,openfun/edx-platform,B-MOOC/edx-platform,dsajkl/123,nanolearning/edx-platform,morpheby/levelup-by,msegado/edx-platform,wwj718/edx-platform,Ayub-Khan/edx-platform,WatanabeYasumasa/edx-platform,fintech-circle/edx-platform,dsajkl/reqiop,iivic/BoiseStateX,sudheerchintala/LearnEraPlatForm,TsinghuaX/edx-platform,motion2015/edx-platform,morpheby/levelup-by,chrisndodge/edx-platform,kursitet/edx-platform,edry/edx-platform,xuxiao19910803/edx,nagyistoce/edx-platform,UOMx/edx-platform,Ayub-Khan/edx-platform,WatanabeYasumasa/edx-platform,shubhdev/edx-platform,pabloborrego93/edx-platform,ferabra/edx-platform,ampax/edx-platform-backup,mtlchun/edx,benpatterson/edx-platform,praveen-pal/edx-platform,adoosii/edx-platform,playm2mboy/edx-platform,apigee/edx-platform,kmoocdev/edx-platform,kmoocdev/edx-platform,fintech-circle/edx-platform,peterm-itr/edx-platform,kalebhartje/schoolboost,JCBarahona/edX,hamzehd/edx-platform,ahmedaljazzar/edx-platform,romain-li/edx-platform,shubhdev/openedx,IONISx/edx-platform,B-MOOC/edx-platform,marcore/edx-platform,chauhanhardik/populo_2,devs1991/test_edx_docmode,LICEF/edx-platform,edry/edx-platform,kamalx/edx-platform,stvstnfrd/edx-platform,4eek/edx-platform,devs1991/test_edx_docmode,Ayub-Khan/edx-platform,Stanford-Online/edx-platform,IITBinterns13/edx-platform-dev,atsolakid/edx-platform,etzhou/edx-platform,10clouds/edx-platform,appsembler/edx-platform,beni55/edx-platform,cecep-edu/edx-platform,rue89-tech/edx-platform,eduNEXT/edx-platform,pelikanchik/edx-platform,alexthered/kienhoc-platform,jamiefolsom/edx-platform,ampax/edx-platform-backup,zerobatu/edx-platform,mitocw/edx-platform,pomegranited/edx-platform,morpheby/levelup-by,mjirayu/sit_academy,zubair-arbi/edx-platform,halvertoluke/edx-platform,pomegranited/edx-platform,beacloudgenius/edx-platform,tiagochiavericosta/edx-platform,beni55/edx-platform,ampax/edx-platform,alu042/edx-platform,openfun/edx-platform,cyanna/edx-platform,hkawasaki/kawasaki-aio8-2,dcosentino/edx-platform,rhndg/openedx,simbs/edx-platform,SivilTaram/edx-platform,BehavioralInsightsTeam/edx-platform,xuxiao19910803/edx,vasyarv/edx-platform,EduPepperPD/pepper2013,Shrhawk/edx-platform,ovnicraft/edx-platform,UXE/local-edx,AkA84/edx-platform,waheedahmed/edx-platform,abdoosh00/edraak,jolyonb/edx-platform,BehavioralInsightsTeam/edx-platform,ahmadiga/min_edx,PepperPD/edx-pepper-platform,vikas1885/test1,auferack08/edx-platform,jruiperezv/ANALYSE,wwj718/edx-platform,angelapper/edx-platform,chauhanhardik/populo,bitifirefly/edx-platform,mcgachey/edx-platform,ovnicraft/edx-platform,Shrhawk/edx-platform,hkawasaki/kawasaki-aio8-2,xuxiao19910803/edx-platform,synergeticsedx/deployment-wipro,procangroup/edx-platform,JCBarahona/edX,prarthitm/edxplatform,OmarIthawi/edx-platform,cyanna/edx-platform,appsembler/edx-platform,jruiperezv/ANALYSE,proversity-org/edx-platform,fly19890211/edx-platform,carsongee/edx-platform,motion2015/a3,EduPepperPDTesting/pepper2013-testing,pomegranited/edx-platform,cselis86/edx-platform,etzhou/edx-platform,knehez/edx-platform,tanmaykm/edx-platform,TeachAtTUM/edx-platform,tiagochiavericosta/edx-platform,vasyarv/edx-platform,zerobatu/edx-platform,shabab12/edx-platform,DNFcode/edx-platform,caesar2164/edx-platform,chand3040/cloud_that,ferabra/edx-platform,chauhanhardik/populo,zadgroup/edx-platform,mahendra-r/edx-platform,inares/edx-platform,angelapper/edx-platform,ampax/edx-platform-backup,CourseTalk/edx-platform,simbs/edx-platform,shurihell/testasia,kmoocdev/edx-platform,Edraak/edraak-platform,ubc/edx-platform,mcgachey/edx-platform,Softmotions/edx-platform,zadgroup/edx-platform,pepeportela/edx-platform,inares/edx-platform,franosincic/edx-platform,Livit/Livit.Learn.EdX,kamalx/edx-platform,romain-li/edx-platform,shubhdev/openedx,doismellburning/edx-platform,ampax/edx-platform,eemirtekin/edx-platform,torchingloom/edx-platform,cyanna/edx-platform,LICEF/edx-platform,beni55/edx-platform,philanthropy-u/edx-platform,atsolakid/edx-platform,Semi-global/edx-platform,arifsetiawan/edx-platform,Kalyzee/edx-platform,solashirai/edx-platform,jswope00/GAI,motion2015/edx-platform,martynovp/edx-platform,angelapper/edx-platform,prarthitm/edxplatform,defance/edx-platform,cognitiveclass/edx-platform,mahendra-r/edx-platform,dsajkl/reqiop,msegado/edx-platform,longmen21/edx-platform,Edraak/edx-platform,peterm-itr/edx-platform,waheedahmed/edx-platform,halvertoluke/edx-platform,mbareta/edx-platform-ft,OmarIthawi/edx-platform,vismartltd/edx-platform,kxliugang/edx-platform,romain-li/edx-platform,nanolearningllc/edx-platform-cypress-2,IONISx/edx-platform,pomegranited/edx-platform,prarthitm/edxplatform,rhndg/openedx,MSOpenTech/edx-platform,ZLLab-Mooc/edx-platform,nanolearning/edx-platform,teltek/edx-platform,stvstnfrd/edx-platform,xuxiao19910803/edx,caesar2164/edx-platform,Shrhawk/edx-platform,stvstnfrd/edx-platform,SivilTaram/edx-platform,andyzsf/edx,shubhdev/edxOnBaadal,chand3040/cloud_that,edry/edx-platform,pomegranited/edx-platform,jelugbo/tundex,nagyistoce/edx-platform,LearnEra/LearnEraPlaftform,jbassen/edx-platform,hkawasaki/kawasaki-aio8-1,jswope00/griffinx,jruiperezv/ANALYSE,arbrandes/edx-platform,sameetb-cuelogic/edx-platform-test,miptliot/edx-platform,chauhanhardik/populo,chand3040/cloud_that,zubair-arbi/edx-platform,beacloudgenius/edx-platform,deepsrijit1105/edx-platform,cpennington/edx-platform,motion2015/a3,synergeticsedx/deployment-wipro,MSOpenTech/edx-platform,itsjeyd/edx-platform,iivic/BoiseStateX,edx-solutions/edx-platform,kursitet/edx-platform,arbrandes/edx-platform,DNFcode/edx-platform,mjg2203/edx-platform-seas,xinjiguaike/edx-platform | common/djangoapps/cache_toolbox/discussion_cache.py | common/djangoapps/cache_toolbox/discussion_cache.py | import logging
from django.core.cache import cache, get_cache
from datetime import datetime
def _get_discussion_cache():
return get_cache('mongo_metadata_inheritance')
def get_discussion_cache_key(course_id):
return 'discussion_items_{0}'.format(course_id)
def get_discussion_cache_entry(modulestore, course_id):
cache_entry = None
cache = _get_discussion_cache()
if cache is not None:
cache_entry = cache.get(get_discussion_cache_key(course_id), None)
# @todo: add expiry here
if cache_entry is None:
cache_entry = generate_discussion_cache_entry(modulestore, course_id)
return cache_entry.get('modules',[])
def generate_discussion_cache_entry(modulestore, course_id):
# make this a NOP for now. We have to figure out how to pickle the result set
return
components = course_id.split('/')
all_discussion_modules = modulestore.get_items(['i4x', components[0], components[1], 'discussion', None],
course_id=course_id)
cache = _get_discussion_cache()
entry = {'modules': all_discussion_modules, 'timestamp': datetime.now()}
if cache is not None:
cache.set(get_discussion_cache_key(course_id), entry)
return entry
def modulestore_update_signal_handler(modulestore = None, course_id = None, location = None, **kwargs):
"""called when there is an write event in our modulestore
"""
return
if location.category == 'discussion':
logging.debug('******* got modulestore update signal. Regenerating discussion cache for {0}'.format(course_id))
# refresh the cache entry if we've changed a discussion module
generate_discussion_cache_entry(modulestore, course_id)
def discussion_cache_register_for_updates(modulestore):
if modulestore.modulestore_update_signal is not None:
modulestore.modulestore_update_signal.connect(modulestore_update_signal_handler) | import logging
from django.core.cache import cache, get_cache
from datetime import datetime
def _get_discussion_cache():
return get_cache('mongo_metadata_inheritance')
def get_discussion_cache_key(course_id):
return 'discussion_items_{0}'.format(course_id)
def get_discussion_cache_entry(modulestore, course_id):
cache_entry = None
cache = _get_discussion_cache()
if cache is not None:
cache_entry = cache.get(get_discussion_cache_key(course_id), None)
# @todo: add expiry here
if cache_entry is None:
cache_entry = generate_discussion_cache_entry(modulestore, course_id)
return cache_entry.get('modules',[])
def generate_discussion_cache_entry(modulestore, course_id):
components = course_id.split('/')
all_discussion_modules = modulestore.get_items(['i4x', components[0], components[1], 'discussion', None],
course_id=course_id)
cache = _get_discussion_cache()
entry = {'modules': all_discussion_modules, 'timestamp': datetime.now()}
logging.debug('**** entry = {0}'.format(entry))
if cache is not None:
cache.set(get_discussion_cache_key(course_id), entry)
return entry
def modulestore_update_signal_handler(modulestore = None, course_id = None, location = None, **kwargs):
"""called when there is an write event in our modulestore
"""
if location.category == 'discussion':
logging.debug('******* got modulestore update signal. Regenerating discussion cache for {0}'.format(course_id))
# refresh the cache entry if we've changed a discussion module
generate_discussion_cache_entry(modulestore, course_id)
def discussion_cache_register_for_updates(modulestore):
if modulestore.modulestore_update_signal is not None:
modulestore.modulestore_update_signal.connect(modulestore_update_signal_handler) | agpl-3.0 | Python |
aa9f15af76e67a529be345850e7913755cba8b21 | Fix typo | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | python/ql/test/experimental/library-tests/frameworks/django-v1/response_test.py | python/ql/test/experimental/library-tests/frameworks/django-v1/response_test.py | from django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse, HttpResponseNotFound
# Not an XSS sink, since the Content-Type is not "text/html"
# FP reported in https://github.com/github/codeql-python-team/issues/38
def fp_json_response(request):
# implicitly sets Content-Type to "application/json"
return JsonResponse({"foo": request.GET.get("foo")})
# Not an XSS sink, since the Content-Type is not "text/html"
def fp_manual_json_response(request):
json_data = '{"json": "{}"}'.format(request.GET.get("foo"))
return HttpResponse(json_data, content_type="application/json")
# Not an XSS sink, since the Content-Type is not "text/html"
def fp_manual_content_type(request):
return HttpResponse('<img src="0" onerror="alert(1)">', content_type="text/plain")
# XSS FP reported in https://github.com/github/codeql/issues/3466
# Note: This should be a open-redirect sink, but not a XSS sink.
def fp_redirect(request):
return HttpResponseRedirect(request.GET.get("next"))
# Ensure that simple subclasses are still vuln to XSS
def tp_not_found(request):
return HttpResponseNotFound(request.GET.get("name"))
# Ensure we still have a XSS sink when manually setting the content_type to HTML
def tp_manual_response_type(request):
return HttpResponse(request.GET.get("name"), content_type="text/html; charset=utf-8")
| from django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse, HttpResponseNotFound
# Not an XSS sink, since the Content-Type is not "text/html"
# FP reported in https://github.com/github/codeql-python-team/issues/38
def fp_json_response(request):
# implicitly sets Content-Type to "application/json"
return JsonResponse({"foo": request.GET.get("foo")})
# Not an XSS sink, since the Content-Type is not "text/html"
def fp_manual_json_response(request):
json_data = '{"json": "{}"}'.format(request.GET.get("foo"))
return HttpResponse(json_data, content_type="application/json")
# Not an XSS sink, since the Content-Type is not "text/html"
def fp_manual_content_type(reuqest):
return HttpResponse('<img src="0" onerror="alert(1)">', content_type="text/plain")
# XSS FP reported in https://github.com/github/codeql/issues/3466
# Note: This should be a open-redirect sink, but not a XSS sink.
def fp_redirect(request):
return HttpResponseRedirect(request.GET.get("next"))
# Ensure that simple subclasses are still vuln to XSS
def tp_not_found(request):
return HttpResponseNotFound(request.GET.get("name"))
# Ensure we still have a XSS sink when manually setting the content_type to HTML
def tp_manual_response_type(request):
return HttpResponse(request.GET.get("name"), content_type="text/html; charset=utf-8")
| mit | Python |
8748de555f3cd939ecc39cf3bdae50a8677359d7 | Bump to v1.9.0 | Cal-CS-61A-Staff/ok-client | client/__init__.py | client/__init__.py | __version__ = 'v1.9.0'
FILE_NAME = 'ok'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| __version__ = 'v1.8.2'
FILE_NAME = 'ok'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| apache-2.0 | Python |
8ae4be4fea8af1d560f713338de36cdd651d6e2a | bump to 1.6.6. for auth changes | Cal-CS-61A-Staff/ok-client | client/__init__.py | client/__init__.py | __version__ = 'v1.6.6'
FILE_NAME = 'ok'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| __version__ = 'v1.6.5'
FILE_NAME = 'ok'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| apache-2.0 | Python |
0bb418c72d858322a8fc7ad515f36a045d9dd0de | update to v1.3.29 | jathak/ok-client,Cal-CS-61A-Staff/ok-client,jackzhao-mj/ok-client | client/__init__.py | client/__init__.py | __version__ = 'v1.3.29'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| __version__ = 'v1.3.28'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| apache-2.0 | Python |
4f3234433b97e7f243d54e9e95399f5cabecd315 | Change broken course_modes migration to not touch the database. | antoviaque/edx-platform,pepeportela/edx-platform,edx/edx-platform,teltek/edx-platform,ahmedaljazzar/edx-platform,mitocw/edx-platform,edx-solutions/edx-platform,hastexo/edx-platform,IndonesiaX/edx-platform,pepeportela/edx-platform,Lektorium-LLC/edx-platform,mitocw/edx-platform,marcore/edx-platform,romain-li/edx-platform,philanthropy-u/edx-platform,amir-qayyum-khan/edx-platform,angelapper/edx-platform,UOMx/edx-platform,jzoldak/edx-platform,10clouds/edx-platform,defance/edx-platform,antoviaque/edx-platform,Edraak/edraak-platform,JioEducation/edx-platform,10clouds/edx-platform,tanmaykm/edx-platform,Stanford-Online/edx-platform,Lektorium-LLC/edx-platform,raccoongang/edx-platform,pabloborrego93/edx-platform,ESOedX/edx-platform,defance/edx-platform,analyseuc3m/ANALYSE-v1,ahmedaljazzar/edx-platform,edx-solutions/edx-platform,mbareta/edx-platform-ft,cecep-edu/edx-platform,eduNEXT/edunext-platform,stvstnfrd/edx-platform,MakeHer/edx-platform,jjmiranda/edx-platform,teltek/edx-platform,edx-solutions/edx-platform,CourseTalk/edx-platform,franosincic/edx-platform,solashirai/edx-platform,franosincic/edx-platform,CredoReference/edx-platform,CredoReference/edx-platform,caesar2164/edx-platform,pepeportela/edx-platform,kmoocdev2/edx-platform,marcore/edx-platform,naresh21/synergetics-edx-platform,philanthropy-u/edx-platform,solashirai/edx-platform,franosincic/edx-platform,arbrandes/edx-platform,philanthropy-u/edx-platform,gymnasium/edx-platform,analyseuc3m/ANALYSE-v1,ampax/edx-platform,franosincic/edx-platform,MakeHer/edx-platform,louyihua/edx-platform,alu042/edx-platform,waheedahmed/edx-platform,angelapper/edx-platform,lduarte1991/edx-platform,synergeticsedx/deployment-wipro,cpennington/edx-platform,EDUlib/edx-platform,teltek/edx-platform,Endika/edx-platform,philanthropy-u/edx-platform,a-parhom/edx-platform,TeachAtTUM/edx-platform,synergeticsedx/deployment-wipro,jolyonb/edx-platform,kmoocdev2/edx-platform,deepsrijit1105/edx-platform,eduNEXT/edx-platform,shabab12/edx-platform,procangroup/edx-platform,cpennington/edx-platform,wwj718/edx-platform,ESOedX/edx-platform,fintech-circle/edx-platform,lduarte1991/edx-platform,proversity-org/edx-platform,teltek/edx-platform,shabab12/edx-platform,raccoongang/edx-platform,EDUlib/edx-platform,msegado/edx-platform,defance/edx-platform,mitocw/edx-platform,a-parhom/edx-platform,caesar2164/edx-platform,a-parhom/edx-platform,proversity-org/edx-platform,waheedahmed/edx-platform,JioEducation/edx-platform,cpennington/edx-platform,alu042/edx-platform,franosincic/edx-platform,edx/edx-platform,analyseuc3m/ANALYSE-v1,CredoReference/edx-platform,wwj718/edx-platform,doganov/edx-platform,Livit/Livit.Learn.EdX,devs1991/test_edx_docmode,10clouds/edx-platform,solashirai/edx-platform,ESOedX/edx-platform,eduNEXT/edx-platform,gymnasium/edx-platform,defance/edx-platform,prarthitm/edxplatform,cecep-edu/edx-platform,procangroup/edx-platform,tanmaykm/edx-platform,msegado/edx-platform,wwj718/edx-platform,synergeticsedx/deployment-wipro,jolyonb/edx-platform,devs1991/test_edx_docmode,analyseuc3m/ANALYSE-v1,devs1991/test_edx_docmode,chrisndodge/edx-platform,caesar2164/edx-platform,alu042/edx-platform,Endika/edx-platform,waheedahmed/edx-platform,JioEducation/edx-platform,Stanford-Online/edx-platform,edx/edx-platform,Endika/edx-platform,IndonesiaX/edx-platform,mbareta/edx-platform-ft,longmen21/edx-platform,naresh21/synergetics-edx-platform,longmen21/edx-platform,TeachAtTUM/edx-platform,jjmiranda/edx-platform,gsehub/edx-platform,eduNEXT/edunext-platform,EDUlib/edx-platform,jzoldak/edx-platform,prarthitm/edxplatform,itsjeyd/edx-platform,angelapper/edx-platform,jzoldak/edx-platform,chrisndodge/edx-platform,Lektorium-LLC/edx-platform,solashirai/edx-platform,Endika/edx-platform,doganov/edx-platform,UOMx/edx-platform,cecep-edu/edx-platform,naresh21/synergetics-edx-platform,eduNEXT/edx-platform,ampax/edx-platform,shabab12/edx-platform,CourseTalk/edx-platform,ESOedX/edx-platform,BehavioralInsightsTeam/edx-platform,CredoReference/edx-platform,appsembler/edx-platform,prarthitm/edxplatform,fintech-circle/edx-platform,MakeHer/edx-platform,hastexo/edx-platform,procangroup/edx-platform,amir-qayyum-khan/edx-platform,cecep-edu/edx-platform,Livit/Livit.Learn.EdX,miptliot/edx-platform,UOMx/edx-platform,IndonesiaX/edx-platform,devs1991/test_edx_docmode,procangroup/edx-platform,hastexo/edx-platform,pepeportela/edx-platform,miptliot/edx-platform,proversity-org/edx-platform,ahmedaljazzar/edx-platform,wwj718/edx-platform,romain-li/edx-platform,gsehub/edx-platform,synergeticsedx/deployment-wipro,10clouds/edx-platform,amir-qayyum-khan/edx-platform,gsehub/edx-platform,Livit/Livit.Learn.EdX,fintech-circle/edx-platform,BehavioralInsightsTeam/edx-platform,devs1991/test_edx_docmode,IndonesiaX/edx-platform,MakeHer/edx-platform,eduNEXT/edunext-platform,stvstnfrd/edx-platform,Livit/Livit.Learn.EdX,amir-qayyum-khan/edx-platform,angelapper/edx-platform,pabloborrego93/edx-platform,eduNEXT/edx-platform,gsehub/edx-platform,chrisndodge/edx-platform,devs1991/test_edx_docmode,TeachAtTUM/edx-platform,eduNEXT/edunext-platform,itsjeyd/edx-platform,solashirai/edx-platform,TeachAtTUM/edx-platform,a-parhom/edx-platform,jzoldak/edx-platform,antoviaque/edx-platform,lduarte1991/edx-platform,edx/edx-platform,doganov/edx-platform,devs1991/test_edx_docmode,miptliot/edx-platform,longmen21/edx-platform,ampax/edx-platform,edx-solutions/edx-platform,jolyonb/edx-platform,arbrandes/edx-platform,proversity-org/edx-platform,miptliot/edx-platform,marcore/edx-platform,CourseTalk/edx-platform,waheedahmed/edx-platform,BehavioralInsightsTeam/edx-platform,romain-li/edx-platform,ampax/edx-platform,appsembler/edx-platform,tanmaykm/edx-platform,pabloborrego93/edx-platform,longmen21/edx-platform,raccoongang/edx-platform,louyihua/edx-platform,louyihua/edx-platform,doganov/edx-platform,raccoongang/edx-platform,romain-li/edx-platform,arbrandes/edx-platform,kmoocdev2/edx-platform,UOMx/edx-platform,cecep-edu/edx-platform,prarthitm/edxplatform,mitocw/edx-platform,longmen21/edx-platform,romain-li/edx-platform,Stanford-Online/edx-platform,Edraak/edraak-platform,itsjeyd/edx-platform,appsembler/edx-platform,antoviaque/edx-platform,kmoocdev2/edx-platform,alu042/edx-platform,IndonesiaX/edx-platform,pabloborrego93/edx-platform,jolyonb/edx-platform,chrisndodge/edx-platform,kmoocdev2/edx-platform,hastexo/edx-platform,caesar2164/edx-platform,jjmiranda/edx-platform,Lektorium-LLC/edx-platform,CourseTalk/edx-platform,arbrandes/edx-platform,gymnasium/edx-platform,jjmiranda/edx-platform,doganov/edx-platform,cpennington/edx-platform,deepsrijit1105/edx-platform,msegado/edx-platform,appsembler/edx-platform,louyihua/edx-platform,BehavioralInsightsTeam/edx-platform,devs1991/test_edx_docmode,tanmaykm/edx-platform,JioEducation/edx-platform,ahmedaljazzar/edx-platform,itsjeyd/edx-platform,deepsrijit1105/edx-platform,mbareta/edx-platform-ft,deepsrijit1105/edx-platform,wwj718/edx-platform,fintech-circle/edx-platform,waheedahmed/edx-platform,Edraak/edraak-platform,Stanford-Online/edx-platform,Edraak/edraak-platform,stvstnfrd/edx-platform,stvstnfrd/edx-platform,msegado/edx-platform,MakeHer/edx-platform,shabab12/edx-platform,msegado/edx-platform,mbareta/edx-platform-ft,naresh21/synergetics-edx-platform,gymnasium/edx-platform,marcore/edx-platform,lduarte1991/edx-platform,EDUlib/edx-platform | common/djangoapps/course_modes/migrations/0005_auto_20151217_0958.py | common/djangoapps/course_modes/migrations/0005_auto_20151217_0958.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0004_auto_20151113_1457'),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[],
state_operations=[
migrations.RemoveField(
model_name='coursemode',
name='expiration_datetime',
),
migrations.AddField(
model_name='coursemode',
name='_expiration_datetime',
field=models.DateTimeField(db_column=b'expiration_datetime', default=None, blank=True, help_text='OPTIONAL: After this date/time, users will no longer be able to enroll in this mode. Leave this blank if users can enroll in this mode until enrollment closes for the course.', null=True, verbose_name='Upgrade Deadline'),
),
]
)
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0004_auto_20151113_1457'),
]
operations = [
migrations.RemoveField(
model_name='coursemode',
name='expiration_datetime',
),
migrations.AddField(
model_name='coursemode',
name='_expiration_datetime',
field=models.DateTimeField(db_column=b'expiration_datetime', default=None, blank=True, help_text='OPTIONAL: After this date/time, users will no longer be able to enroll in this mode. Leave this blank if users can enroll in this mode until enrollment closes for the course.', null=True, verbose_name='Upgrade Deadline'),
),
]
| agpl-3.0 | Python |
0c9c9f4f86ccc2c7328309569e0ebba44de3b1de | make stages class more subclassable | olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net | util/stages.py | util/stages.py | from astrometry.util.file import *
class CallGlobal(object):
def __init__(self, pattern, globals, *args, **kwargs):
self.pat = pattern
self.args = args
self.kwargs = kwargs
self.globals = globals
def getfunc(self, stage):
func = self.pat % stage
func = eval(func, self.globals)
return func
def getkwargs(self, stage, **kwargs):
kwa = self.kwargs.copy()
kwa.update(kwargs)
return kwa
def __call__(self, stage, **kwargs):
func = self.getfunc(stage)
kwa = self.getkwargs(stage, **kwargs)
return func(*self.args, **kwa)
def runstage(stage, picklepat, stagefunc, force=[], prereqs={},
update=True, **kwargs):
print 'Runstage', stage
pfn = picklepat % stage
if os.path.exists(pfn):
if stage in force:
print 'Ignoring pickle', pfn, 'and forcing stage', stage
else:
print 'Reading pickle', pfn
R = unpickle_from_file(pfn)
return R
if stage <= 0:
P = {}
else:
prereq = prereqs.get(stage, stage-1)
P = runstage(prereq, picklepat, stagefunc,
force=force, prereqs=prereqs, **kwargs)
print 'Running stage', stage
R = stagefunc(stage, **P)
print 'Stage', stage, 'finished'
if update:
if R is not None:
P.update(R)
R = P
print 'Saving pickle', pfn
pickle_to_file(R, pfn)
print 'Saved', pfn
return R
| from astrometry.util.file import *
class CallGlobal(object):
def __init__(self, pattern, globals, *args, **kwargs):
self.pat = pattern
self.args = args
self.kwargs = kwargs
self.globals = globals
def getfunc(self, stage):
func = self.pat % stage
func = eval(func, self.globals)
return func
def __call__(self, stage, **kwargs):
func = self.getfunc(stage)
kwa = self.kwargs.copy()
kwa.update(kwargs)
return func(*self.args, **kwa)
def runstage(stage, picklepat, stagefunc, force=[], prereqs={},
update=True, **kwargs):
print 'Runstage', stage
pfn = picklepat % stage
if os.path.exists(pfn):
if stage in force:
print 'Ignoring pickle', pfn, 'and forcing stage', stage
else:
print 'Reading pickle', pfn
R = unpickle_from_file(pfn)
return R
if stage <= 0:
P = {}
else:
prereq = prereqs.get(stage, stage-1)
P = runstage(prereq, picklepat, stagefunc,
force=force, prereqs=prereqs, **kwargs)
print 'Running stage', stage
R = stagefunc(stage, **P)
print 'Stage', stage, 'finished'
if update:
if R is not None:
P.update(R)
R = P
print 'Saving pickle', pfn
pickle_to_file(R, pfn)
print 'Saved', pfn
return R
| bsd-3-clause | Python |
b1e2face6015afc560de6928141a85bb99eaf0da | bump version | getweber/weber-cli | cob/__version__.py | cob/__version__.py | __version__ = "0.0.4"
| __version__ = "0.0.3"
| bsd-3-clause | Python |
efd617d4a5835fc345e6044c604d1ec492ad7bf7 | Handle empty choices | Didero/DideRobot | commands/Choice.py | commands/Choice.py | import random
from CommandTemplate import CommandTemplate
class Command(CommandTemplate):
triggers = ['choice', 'choose']
helptext = "Helps you make a choice between options in a comma-separated list"
possibleReplies = ["{}", "Hmm, I'd go with {}", "Out of those, {} sounds the least bad", "{}, obviously",
"Let's go with... {}. No wait! No, yeah, that one", "I don't know! *rolls dice* Seems you should go for {}",
"Pick {0}, pick {0}!", "Eh, {} will do", "Why not {}?", "The first one! The last one! {}!", "Just pick {}"]
def pickRandomReply(self):
#Based on a suggestion by ekimekim
while True:
#Shuffle the list initially
random.shuffle(self.possibleReplies)
#Then just feed a reply every time one is requested. Once we run out, the list is reshuffled, ready to start again
for reply in self.possibleReplies:
yield reply
def execute(self, message):
"""
:type message: IrcMessage.IrcMessage
"""
replytext = None
if message.messagePartsLength == 0:
replytext = "My choice would be to provide me with some choices, preferably separated by commas"
else:
choices = []
if ',' in message.message:
choices = message.message.split(',')
else:
choices = message.messageParts
#Remove all the empty choices from the list
choices = filter(bool, choices)
if len(choices) == 0:
replytext = "That's just an empty list of choices... I'll pick nothing then"
elif len(choices) == 1:
replytext = "Ooh, that's a tough one. I'd go with the first option, seeing as there is only one"
else:
#Pick a random reply sentence, and then add in a random choice from the provided list, enclosed in quotes
replytext = self.pickRandomReply().next().format('"' + random.choice(choices).strip() + '"')
message.bot.sendMessage(message.source, replytext) | import random
from CommandTemplate import CommandTemplate
class Command(CommandTemplate):
triggers = ['choice', 'choose']
helptext = "Helps you make a choice between options in a comma-separated list"
possibleReplies = ["{}", "Hmm, I'd go with {}", "Out of those, {} sounds the least bad", "{}, obviously",
"Let's go with... {}. No wait! No, yeah, that one", "I don't know! *rolls dice* Seems you should go for {}",
"Pick {0}, pick {0}!", "Eh, {} will do", "Why not {}?", "The first one! The last one! {}!", "Just pick {}"]
def pickRandomReply(self):
#Based on a suggestion by ekimekim
while True:
#Shuffle the list initially
random.shuffle(self.possibleReplies)
#Then just feed a reply every time one is requested. Once we run out, the list is reshuffled, ready to start again
for reply in self.possibleReplies:
yield reply
def execute(self, message):
"""
:type message: IrcMessage.IrcMessage
"""
replytext = None
if message.messagePartsLength == 0:
replytext = "My choice would be to provide me with some choices, preferably separated by commas"
else:
choices = []
if ',' in message.message:
choices = message.message.split(',')
else:
choices = message.messageParts
if len(choices) == 1:
replytext = "Ooh, that's a tough one. I'd go with the first option, seeing as there is only one"
else:
#Pick a random reply sentence, and then add in a random choice from the provided list, enclosed in quotes
replytext = self.pickRandomReply().next().format('"' + random.choice(choices).strip() + '"')
message.bot.sendMessage(message.source, replytext) | mit | Python |
a6f95c70aaf9049465438c68080a09603c54628f | Add detail view for Build model | SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree | InvenTree/build/api.py | InvenTree/build/api.py | """
JSON API for the Build app
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework import generics, permissions
from django.conf.urls import url, include
from .models import Build, BuildItem
from .serializers import BuildSerializer, BuildItemSerializer
class BuildList(generics.ListCreateAPIView):
""" API endpoint for accessing a list of Build objects.
- GET: Return list of objects (with filters)
- POST: Create a new Build object
"""
queryset = Build.objects.all()
serializer_class = BuildSerializer
permission_classes = [
permissions.IsAuthenticatedOrReadOnly,
]
filter_backends = [
DjangoFilterBackend,
filters.SearchFilter,
filters.OrderingFilter,
]
filter_fields = [
'part',
]
class BuildDetail(generics.RetrieveUpdateAPIView):
""" API endpoint for detail view of a Build object """
queryset = Build.objects.all()
serializer_class = BuildSerializer
permission_classes = [
permissions.IsAuthenticatedOrReadOnly,
]
class BuildItemList(generics.ListCreateAPIView):
""" API endpoint for accessing a list of BuildItem objects
- GET: Return list of objects
- POST: Create a new BuildItem object
"""
serializer_class = BuildItemSerializer
def get_queryset(self):
""" Override the queryset method,
to allow filtering by stock_item.part
"""
# Does the user wish to filter by part?
part_pk = self.request.query_params.get('part', None)
query = BuildItem.objects.all()
if part_pk:
query = query.filter(stock_item__part=part_pk)
return query
permission_classes = [
permissions.IsAuthenticatedOrReadOnly,
]
filter_backends = [
DjangoFilterBackend,
]
filter_fields = [
'build',
'stock_item'
]
build_item_api_urls = [
url('^.*$', BuildItemList.as_view(), name='api-build-item-list'),
]
build_api_urls = [
url(r'^item/?', include(build_item_api_urls)),
url(r'^(?P<pk>\d+)/', BuildDetail.as_view(), name='api-build-detail'),
url(r'^.*$', BuildList.as_view(), name='api-build-list'),
]
| """
JSON API for the Build app
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework import generics, permissions
from django.conf.urls import url, include
from .models import Build, BuildItem
from .serializers import BuildSerializer, BuildItemSerializer
class BuildList(generics.ListCreateAPIView):
""" API endpoint for accessing a list of Build objects.
- GET: Return list of objects (with filters)
- POST: Create a new Build object
"""
queryset = Build.objects.all()
serializer_class = BuildSerializer
permission_classes = [
permissions.IsAuthenticatedOrReadOnly,
]
filter_backends = [
DjangoFilterBackend,
filters.SearchFilter,
filters.OrderingFilter,
]
filter_fields = [
'part',
]
class BuildItemList(generics.ListCreateAPIView):
""" API endpoint for accessing a list of BuildItem objects
- GET: Return list of objects
- POST: Create a new BuildItem object
"""
serializer_class = BuildItemSerializer
def get_queryset(self):
""" Override the queryset method,
to allow filtering by stock_item.part
"""
# Does the user wish to filter by part?
part_pk = self.request.query_params.get('part', None)
query = BuildItem.objects.all()
if part_pk:
query = query.filter(stock_item__part=part_pk)
return query
permission_classes = [
permissions.IsAuthenticatedOrReadOnly,
]
filter_backends = [
DjangoFilterBackend,
]
filter_fields = [
'build',
'stock_item'
]
build_item_api_urls = [
url('^.*$', BuildItemList.as_view(), name='api-build-item-list'),
]
build_api_urls = [
url(r'^item/?', include(build_item_api_urls)),
url(r'^.*$', BuildList.as_view(), name='api-build-list'),
]
| mit | Python |
e7a48fc19e965475c2d3c32e3c084a056c2927aa | Update pyyaml usage | QuteBits/resume_42 | resume_tex.py | resume_tex.py | # Converts my Resume from YAML to TeX.
# Just don't forget to drop pdflatex on the output :)
# ------------------------------------------------------------------------------
# @contributor Aleksandr Mattal <https://github.com/qutebits>
# inspired by work of Brandon Amos <https://github.com/bamos/cv>
import re
import yaml
import sys
from datetime import date
from jinja2 import Environment, FileSystemLoader
yaml_contents = yaml.safe_load(open("resume.yaml", 'r')) #read data
env = Environment(loader=FileSystemLoader("template"),
block_start_string='~{',block_end_string='}~',
variable_start_string='~{{', variable_end_string='}}~')
this_loc = len(open("resume_tex.py", 'r').readlines()) #lets keep it at 42
def generate():
body = ""
for section in yaml_contents['order']: #generate sections 1 by 1
contents = yaml_contents[section[0]]
name = section[1].title()
body += env.get_template("resume-section.tmpl.tex").render(
name = name.upper(),
contents = contents
)
#and then generate the TeX wrapper and fill it with generated sections
result = open("result/resume.tex", 'w')
result.write(env.get_template("resume.tmpl.tex").render(
name = yaml_contents['name'].upper(),
email = yaml_contents['email'],
loc = this_loc, #lines of code in this very script :)
body = body,
today = date.today().strftime("%B %d, %Y") #generation date
))
result.close()
generate() #finally, generate this beauty
| # Converts my Resume from YAML to TeX.
# Just don't forget to drop pdflatex on the output :)
# ------------------------------------------------------------------------------
# @contributor Aleksandr Mattal <https://github.com/qutebits>
# inspired by work of Brandon Amos <https://github.com/bamos/cv>
import re
import yaml
import sys
from datetime import date
from jinja2 import Environment, FileSystemLoader
yaml_contents = yaml.load(open("resume.yaml", 'r')) #read data
env = Environment(loader=FileSystemLoader("template"),
block_start_string='~{',block_end_string='}~',
variable_start_string='~{{', variable_end_string='}}~')
this_loc = len(open("resume_tex.py", 'r').readlines()) #lets keep it at 42
def generate():
body = ""
for section in yaml_contents['order']: #generate sections 1 by 1
contents = yaml_contents[section[0]]
name = section[1].title()
body += env.get_template("resume-section.tmpl.tex").render(
name = name.upper(),
contents = contents
)
#and then generate the TeX wrapper and fill it with generated sections
result = open("result/resume.tex", 'w')
result.write(env.get_template("resume.tmpl.tex").render(
name = yaml_contents['name'].upper(),
email = yaml_contents['email'],
loc = this_loc, #lines of code in this very script :)
body = body,
today = date.today().strftime("%B %d, %Y") #generation date
))
result.close()
generate() #finally, generate this beauty | mit | Python |
b6937405a9b85026f3e9cffc94fa65c87ee793c0 | Add possibility to debug csv streamer views. | jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu | kirppu/views/csv_utils.py | kirppu/views/csv_utils.py | # -*- coding: utf-8 -*-
import functools
import html
import io
from urllib.parse import quote
from django.conf import settings
from django.http import HttpResponse, StreamingHttpResponse
def strip_generator(fn):
@functools.wraps(fn)
def inner(output, event, generator=False):
if generator:
# Return the generator object only when using StringIO.
return fn(output, event)
for _ in fn(output, event):
pass
return inner
def csv_streamer_view(request, generator, filename_base):
debug = settings.DEBUG and request.GET.get("debug") is not None
def streamer():
if debug:
yield "<!DOCTYPE html>\n<html>\n<body>\n<pre>"
output = io.StringIO()
for a_string in generator(output):
val = output.getvalue()
if debug:
yield html.escape(val, quote=False)
else:
yield val
output.truncate(0)
output.seek(0)
if debug:
yield "</pre>\n</body>\n</html>"
if debug:
response = HttpResponse("".join(streamer()))
else:
response = StreamingHttpResponse(streamer(), content_type="text/plain; charset=utf-8")
if request.GET.get("download") is not None:
response["Content-Disposition"] = 'attachment; filename="%s.csv"' % quote(filename_base, safe="")
response["Content-Type"] = "text/csv; charset=utf-8"
return response
| # -*- coding: utf-8 -*-
import functools
import io
from urllib.parse import quote
from django.http import StreamingHttpResponse
def strip_generator(fn):
@functools.wraps(fn)
def inner(output, event, generator=False):
if generator:
# Return the generator object only when using StringIO.
return fn(output, event)
for _ in fn(output, event):
pass
return inner
def csv_streamer_view(request, generator, filename_base):
def streamer():
output = io.StringIO()
for a_string in generator(output):
val = output.getvalue()
yield val
output.truncate(0)
output.seek(0)
response = StreamingHttpResponse(streamer(), content_type="text/plain; charset=utf-8")
if request.GET.get("download") is not None:
response["Content-Disposition"] = 'attachment; filename="%s.csv"' % quote(filename_base, safe="")
response["Content-Type"] = "text/csv; charset=utf-8"
return response
| mit | Python |
acb787e677a76311172942ad0e0cf033d6b0b3a9 | Remove version printing | simpeg/discretize,simpeg/discretize,simpeg/discretize | docs/examples/plot_streamThickness.py | docs/examples/plot_streamThickness.py | """
Simple example to vary streamline thickness based on the vector amplitudes
=================================
"""
import numpy as np
import matplotlib.pyplot as plt
from discretize import TensorMesh
###############################################################################
# Create mesh
# -----
#
# Minimum cell size in each direction
dx = 1.
dy = 1.
dz = 1.
# Number of core cells in each direction
nCoreX = 43.
nCoreY = 43.
nCoreZ = 43.
# Cell widths
hx = [(dx,nCoreX)]
hy = [(dy,nCoreY)]
hz = [(dz,nCoreZ)]
# Desired Core mesh origin (Bottom SW corner)
x0 = -21.5
y0 = -21.5
z0 = -21.5
mesh = TensorMesh([hx, hy, hz],[x0,y0,z0])
print(mesh.nC)
###############################################################################
#
# Define arbitrary function to plot
#
X = mesh.gridCC[:,0]
Y = mesh.gridCC[:,1]
Z = mesh.gridCC[:,2]
U = -1 - X**2 + Y + Z
V = 1 + X - Y**2 + Z
W = 1 + X + Y - Z**2
###############################################################################
# Plot streamlines
# ----------------------
#
# Create figure
fig = plt.figure()
ax = plt.subplot(111)
fig.set_figheight(15)
fig.set_figwidth(15)
labelsize = 30.
ticksize = 30.
# Create data vector
dataVec = np.hstack([U,V,W])
print(dataVec.shape)
# Set streamline plotting options
streamOpts = {'color':'w', 'density':2.0}
pcolorOpts = {"cmap":"viridis"}
dat = mesh.plotSlice(dataVec, ax=ax, normal='Z', ind=5, vType='CCv', view='vec', streamOpts=streamOpts, gridOpts={"color":"k", "alpha":0.1}, grid=True, clim=None, stream_thickness=3)
###############################################################################
# Moving Forward
# --------------
#
# If you have suggestions for improving this example, please create a `pull request on the example in discretize
| """
Simple example to vary streamline thickness based on the vector amplitudes
=================================
"""
import numpy as np
import matplotlib.pyplot as plt
from discretize import TensorMesh
###############################################################################
# Create mesh
# -----
#
# Minimum cell size in each direction
dx = 1.
dy = 1.
dz = 1.
# Number of core cells in each direction
nCoreX = 43.
nCoreY = 43.
nCoreZ = 43.
# Cell widths
hx = [(dx,nCoreX)]
hy = [(dy,nCoreY)]
hz = [(dz,nCoreZ)]
# Desired Core mesh origin (Bottom SW corner)
x0 = -21.5
y0 = -21.5
z0 = -21.5
mesh = TensorMesh([hx, hy, hz],[x0,y0,z0])
print(mesh.nC)
###############################################################################
#
# Define arbitrary function to plot
#
X = mesh.gridCC[:,0]
Y = mesh.gridCC[:,1]
Z = mesh.gridCC[:,2]
U = -1 - X**2 + Y + Z
V = 1 + X - Y**2 + Z
W = 1 + X + Y - Z**2
###############################################################################
# Plot streamlines
# ----------------------
#
# Create figure
fig = plt.figure()
ax = plt.subplot(111)
fig.set_figheight(15)
fig.set_figwidth(15)
labelsize = 30.
ticksize = 30.
# Create data vector
dataVec = np.hstack([U,V,W])
print(dataVec.shape)
# Set streamline plotting options
streamOpts = {'color':'w', 'density':2.0}
pcolorOpts = {"cmap":"viridis"}
dat = mesh.plotSlice(dataVec, ax=ax, normal='Z', ind=5, vType='CCv', view='vec', streamOpts=streamOpts, gridOpts={"color":"k", "alpha":0.1}, grid=True, clim=None, stream_thickness=3)
###############################################################################
# Print the version of SimPEG and dependencies
# --------------------------------------------
#
versions()
###############################################################################
# Moving Forward
# --------------
#
# If you have suggestions for improving this example, please create a `pull request on the example in discretize
| mit | Python |
1abc730b053ba5de43983f22560267c6e790ec7c | Make main file importable. | AntonHerrNilsson/the-dungeon | the_dungeon.py | the_dungeon.py | import sys
import world
from creatures import Player
from player_ai import PlayerAI
import display
def run(world, steps=None, slp=0.5, show=True, player=None):
if steps is None:
steps = sys.maxsize
display.initialize()
for i in range(steps):
if show:
to_display = [world]
if player is not None and hasattr(player, "ai"):
to_display.append(player.ai.model)
to_display.append(player.percept())
score = player.performance
centers = [player.location,
player.ai.self_model.location,
(0,0)]
else:
score = ""
centers = [None]
display.display(i, score, centers, 5, *to_display)
time.sleep(slp)
world.step()
if __name__ == "__main__":
dungeon = world.World()
player = Player(dungeon, (5,5), PlayerAI, direction=(0,1))
world.testing_room(dungeon)
run(dungeon, player=player)
| import sys
import world
from creatures import Player
from player_ai import PlayerAI
import display
def run(world, steps=None, slp=0.5, show=True, player=None):
if steps is None:
steps = sys.maxsize
display.initialize()
for i in range(steps):
if show:
to_display = [world]
if player is not None and hasattr(player, "ai"):
to_display.append(player.ai.model)
to_display.append(player.percept())
score = player.performance
centers = [player.location,
player.ai.self_model.location,
(0,0)]
else:
score = ""
centers = [None]
display.display(i, score, centers, 5, *to_display)
time.sleep(slp)
world.step()
dungeon = world.World()
player = Player(dungeon, (5,5), PlayerAI, direction=(0,1))
world.testing_room(dungeon)
run(dungeon, player=player)
| mit | Python |
faba2bc98f08cddea51d2e0093aa5c2981c8bf15 | Add update interval constant. Add detail to constructor. | babycaseny/gdrive-linux,jimlawton/gdrive-linux-googlecode,jimlawton/gdrive-linux,jmfield2/gdrive-linux | gdrived.py | gdrived.py | #!/usr/bin/env python
#
# Copyright 2012 Jim Lawton. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import daemon
UPDATE_INTERVAL = 30 # Sync update interval in seconds.
class GDriveDaemon(daemon.Daemon, object):
def __init__(self):
"Class constructor."
# Use pidfile in Gdrive config directory.
pidfile = None
# Use loglevel from GDrive config.
loglevel = None
# Use logfile in GDrive config directory.
stdout = None
super(GDriveDaemon, self).__init__(pidfile, loglevel, stdout)
def run(self):
"Run the daemon."
while True:
time.sleep(UPDATE_INTERVAL)
| #!/usr/bin/env python
#
# Copyright 2012 Jim Lawton. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import daemon
class GDriveDaemon(daemon.Daemon):
def run(self):
while True:
time.sleep(1)
| apache-2.0 | Python |
e3e8dea40503944e1983f20ab18edd67163f384f | Bump version 0.19.0rc1 --> 0.19.0rc2 | lbryio/lbry,lbryio/lbry,lbryio/lbry | lbrynet/__init__.py | lbrynet/__init__.py | import logging
__version__ = "0.19.0rc2"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
| import logging
__version__ = "0.19.0rc1"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
| mit | Python |
82a8b7ac80fd7758a9a5e41c7751d4138577f85e | update logger in client test | h2020-endeavour/iSDX,h2020-endeavour/iSDX,h2020-endeavour/iSDX,h2020-endeavour/iSDX,h2020-endeavour/iSDX,h2020-endeavour/iSDX,h2020-endeavour/iSDX | pctrl/client_test.py | pctrl/client_test.py | #!/usr/bin/env python
# Author:
# Florian Kaufmann (DE-CIX)
import argparse
import atexit
import json
from multiprocessing.connection import Listener, Client
import os
from signal import signal, SIGTERM
from sys import exit
from threading import RLock, Thread
from lib import PConfig
import util.log
class XRS_Client(object):
def __init__(self, id, config_file, logger):
# participant id
self.id = id
# used to signal termination
self.run = True
self.prefix_lock = {}
# Initialize participant params
self.cfg = PConfig(config_file, self.id)
self.logger = logger
def xstart(self, test_file):
# Start all clients/listeners/whatevs
print("Starting XRS_Client for participant %s" % self.id)
# Client
print (self.cfg.get_xrs_info())
self.xrs_client = self.cfg.get_xrs_client(self.logger)
self.xrs_client.send(test_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('test_file', help='the test file')
parser.add_argument('id', type=int,
help='participant id (integer)')
args = parser.parse_args()
# locate test file
# TODO: Separate the config files for each participant
base_path = os.path.abspath(os.path.join(os.path.realpath(__file__),
".."))
test_file = os.path.join(base_path, args.test_file)
# locate config file
# TODO: Separate the config files for each participant
base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..","..","endeavour","examples","test-mh","config"))
config_file = os.path.join(base_path, "sdx_global.cfg")
#logger = util.log.getLogger("P_" + str(args.id))
print ("Starting controller with config file: "+str(config_file))
logger = util.log.getLogger("P_" + str(args.id))
# start controller
xrsctrlr = XRS_Client(args.id, config_file, logger)
xrsctrlr_thread = Thread(target=xrsctrlr.xstart(test_file))
xrsctrlr_thread.daemon = True
xrsctrlr_thread.start()
atexit.register(xrsctrlr.stop)
signal(SIGTERM, lambda signum, stack_frame: exit(1))
while xrsctrlr_thread.is_alive():
try:
xrsctrlr_thread.join(1)
except KeyboardInterrupt:
xrsctrlr.stop()
print ("Xrsctrlr exiting")
if __name__ == '__main__':
main() | #!/usr/bin/env python
# Author:
# Florian Kaufmann (DE-CIX)
import argparse
import atexit
import json
from multiprocessing.connection import Listener, Client
import os
from signal import signal, SIGTERM
from sys import exit
from threading import RLock, Thread
from lib import PConfig
class XRS_Client(object):
def __init__(self, id, config_file):
# participant id
self.id = id
# used to signal termination
self.run = True
self.prefix_lock = {}
# Initialize participant params
self.cfg = PConfig(config_file, self.id)
def xstart(self, test_file):
# Start all clients/listeners/whatevs
print("Starting XRS_Client for participant %s" % self.id)
# Client
print (self.cfg.get_xrs_info())
self.xrs_client = self.cfg.get_xrs_client()
self.xrs_client.send(test_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('test_file', help='the test file')
parser.add_argument('id', type=int,
help='participant id (integer)')
args = parser.parse_args()
# locate test file
# TODO: Separate the config files for each participant
base_path = os.path.abspath(os.path.join(os.path.realpath(__file__),
".."))
test_file = os.path.join(base_path, args.test_file)
# locate config file
# TODO: Separate the config files for each participant
base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..","..","endeavour","examples","test-mh","config"))
config_file = os.path.join(base_path, "sdx_global.cfg")
#logger = util.log.getLogger("P_" + str(args.id))
print ("Starting controller with config file: "+str(config_file))
# start controller
xrsctrlr = XRS_Client(args.id, config_file)
xrsctrlr_thread = Thread(target=xrsctrlr.xstart(test_file))
xrsctrlr_thread.daemon = True
xrsctrlr_thread.start()
atexit.register(xrsctrlr.stop)
signal(SIGTERM, lambda signum, stack_frame: exit(1))
while xrsctrlr_thread.is_alive():
try:
xrsctrlr_thread.join(1)
except KeyboardInterrupt:
xrsctrlr.stop()
print ("Xrsctrlr exiting")
if __name__ == '__main__':
main() | apache-2.0 | Python |
f07331b8bfe3c43e694501b44ffadfae2dc262c6 | fix null hypothesis bug in hypergeom | kellieotto/permute,statlab/permute,jarrodmillman/permute,kellieotto/permute | permute/hypergeom.py | permute/hypergeom.py | """
Hypergeometric Test
"""
import numpy as np
from .utils import get_prng
def hypergeometric(x, N, n, G, reps=10**5, alternative='greater', keep_dist=False, seed=None):
"""
Parameters
----------
x : int
number of `good` elements observed in the sample
N : int
population size
n : int
sample size
G : int
hypothesized number of good elements in population
reps : int
number of repetitions (default: 10**5)
alternative : {'greater', 'less', 'two-sided'}
alternative hypothesis to test (default: 'greater')
keep_dist : boolean
flag for whether to store and return the array of values of the test statistics (default: false)
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
Returns
-------
float
estimated p-value
float
test statistic
list
distribution of test statistics (only if keep_dist == True)
"""
prng = get_prng(seed)
def generate():
return prng.hypergeometric(G, N-G, n)
if keep_dist:
permutations = np.empty(reps)
for i in range(reps):
permutations[i] = generate()
if alternative == 'two-sided':
hits_up = np.sum(permutations >= x)
hits_low = np.sum(permutations <= x)
p_value = 2*np.min(hits_up/reps, hits_low/reps, 0.5)
elif alternative == 'greater':
p_value = np.mean(permutations >= x)
else:
p_value = np.mean(permutations <= x)
return p_value, x, permutations
else:
hits_up = 0
hits_low = 0
for i in range(reps):
ts = generate()
hits_up += (ts >= x)
hits_low += (ts <= x)
if alternative == 'two-sided':
p_value = 2*np.min([hits_up/reps, hits_low/reps, 0.5])
elif alternative == 'greater':
p_value = hits_up/reps
else:
p_value = hits_low/reps
return p_value, x
| """
Hypergeometric Test
"""
import scipy
import numpy as np
from scipy.special import comb
from .utils import get_prng
def hypergeom(population, n, g, reps=10**5, alternative='greater', keep_dist=False, seed=None):
"""
Parameters
----------
population : array-like
list of elements consisting of x in {0, 1} where 0 represents a failure and 1 represents a success
n : int
sample size
g : int
hypothesized number of good elements in sample
reps : int
number of repetitions (default: 10**5)
alternative : {'greater', 'less', 'two-sided'}
alternative hypothesis to test (default: 'greater')
keep_dist : boolean
flag for whether to store and return the array of values of the test statistics (default: false)
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
Returns
-------
float
estimated p-value
float
test statistic
list
distribution of test statistics (only if keep_dist == True)
"""
original_ts = sum(population) / len(population)
prng = get_prng(seed)
pop_G = sum(population)
pop_B = len(population) - pop_G
permutations = []
def generate():
return prng.hypergeometric(pop_G, pop_B, n)
while reps >= 0:
ts = generate()
permutations.append(ts)
reps -= 1
simulations = list(permutations)
permutations2 = list(permutations)
alternative_func = {
'greater': lambda thing: thing > g,
'less': lambda thing: thing < g
}
if alternative == "two-sided":
count = 0
while len(permutations) > 0:
val = permutations.pop()
if alternative_func['greater'](val):
count += 1
p_valueG = count /len(simulations)
counter = 0
while len(permutations2) > 0:
val = permutations2.pop()
if alternative_func['less'](val):
counter += 1
p_valueL = counter / len(simulations)
p_value = 2 * min(p_valueG, p_valueL)
else:
count = 0
while len(permutations) > 0:
val = permutations.pop()
if alternative_func[alternative](val):
count += 1
p_value = count / len(simulations)
if keep_dist == True:
return p_value, original_ts, simulations
return p_value, original_ts
| bsd-2-clause | Python |
ea2a56d7e4c45a3c760e3b9e5bb1e7e9e760f853 | Bump version to 0.0.6 | portfoliome/pgawedge | pgawedge/_version.py | pgawedge/_version.py | version_info = (0, 0, 6)
__version__ = '.'.join(map(str, version_info))
| version_info = (0, 0, 5)
__version__ = '.'.join(map(str, version_info))
| mit | Python |
850222c22e46806157d3081c70d2852a866b3351 | change links() to global | lewangbtcc/anti-XSS,lewangbtcc/anti-XSS | getLink.py | getLink.py | #encoding: utf8
# links ia a global set that stroe all of the links in the particular domain.
links = set()
# Return a set of url links in src page
def getLink(src):
source = src.lower()
# links = set()
head = 0
length = len(source)
flag = True
while ((flag) and (head < length)):
flag = False
pos1 = source[head:].find('href="') + head
pos2 = source[pos1 + 7:].find('"') + pos1 + 7
if (pos1 >= head)and(pos2 >= head):
# print pos1, pos2 ,head
flag = True
link = source[pos1 + 6:pos2]
link = link.replace('\t','')
link = link.replace('\n','')
link = link.replace(' ','')
links.add(link)
head = pos2 + 1
# print links[0]
# print links[1]
# for i in links:
# print i
return links
if __name__ == '__main__':
f = open('page.txt','rb')
src = f.read()
getLink(src)
f.close()
| #encoding: utf8
# Return a set of url links in src page
def getLink(src):
source = src.lower()
links = set()
head = 0
length = len(source)
flag = True
while ((flag) and (head < length)):
flag = False
pos1 = source[head:].find('href="') + head
pos2 = source[pos1 + 7:].find('"') + pos1 + 7
if (pos1 >= head)and(pos2 >= head):
# print pos1, pos2 ,head
flag = True
link = source[pos1 + 6:pos2]
link = link.replace('\t','')
link = link.replace('\n','')
link = link.replace(' ','')
links.add(link)
head = pos2 + 1
# print links[0]
# print links[1]
# for i in links:
# print i
return links
if __name__ == '__main__':
f = open('page.txt','rb')
src = f.read()
getLink(src)
f.close()
| mit | Python |
d88b7876570cc2bd66a19a508f22f8e7e0501243 | use the default value of getattr.. | pivotal-energy-solutions/django-company-registration,pivotal-energy-solutions/django-company-registration | company_registration/tasks.py | company_registration/tasks.py | # -*- coding: utf-8 -*-
"""tasks.py: Django company_registration"""
import logging
from celery.schedules import crontab
from celery.task.base import periodic_task
from company_registration.models import RegistrationProfile
import settings
__author__ = 'Steven Klass'
__date__ = '1/9/13 2:56 PM'
__copyright__ = 'Copyright 2012 Pivotal Energy Solutions. All rights reserved.'
__credits__ = ['Steven Klass', ]
log = logging.getLogger(__name__)
@periodic_task(run_every=crontab(hour="2", minute="20", day_of_week="*"))
def clear_expired_registrations(**kwargs):
"""
Crontab to clear expired registrations
:param kwargs: Not Unsed
"""
kwargs['log'] = clear_expired_registrations.get_logger()
kwargs['loglevel'] = logging.DEBUG if getattr(settings, 'DEBUG', False) else logging.ERROR
return RegistrationProfile.objects.delete_expired_users()
| # -*- coding: utf-8 -*-
"""tasks.py: Django company_registration"""
import logging
from celery.schedules import crontab
from celery.task.base import periodic_task
from company_registration.models import RegistrationProfile
import settings
__author__ = 'Steven Klass'
__date__ = '1/9/13 2:56 PM'
__copyright__ = 'Copyright 2012 Pivotal Energy Solutions. All rights reserved.'
__credits__ = ['Steven Klass', ]
log = logging.getLogger(__name__)
@periodic_task(run_every=crontab(hour="2", minute="20", day_of_week="*"))
def clear_expired_registrations(**kwargs):
"""
Crontab to clear expired registrations
:param kwargs: Not Unsed
"""
kwargs['log'] = clear_expired_registrations.get_logger()
kwargs['loglevel'] = logging.DEBUG if getattr(settings, 'DEBUG') else logging.ERROR
return RegistrationProfile.objects.delete_expired_users()
| apache-2.0 | Python |
9c8af5537e9371bd4843dad066a0186fe4bb7ea3 | Revert "ignoring ball_lost_time" | bit-bots/bitbots_behaviour | bitbots_body_behavior/src/bitbots_body_behavior/decisions/goal_seen.py | bitbots_body_behavior/src/bitbots_body_behavior/decisions/goal_seen.py | import rospy
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class GoalSeen(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(GoalSeen, self).__init__(blackboard, dsd, parameters)
self.goal_lost_time = rospy.Duration(self.blackboard.config['goal_lost_time'])
def perform(self, reevaluate=False):
"""
Determines whether the goal was seen recently (as defined in config)
:param reevaluate:
:return:
"""
self.publish_debug_data("goal_seen_time", rospy.Time.now() - self.blackboard.world_model.goal_last_seen())
if rospy.Time.now() - self.blackboard.world_model.goal_last_seen() < self.goal_lost_time:
return 'YES'
return 'NO'
def get_reevaluate(self):
return True
| import rospy
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class GoalSeen(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(GoalSeen, self).__init__(blackboard, dsd, parameters)
self.goal_lost_time = rospy.Duration(self.blackboard.config['goal_lost_time'])
def perform(self, reevaluate=False):
"""
Determines whether the goal was seen recently (as defined in config)
:param reevaluate:
:return:
"""
self.publish_debug_data("goal_seen_time", rospy.Time.now() - self.blackboard.world_model.goal_last_seen())
if self.blackboard.world_model.goal_last_seen() != rospy.Time(0):
return 'YES'
return 'NO'
def get_reevaluate(self):
return True
| bsd-3-clause | Python |
8759a164c82026e4dcb386fd9803da28e2c1c242 | Update Chapter07/PracticeQuestions/Question2.py added docstrings | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/CrackingCodesWithPython/Chapter07/PracticeQuestions/Question2.py | books/CrackingCodesWithPython/Chapter07/PracticeQuestions/Question2.py | """Chapter 7 Practice Question 2
Is each spam a global or local variable?
"""
spam = 42 # global/local
def foo() -> None:
"""Prints spam.
Prints the contents of the spam variable.
Returns:
Prints spam variable.
"""
global spam
spam = 99 # global/local
print(spam)
def main():
foo() # mind == blown
# If Question2.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
| # Is each spam a global or local variable?
spam = 42 # global/local
def foo():
global spam
spam = 99 # global/local
print(spam)
def main():
foo() # mind == blown
# If Question2.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
| mit | Python |
2a024236ff6f50689d99e9cf327a7145ae08e0f6 | add portal db config | Cepave/dashboard,Cepave/dashboard,Cepave/dashboard,Cepave/dashboard | rrd/config.py | rrd/config.py | #-*-coding:utf8-*-
import os
import json
#-- dashboard db config --
DASHBOARD_DB_HOST = "127.0.0.1"
DASHBOARD_DB_PORT = 3306
DASHBOARD_DB_USER = "root"
DASHBOARD_DB_PASSWD = ""
DASHBOARD_DB_NAME = "dashboard"
#-- graph db config --
GRAPH_DB_HOST = "127.0.0.1"
GRAPH_DB_PORT = 3306
GRAPH_DB_USER = "root"
GRAPH_DB_PASSWD = ""
GRAPH_DB_NAME = "graph"
#-- portal db config --
PORTAL_DB_HOST = "127.0.0.1"
PORTAL_DB_PORT = 3306
PORTAL_DB_USER = "root"
PORTAL_DB_PASSWD = ""
PORTAL_DB_NAME = "falcon_portal"
#-- app config --
DEBUG = True
SECRET_KEY = "secret-key"
SESSION_COOKIE_NAME = "open-falcon"
PERMANENT_SESSION_LIFETIME = 3600 * 24 * 30
SITE_COOKIE = "open-falcon-ck"
#-- query config --
QUERY_ADDR = "http://127.0.0.1:9966"
BASE_DIR = "/home/work/open-falcon/dashboard/"
LOG_PATH = os.path.join(BASE_DIR,"log/")
JSONCFG = {}
JSONCFG['database'] = {}
JSONCFG['database']['host'] = '127.0.0.1'
JSONCFG['database']['port'] = '3306'
JSONCFG['database']['account'] = 'root'
JSONCFG['database']['password'] = 'password'
JSONCFG['database']['db'] = 'uic'
JSONCFG['database']['table'] = 'session'
JSONCFG['shortcut'] = {}
JSONCFG['shortcut']['falconPortal'] = "http://127.0.0.1:5050"
JSONCFG['shortcut']['falconDashboard'] = "http://127.0.0.1:8081"
JSONCFG['shortcut']['grafanaDashboard'] = "http://127.0.0.1:3000"
JSONCFG['shortcut']['falconAlarm'] = "http://127.0.0.1:9912"
JSONCFG['shortcut']['falconUIC'] = "http://127.0.0.1:1234"
JSONCFG['redirectUrl'] = 'UrlOfRedirectedLoginPage'
try:
from rrd.local_config import *
except:
pass
| #-*-coding:utf8-*-
import os
import json
#-- dashboard db config --
DASHBOARD_DB_HOST = "127.0.0.1"
DASHBOARD_DB_PORT = 3306
DASHBOARD_DB_USER = "root"
DASHBOARD_DB_PASSWD = ""
DASHBOARD_DB_NAME = "dashboard"
#-- graph db config --
GRAPH_DB_HOST = "127.0.0.1"
GRAPH_DB_PORT = 3306
GRAPH_DB_USER = "root"
GRAPH_DB_PASSWD = ""
GRAPH_DB_NAME = "graph"
#-- app config --
DEBUG = True
SECRET_KEY = "secret-key"
SESSION_COOKIE_NAME = "open-falcon"
PERMANENT_SESSION_LIFETIME = 3600 * 24 * 30
SITE_COOKIE = "open-falcon-ck"
#-- query config --
QUERY_ADDR = "http://127.0.0.1:9966"
BASE_DIR = "/home/work/open-falcon/dashboard/"
LOG_PATH = os.path.join(BASE_DIR,"log/")
JSONCFG = {}
JSONCFG['database'] = {}
JSONCFG['database']['host'] = '127.0.0.1'
JSONCFG['database']['port'] = '3306'
JSONCFG['database']['account'] = 'root'
JSONCFG['database']['password'] = 'password'
JSONCFG['database']['db'] = 'uic'
JSONCFG['database']['table'] = 'session'
JSONCFG['shortcut'] = {}
JSONCFG['shortcut']['falconPortal'] = "http://127.0.0.1:5050"
JSONCFG['shortcut']['falconDashboard'] = "http://127.0.0.1:8081"
JSONCFG['shortcut']['grafanaDashboard'] = "http://127.0.0.1:3000"
JSONCFG['shortcut']['falconAlarm'] = "http://127.0.0.1:9912"
JSONCFG['shortcut']['falconUIC'] = "http://127.0.0.1:1234"
JSONCFG['redirectUrl'] = 'UrlOfRedirectedLoginPage'
try:
from rrd.local_config import *
except:
pass
| apache-2.0 | Python |
9b5975f92db327fe9fb5698ed9f512a03f0932c3 | Add instructions for running image analysis | tokee/juxta,tokee/juxta,tokee/juxta | gridify.py | gridify.py | # Prerequisites
#git clone git@github.com:ml4a/ml4a-ofx.git
#cd mla-ofx
#pip3 install keras
#pip3 install numpy
#pip3 install prime
# https://github.com/Quasimondo/RasterFairy
#pip3 install rasterfairy
# Run instructions
# put 300+ images in the folder 'images'
#python3 .scripts/tSNE-images.py --images_path images --output_path points.json
#Run this script
#Run juxta - remember to specify RAW_IMAGE_COLS=15
# RAW_IMAGE_COLS=16 ~/projects/juxta/juxta.sh img.dat t2
import rasterfairy
import json
import numpy as np
import math
# Expects output from tSNE-images.py and runs rasterfairy on the input,
# writing the image paths layed out to a grid as the output
with open('/home/te/projects/ml4a-ofx/points.json') as json_file:
data = json.load(json_file)
arr = []
for tup in data:
point = tup['point']
arr.append([point[0], point[1]])
# arr.append([math.floor(point[0]*100000000), math.floor(point[1]*100000000)])
#full_data = np.asarray( data )
# {'path': '/home/te/projects/ml4a-ofx/images/20190902-0853_4.jpg', 'point': [0.1758463829755783, 0.3165808618068695]}
#for entry in full_data:
# print (entry)
#print(full_data)
#arr = np.asarray( [[1000, 2000], [2, 30], [8, 100], [300, 2]])
#print(arr)
tsne = np.asarray(arr)
#print(tsne)
nx = 15
ny = 21
# 16x20 hangs forever!?
#nx = 16
#ny = 20
#grid = rasterfairy.transformPointCloud2D(tsne)
#print("Calling rasterfairy on " + str(len(arr)) + " coordinates")
gridAssignment = rasterfairy.transformPointCloud2D(tsne, target=(nx, ny))
grid, gridShape = gridAssignment
out_grid = []
for i, dat in enumerate(data):
gridX, gridY = grid[i]
out_grid.append({'gx': int(gridX), 'gy': int(gridY), 'path': dat['path']})
# print(dat['path'] + " gx:" + str(int(gridX)) + ", gy:" + str(int(gridY)))
#out_grid = out_grid.sort(key = lambda obj: obj['gx'], obj['gy'])
# We sort by secondary first - Python sort is stable; it does not change order on equal keys
out_grid.sort(key = lambda obj: obj['gy'])
out_grid.sort(key = lambda obj: obj['gx'])
for element in out_grid:
print(element['path'])
#print(out_grid)
| import rasterfairy
import json
import numpy as np
import math
# Expects output from tSNE-images.py and runs rasterfairy on the input,
# writing the image paths layed out to a grid as the output
with open('/home/te/projects/ml4a-ofx/points.json') as json_file:
data = json.load(json_file)
arr = []
for tup in data:
point = tup['point']
arr.append([point[0], point[1]])
# arr.append([math.floor(point[0]*100000000), math.floor(point[1]*100000000)])
#full_data = np.asarray( data )
# {'path': '/home/te/projects/ml4a-ofx/images/20190902-0853_4.jpg', 'point': [0.1758463829755783, 0.3165808618068695]}
#for entry in full_data:
# print (entry)
#print(full_data)
#arr = np.asarray( [[1000, 2000], [2, 30], [8, 100], [300, 2]])
#print(arr)
tsne = np.asarray(arr)
#print(tsne)
nx = 15
ny = 21
# 16x20 hangs forever!?
#nx = 16
#ny = 20
#grid = rasterfairy.transformPointCloud2D(tsne)
#print("Calling rasterfairy on " + str(len(arr)) + " coordinates")
gridAssignment = rasterfairy.transformPointCloud2D(tsne, target=(nx, ny))
grid, gridShape = gridAssignment
out_grid = []
for i, dat in enumerate(data):
gridX, gridY = grid[i]
out_grid.append({'gx': int(gridX), 'gy': int(gridY), 'path': dat['path']})
# print(dat['path'] + " gx:" + str(int(gridX)) + ", gy:" + str(int(gridY)))
#out_grid = out_grid.sort(key = lambda obj: obj['gx'], obj['gy'])
# We sort by secondary first - Python sort is stable; it does not change order on equal keys
out_grid.sort(key = lambda obj: obj['gy'])
out_grid.sort(key = lambda obj: obj['gx'])
for element in out_grid:
print(element['path'])
#print(out_grid)
| apache-2.0 | Python |
67bdac10aa9f38643339d73b437e43d5ae34be75 | Add StepFormSet inlineformset to forms.py | livingsilver94/getaride,livingsilver94/getaride,livingsilver94/getaride | planner/forms.py | planner/forms.py | from django.contrib.auth.forms import AuthenticationForm
from django import forms
from django.core.validators import MinLengthValidator
from .models import PoolingUser, Trip, Step
from users.forms import UserCreationForm
class LoginForm(AuthenticationForm):
username = forms.CharField(widget=forms.EmailInput(attrs={'placeholder': 'Email',
'class': 'form-control',
}))
password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder': 'Password',
'class': 'form-control',
}))
class SearchTrip(forms.Form):
"""
Pay attention that id fields are meant to be hidden, since we suppose they come from
an autocomplete AJAX request via an another CharField.
"""
origin_id = forms.IntegerField()
destination_id = forms.IntegerField()
datetime = forms.DateTimeField()
class PoolingUserForm(forms.ModelForm):
class Meta:
model = PoolingUser
# Exclude the one-to-one relation with User
fields = ['birth_date', 'driving_license', 'cellphone_number']
class TripForm(forms.ModelForm):
class Meta:
model = Trip
fields = ['date_origin', 'max_num_passengers']
class StepForm(forms.ModelForm):
class Meta:
model = Step
fields = ['origin', 'destination', 'hour_origin', 'hour_destination', 'max_price']
StepFormSet = inlineformset_factory(parent_model=Trip, model=Step, form=StepForm, can_delete=False)
class UserForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
fields = ('email', 'first_name', 'last_name')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name in self.Meta.fields:
self[field_name].field.required = True
self['password1'].field.validators = [MinLengthValidator(6)]
| from django.contrib.auth.forms import AuthenticationForm
from django import forms
from django.core.validators import MinLengthValidator
from .models import PoolingUser, Trip, Step
from users.forms import UserCreationForm
class LoginForm(AuthenticationForm):
username = forms.CharField(widget=forms.EmailInput(attrs={'placeholder': 'Email',
'class': 'form-control',
}))
password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder': 'Password',
'class': 'form-control',
}))
class SearchTrip(forms.Form):
"""
Pay attention that id fields are meant to be hidden, since we suppose they come from
an autocomplete AJAX request via an another CharField.
"""
origin_id = forms.IntegerField()
destination_id = forms.IntegerField()
datetime = forms.DateTimeField()
class PoolingUserForm(forms.ModelForm):
class Meta:
model = PoolingUser
# Exclude the one-to-one relation with User
fields = ['birth_date', 'driving_license', 'cellphone_number']
class TripForm(forms.ModelForm):
class Meta:
model = Trip
fields = ['date_origin', 'max_num_passengers']
class StepForm(forms.ModelForm):
class Meta:
model = Step
fields = ['origin', 'destination', 'hour_origin', 'hour_destination', 'max_price']
class UserForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
fields = ('email', 'first_name', 'last_name')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name in self.Meta.fields:
self[field_name].field.required = True
self['password1'].field.validators = [MinLengthValidator(6)]
| mit | Python |
375e214d6cea559e94a6f0de2ce2fc05c3dea424 | add country to the feed | qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/change_feed/management/commands/run_form_websocket_feed.py | corehq/apps/change_feed/management/commands/run_form_websocket_feed.py | from optparse import make_option
from django.core.management import BaseCommand
from django_countries.data import COUNTRIES
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage
import json
import time
from corehq.apps.domain.models import Domain
from corehq.util.quickcache import quickcache
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--from',
action='store',
dest='from',
default=None,
help="Start at this point in the changes feed (defaults to the end)"),
make_option('--sleep',
action='store',
dest='sleep',
default=None,
help="Sleep this long between emissions (useful for demos)"),
make_option('--compact',
action='store_true',
dest='compact',
default=False,
help="Use 'compact' mode - don't include additional domain metadata (faster)"),
)
def handle(self, *args, **options):
since = options['from']
sleep = float(options['sleep'] or '.01')
last_domain = None
change_feed = KafkaChangeFeed(topic=topics.FORM, group_id='form-feed')
for change in change_feed.iter_changes(since=since, forever=True):
if not change.deleted:
# this is just helpful for demos to find domain transitions
if change.metadata.domain != last_domain:
last_domain = change.metadata.domain
print change.sequence_id, last_domain
metadata = change.metadata.to_json()
if not options['compact']:
metadata['country'] = _get_country(change.metadata.domain)
message = RedisMessage(json.dumps(metadata))
RedisPublisher(facility='form-feed', broadcast=True).publish_message(message)
time.sleep(sleep)
@quickcache(vary_on=['domain'], timeout=600)
def _get_country(domain):
project = Domain.get_by_name(domain)
if project and project.deployment.countries:
return unicode(COUNTRIES.get(project.deployment.countries[0], ''))
| from optparse import make_option
from django.core.management import BaseCommand
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage
import json
import time
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--from',
action='store',
dest='from',
default=None,
help="Start at this point in the changes feed (defaults to the end)"),
make_option('--sleep',
action='store',
dest='sleep',
default=None,
help="Start at this point in the changes feed (defaults to the end)"),
)
def handle(self, *args, **options):
since = options['from']
sleep = float(options['sleep'] or '.01')
last_domain = None
change_feed = KafkaChangeFeed(topic=topics.FORM, group_id='form-feed')
for change in change_feed.iter_changes(since=since, forever=True):
if not change.deleted:
# this is just helpful for demos to find domain transitions
if change.metadata.domain != last_domain:
last_domain = change.metadata.domain
print change.sequence_id, last_domain
message = RedisMessage(json.dumps(change.metadata.to_json()))
RedisPublisher(facility='form-feed', broadcast=True).publish_message(message)
time.sleep(sleep)
| bsd-3-clause | Python |
12b8a94266411fc85f9566e25a4da2a9ca59a83d | Add setup sh scripts to handler.py | andreMonkey/camera_processing,andreMonkey/camera_processing,andreMonkey/camera_processing,andreMonkey/camera_processing | handler.py | handler.py | #!/usr/bin/env python
print("""
This example shows how you can monitor an analog input by attaching a function to its changed event.
You should see the analog value being printed out as it changes.
Try connecting up a rotary potentiometer or analog sensor to input one.
""")
from subprocess import call
import time
# discover bluetooth devices - sh ./discover.sh
call(["sh", "./discover.sh"])
# register oximeter sh ./registerDevice.sh 98:7B:F3:73:80:84 Oximeter
call(["sh", "sh ./registerDevice.sh 98:7B:F3:73:80:84 Oximeter"])
# execute command for websocket subscription - sh ./subscribe.sh ble987BF3738084 PULSE
call(["sh", "./subscribe.sh ble987BF3738084 PULSE"])
#call(["ls", "-l"])
# TODO Python module anschauen, so dass man nicht aus der bash neue scripts ausführen muss
call(["python", "camera_test.py"])
time.sleep(2)
call(["bash", "./processing/pixelsorting"])
# Work the camera with a button!
#time.sleep(0.001) # do not use all the cpu power
#pressed = 0
#pressed = read_digital_pin()
#if pressed:
#when_pressed = time.time()
#time_pressed = time.time() - when_pressed
#while pressed:
#if time_pressed > 4:
#call("sudo halt -n") # turn of PI , when pressed for more than 4 seconds
##call("sudo shutdown -h now") # turn of PI , when pressed for more than 4 seconds
| #!/usr/bin/env python
print("""
This example shows how you can monitor an analog input by attaching a function to its changed event.
You should see the analog value being printed out as it changes.
Try connecting up a rotary potentiometer or analog sensor to input one.
""")
from subprocess import call
import time
sh ./subscribe.sh ble987BF3738084 PULSE
#call(["ls", "-l"])
# TODO Python module anschauen, so dass man nicht aus der bash neue scripts ausführen muss
call(["python", "camera_test.py"])
time.sleep(2)
call(["bash", "./processing/pixelsorting"])
# Work the camera with a button!
#time.sleep(0.001) # do not use all the cpu power
#pressed = 0
#pressed = read_digital_pin()
#if pressed:
#when_pressed = time.time()
#time_pressed = time.time() - when_pressed
#while pressed:
#if time_pressed > 4:
#call("sudo halt -n") # turn of PI , when pressed for more than 4 seconds
##call("sudo shutdown -h now") # turn of PI , when pressed for more than 4 seconds
| bsd-2-clause | Python |
9909064d4dfcf9c1ae72b906318cda7b736ad9d2 | correct log file location | PhillyDSA/phillydsa-com,PhillyDSA/phillydsa-com,PhillyDSA/phillydsa-com,PhillyDSA/phillydsa-com | config/settings/production.py | config/settings/production.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import configparser
from django.core.exceptions import ImproperlyConfigured
from .base import * # noqa
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1']
config = configparser.ConfigParser()
try:
config.read(os.path.join(BASE_DIR, 'conf.ini')) # noqa
except Exception:
raise ImproperlyConfigured('BASE_DIR/confi.ini not found')
try:
SECRET_KEY = config['django_keys']['secret_key']
except KeyError:
raise ImproperlyConfigured(
"Keys not found. Ensure you have ['django_keys']['secret_key'] properly set.")
INSTALLED_APPS += ['anymail'] # noqa
ANYMAIL = {
"MAILGUN_API_KEY": config['mailgun']['api_key'],
"MAILGUN_SENDER_DOMAIN": 'mg.phillydsa.com',
}
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
DEFAULT_FROM_EMAIL = "do-not-reply@{0}.com".format(HOST_NAME) # noqa
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'phillydsa-django.log',
'maxBytes': 1024 * 1024,
'backupCount': 5,
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
}
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import configparser
from django.core.exceptions import ImproperlyConfigured
from .base import * # noqa
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1']
config = configparser.ConfigParser()
try:
config.read(os.path.join(BASE_DIR, 'conf.ini')) # noqa
except Exception:
raise ImproperlyConfigured('BASE_DIR/confi.ini not found')
try:
SECRET_KEY = config['django_keys']['secret_key']
except KeyError:
raise ImproperlyConfigured(
"Keys not found. Ensure you have ['django_keys']['secret_key'] properly set.")
INSTALLED_APPS += ['anymail'] # noqa
ANYMAIL = {
"MAILGUN_API_KEY": config['mailgun']['api_key'],
"MAILGUN_SENDER_DOMAIN": 'mg.phillydsa.com',
}
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
DEFAULT_FROM_EMAIL = "do-not-reply@{0}.com".format(HOST_NAME) # noqa
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/var/log/phillydsa-django.log',
'maxBytes': 1024 * 1024,
'backupCount': 5,
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
}
| agpl-3.0 | Python |
5722942e8c2898967d1ebc97e31c0cda5d39df3d | Improve security settings | FlowFX/reggae-cdmx,FlowFX/reggae-cdmx | config/settings/production.py | config/settings/production.py | from .common import *
ENVIRONMENT = 'production'
DEBUG = False
# Core settings
ALLOWED_HOSTS = ['.reggae-cdmx.com', ]
INTERNAL_IPS: list = []
# Security
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
# CSRF_USE_SESSIONS = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True # Force HTTPS
# Static files
STATIC_URL = '/static/'
STATIC_ROOT = os.path.abspath('/var/www/static/reggae-cdmx.com/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.abspath('/var/www/media/reggae-cdmx.com/')
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': get_secret('DB_NAME'),
'USER': get_secret('DB_USER'),
'PASSWORD': get_secret('DB_PASSWORD'),
'HOST': get_secret('DB_HOST'),
'PORT': get_secret('DB_PORT'),
},
}
# Rollbar Error Tracking https://rollbar.com/flowfx/Reggae-CDMX/
MIDDLEWARE += ['rollbar.contrib.django.middleware.RollbarNotifierMiddleware']
ROLLBAR = {
'access_token': get_secret('ROLLBAR_ACCESS_TOKEN'),
'environment': ENVIRONMENT,
'branch': 'master',
'root': BASE_DIR,
}
| from .common import *
ENVIRONMENT = 'production'
DEBUG = False
# Core settings
ALLOWED_HOSTS = ['.reggae-cdmx.com', ]
INTERNAL_IPS: list = []
# Security
# CSRF_COOKIE_SECURE = True
# CSRF_USE_SESSIONS = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True # Force HTTPS
# Static files
STATIC_URL = '/static/'
STATIC_ROOT = os.path.abspath('/var/www/static/reggae-cdmx.com/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.abspath('/var/www/media/reggae-cdmx.com/')
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': get_secret('DB_NAME'),
'USER': get_secret('DB_USER'),
'PASSWORD': get_secret('DB_PASSWORD'),
'HOST': get_secret('DB_HOST'),
'PORT': get_secret('DB_PORT'),
},
}
# Rollbar Error Tracking https://rollbar.com/flowfx/Reggae-CDMX/
MIDDLEWARE += ['rollbar.contrib.django.middleware.RollbarNotifierMiddleware']
ROLLBAR = {
'access_token': get_secret('ROLLBAR_ACCESS_TOKEN'),
'environment': ENVIRONMENT,
'branch': 'master',
'root': BASE_DIR,
}
| mit | Python |
5d46b0c7516451241037d6b46c66aa153b962b83 | update outip match | wangdiwen/ip2geo | ip2geo.py | ip2geo.py | #!/usr/bin/env python
# encoding=utf-8
import sys
import argparse
import requests
import urllib
import re
# parse the console params
parser = argparse.ArgumentParser()
parser.add_argument('domain', help='a domain like baidu.com | a ipv4 addr', type=str)
parser.add_argument('-v', help='show request detail info', action='store_true')
parser.add_argument('-s', help='show your public network GEO info', action='store_true')
args = parser.parse_args()
print('Query IP/Host is : %s\n' % args.domain)
# build remote url
ip = args.domain # query ip addr
host = 'ip.chinaz.com'
url = 'http://ip.chinaz.com/?IP=skinsharp.cn'
payload = {
'ip': ip,
}
headers = {
'Host': host,
'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': "en-US,en;q=0.5",
'Accept-Encoding': "gzip, deflate",
'Referer': url,
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
}
# http request via requests python lib
try:
r = requests.post(url, headers=headers, data=payload, timeout=3)
except Exception as e:
print('HTTP request error!')
sys.exit(1)
if args.v:
print('Request URL: %s' % url)
print('Request header:')
print(headers)
print('Request payload: %s' % urllib.urlencode(payload))
print('Response status: %d' % r.status_code)
print('Response header:')
print(r.headers) # dict type
print(r.text) # ascii text type
print('')
# filter result from the html content
re_code = re.compile(r'<span class=\"Whwtdhalf w(15|50)-0\">(.*?)</span>')
fields = re.findall(re_code, r.text)
# stdout the data
i = 0
cache = []
for zone in fields:
cache.append(zone[1].strip())
i = i+1
if i == 4:
print('%s \t%-15s \t%s \t%s' % (cache[0], cache[1], cache[2], cache[3]))
i = 0
del cache[:]
del cache
del i
# filter the local ip of public network
if args.s:
re_code = re.compile(r'</span>(.*)?\t<span class=\"pl10\">(.*)?</span>(.*)?<a href=\"')
fields = re.findall(re_code, r.text)
# print fields
if fields:
print("\n出口IP:%s,物理位置:%s" % (fields[0][0].encode('utf-8'), fields[0][2].encode('utf-8')))
del fields
| #!/usr/bin/env python
# encoding=utf-8
import sys
import argparse
import requests
import urllib
import re
# parse the console params
parser = argparse.ArgumentParser()
parser.add_argument('domain', help='a domain like baidu.com | a ipv4 addr', type=str)
parser.add_argument('-v', help='show request detail info', action='store_true')
parser.add_argument('-s', help='show your public network GEO info', action='store_true')
args = parser.parse_args()
print('Query IP/Host is : %s\n' % args.domain)
# build remote url
ip = args.domain # query ip addr
host = 'ip.chinaz.com'
url = 'http://ip.chinaz.com/?IP=skinsharp.cn'
payload = {
'ip': ip,
}
headers = {
'Host': host,
'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': "en-US,en;q=0.5",
'Accept-Encoding': "gzip, deflate",
'Referer': url,
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
}
# http request via requests python lib
try:
r = requests.post(url, headers=headers, data=payload, timeout=3)
except Exception as e:
print('HTTP request error!')
sys.exit(1)
if args.v:
print('Request URL: %s' % url)
print('Request header:')
print(headers)
print('Request payload: %s' % urllib.parse.urlencode(payload))
print('Response status: %d' % r.status_code)
print('Response header:')
print(r.headers) # dict type
# print(r.text) # ascii text type
print()
# filter result from the html content
re_code = re.compile(r'<span class=\"Whwtdhalf w(15|50)-0\">(.*?)</span>')
fields = re.findall(re_code, r.text)
# stdout the data
i = 0
cache = []
for zone in fields:
cache.append(zone[1].strip())
i = i+1
if i == 4:
print('%s \t%-15s \t%s \t%s' % (cache[0], cache[1], cache[2], cache[3]))
i = 0
del cache[:]
del cache
del i
# filter the local ip of public network
if args.s:
re_code = re.compile(r'您来自:</span>(.*)?<span class=\"pl10\">所在区域:</span>(.*)?<a href')
fields = re.findall(re_code, r.text)
for i in fields:
print('\n你的公网IP: %s, IP物理地址:%s' % (i[0].strip(), i[1]))
del fields
| mit | Python |
e818de96908e4ec6801d42e54c29036b3cc373b3 | Update denormalization command | barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api | project/apps/api/management/commands/denormalize.py | project/apps/api/management/commands/denormalize.py | from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Convention,
Contest,
Award,
Contestant,
Entrant,
Session,
Performance,
Song,
Singer,
Director,
Panelist,
)
class Command(BaseCommand):
help = "Command to denormailze data."
def handle(self, *args, **options):
vs = Convention.objects.all()
for v in vs:
v.save()
ts = Contest.objects.all()
for t in ts:
t.save()
ps = Panelist.objects.all()
for p in ps:
p.save()
ws = Award.objects.all()
for w in ws:
w.save()
es = Entrant.objects.all()
for e in es:
e.save()
cs = Contestant.objects.all()
for c in cs:
c.save()
ss = Session.objects.all()
for s in ss:
s.save()
as_ = Performance.objects.all()
for a in as_:
a.save()
ps = Song.objects.all()
for p in ps:
p.save()
ss = Singer.objects.all()
for s in ss:
s.save()
js = Panelist.objects.all()
for j in js:
j.save()
ds = Director.objects.all()
for d in ds:
d.save()
return "Done"
| from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Convention,
Contest,
Contestant,
Performance,
Song,
Group,
Singer,
Director,
Panelist,
)
class Command(BaseCommand):
help = "Command to denormailze data."
def handle(self, *args, **options):
vs = Convention.objects.all()
for v in vs:
v.save()
ts = Contest.objects.all()
for t in ts:
t.save()
cs = Contestant.objects.all()
for c in cs:
c.save()
as_ = Performance.objects.all()
for a in as_:
a.save()
ps = Song.objects.all()
for p in ps:
p.save()
ss = Singer.objects.all()
for s in ss:
s.save()
js = Panelist.objects.all()
for j in js:
j.save()
ds = Director.objects.all()
for d in ds:
d.save()
return "Done"
| bsd-2-clause | Python |
1dcee2da40eec36f049d501048b33be1208ef90b | fix a bug with import in DBFileSystemGC | yliu120/dbsystem,yliu120/dbsystem,yliu120/dbsystem | HW3/dbsys-hw3/DBFileSystemGC.py | HW3/dbsys-hw3/DBFileSystemGC.py | import os;
class DBFileSystemGC:
defaultDBPath = './data';
# close all the files in that directory.
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
def gc(self, opMarker=None, db=None):
fileNames = self.list_files(defaultDBPath);
for file in fileNames:
fName = defaultDBPath + '/' + file;
f = open(fName, 'r');
f.close();
if db == None:
db = Database(dataDir=defaultDBPath);
if opMarker == None:
opMarker = "";
tmpRel = list( db.storage.fileMgr.relations() );
for relKey in tmpRel:
if relKey.startswith('tmp') and relKey.endswith(opMarker):
db.storage.fileMgr.removeRelation( relKey );
| from Database import Database
from Catalog.Schema import DBSchema
import os;
class DBFileSystemGC:
defaultDBPath = './data';
# close all the files in that directory.
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
def gc(self, opMarker=None, db=None):
fileNames = self.list_files(defaultDBPath);
for file in fileNames:
fName = defaultDBPath + '/' + file;
f = open(fName, 'r');
f.close();
if db == None:
db = Database(dataDir=defaultDBPath);
if opMarker == None:
opMarker = "";
tmpRel = list( db.storage.fileMgr.relations() );
for relKey in tmpRel:
if relKey.startswith('tmp') and relKey.endswith(opMarker):
db.storage.fileMgr.removeRelation( relKey );
| apache-2.0 | Python |
51ea9b839c1ef4b9150da3fb35d4a0be4799674b | Update denormalization command | dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore,barberscore/barberscore-api,dbinetti/barberscore-django,barberscore/barberscore-api,dbinetti/barberscore-django | project/apps/api/management/commands/denormalize.py | project/apps/api/management/commands/denormalize.py | from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Award,
Certification,
Chapter,
Chart,
Contest,
Contestant,
Convention,
Group,
Judge,
Member,
Organization,
Performance,
Performer,
Person,
Role,
Round,
Score,
Session,
Song,
Submission,
Venue,
)
class Command(BaseCommand):
help = "Command to denormalize names."
def handle(self, *args, **options):
# Primitives
[i.save() for i in Award.objects.all()]
[i.save() for i in Chapter.objects.all()]
[i.save() for i in Chart.objects.all()]
[i.save() for i in Convention.objects.all()]
[i.save() for i in Group.objects.all()]
[i.save() for i in Organization.objects.all()]
[i.save() for i in Person.objects.all()]
[i.save() for i in Venue.objects.all()]
# Branches
[i.save() for i in Session.objects.all()]
[i.save() for i in Certification.objects.all()]
[i.save() for i in Judge.objects.all()]
[i.save() for i in Member.objects.all()]
[i.save() for i in Role.objects.all()]
[i.save() for i in Round.objects.all()]
[i.save() for i in Contest.objects.all()]
[i.save() for i in Performer.objects.all()]
[i.save() for i in Contestant.objects.all()]
[i.save() for i in Performance.objects.all()]
[i.save() for i in Submission.objects.all()]
[i.save() for i in Song.objects.all()]
[i.save() for i in Score.objects.all()]
return "Denormalized"
| from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Award,
Certification,
Chapter,
Chart,
Contest,
Contestant,
Convention,
Group,
Judge,
Member,
Organization,
Performance,
Performer,
Person,
Role,
Round,
Score,
Session,
Song,
Submission,
Venue,
)
class Command(BaseCommand):
help = "Command to denormalize names."
def handle(self, *args, **options):
# Primitives
[i.save() for i in Award.objects.all()]
[i.save() for i in Chapter.objects.all()]
[i.save() for i in Chart.objects.all()]
[i.save() for i in Convention.objects.all()]
[i.save() for i in Group.objects.all()]
[i.save() for i in Organization.objects.all()]
[i.save() for i in Person.objects.all()]
[i.save() for i in Venue.objects.all()]
# Branches
[i.save() for i in Session.objects.all()]
[i.save() for i in Certification.objects.all()]
[i.save() for i in Judge.objects.all()]
[i.save() for i in Member.objects.all()]
[i.save() for i in Role.objects.all()]
[i.save() for i in Round.objects.all()]
[i.save() for i in Contest.objects.all()]
[i.save() for i in Performer.objects.all()]
[i.save() for i in Contestant.objects.all()]
[i.save() for i in Performance.objects.all()]
[i.save() for i in Submission.objects.all()]
[i.save() for i in Song.objects.all()]
[i.save() for i in Score.objects.all()]
return "Done"
| bsd-2-clause | Python |
86c213ab74853aaad620f31d0fabd45f9e61a7df | Remove messaging - it's too noisy. | FreddieSanchez/RunnitAutoMod | runnit-bot.py | runnit-bot.py | """
This script is used to helpout AutoMod on some things it can't do.
"""
import datetime
import os,sys
import praw
import logging
import re
import sched, time
USERNAME=os.environ['USER']
PASSWORD=os.environ['PASSWORD']
AUTHOR=os.environ['AUTHOR']
CommentsRemoved = set();
def login():
r = praw.Reddit('RunnitAutoMod test by ' + AUTHOR);
r.login(USERNAME, PASSWORD, disable_warning=True)
return r
def message_me(reddit_session ):
title = 'logged in added'
body= 'I was able to login and send you a message!'
reddit_session.send_message(AUTHOR, title, body)
def comments_by_user(reddit_session, subreddit, user_name):
all_comments = reddit_session.get_comments(subreddit, limit="none")
user_comments = [comment for comment in all_comments if comment.author.name == user_name]
return user_comments;
def run(reddit_session):
#find all comments by AutoModerator
comments = comments_by_user(reddit_session, 'Running', 'AutoModerator')
logging.info(str(datetime.datetime.now()) + ':Found ' + str(len(comments)) + ' for AutoModerator');
#get all comments with negative score
negative_score_comments = [ c for c in comments if c.score < 0 ]
logging.info(str(datetime.datetime.now()) + ':Found ' + str(len(negative_score_comments)) + ' negative for AutoModerator');
#remove the comments
for comment in negative_score_comments:
logging.debug(comment);
if comment not in CommentsRemoved:
comment.remove();
CommentsRemoved.add(comment);
if __name__ == '__main__':
#login
r = login();
#Search every 5 minutes.
while True:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.info(str(datetime.datetime.now()) + ":Starting")
#run the bot! check for negative automod comments in /r/running
run(r);
logging.info(str(datetime.datetime.now()) + ":Ending")
#done for now, check again in 5 minutes.
time.sleep(300);
| """
This script is used to helpout AutoMod on some things it can't do.
"""
import datetime
import os,sys
import praw
import logging
import re
import sched, time
USERNAME=os.environ['USER']
PASSWORD=os.environ['PASSWORD']
AUTHOR=os.environ['AUTHOR']
CommentsRemoved = set();
def login():
r = praw.Reddit('RunnitAutoMod test by ' + AUTHOR);
r.login(USERNAME, PASSWORD, disable_warning=True)
return r
def message_me(reddit_session ):
title = 'logged in added'
body= 'I was able to login and send you a message!'
reddit_session.send_message(AUTHOR, title, body)
def comments_by_user(reddit_session, subreddit, user_name):
all_comments = reddit_session.get_comments(subreddit, limit="none")
user_comments = [comment for comment in all_comments if comment.author.name == user_name]
return user_comments;
def run(reddit_session):
#find all comments by AutoModerator
comments = comments_by_user(reddit_session, 'Running', 'AutoModerator')
logging.info(str(datetime.datetime.now()) + ':Found ' + str(len(comments)) + ' for AutoModerator');
#get all comments with negative score
negative_score_comments = [ c for c in comments if c.score < 0 ]
logging.info(str(datetime.datetime.now()) + ':Found ' + str(len(negative_score_comments)) + ' negative for AutoModerator');
#remove the comments
recent_comments_removed = set();
for comment in negative_score_comments:
logging.debug(comment);
if comment not in CommentsRemoved:
comment.remove();
CommentsRemoved.add(comment);
recent_comments_removed.add(comment);
# message the author
if len(recent_comments_removed) > 0:
title = 'Removed AutoMod comments due to negative score';
body = 'Removed the following comments. \n';
body += '\n'.join([comment.permalink for comment in recent_comments_removed]);
reddit_session.send_message(AUTHOR, title, body)
if __name__ == '__main__':
#login
r = login();
#Search every 5 minutes.
while True:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.info(str(datetime.datetime.now()) + ":Starting")
#run the bot! check for negative automod comments in /r/running
run(r);
logging.info(str(datetime.datetime.now()) + ":Ending")
#done for now, check again in 5 minutes.
time.sleep(300);
| mit | Python |
ee3941e2c3a0355314b270c04de6a623f5a0730c | Add statistics about general speaking | thomasleese/smartbot-old,Cyanogenoid/smartbot,tomleese/smartbot,Muzer/smartbot | plugins/stats.py | plugins/stats.py | import operator
class Plugin:
def __call__(self, bot):
bot.on_hear(r".*", self.on_hear_anything)
bot.on_hear(r"(lol|:D|:P)", self.on_hear)
bot.on_respond(r"stats", self.on_respond)
bot.on_help("stats", self.on_help)
def on_hear_anything(self, bot, msg, reply):
stats = bot.storage.get("stats", {})
word_stats = stats.get(word, {})
word_stats[""] = word_stats.get("", 0) + 1
stats[word] = word_stats
bot.storage["stats"] = stats
def on_hear(self, bot, msg, reply):
stats = bot.storage.get("stats", {})
for word in msg["match"]:
word_stats = stats.get(word, {})
word_stats[msg["sender"]] = word_stats.get(msg["sender"], 0) + 1
stats[word] = word_stats
break # only allow one word
bot.storage["stats"] = stats
def on_respond(self, bot, msg, reply):
def respond(word, description):
stats = bot.storage.get("stats", {}).get(word, {})
if stats:
person = max(stats.items(), key=operator.itemgetter(1))[0]
reply(description.format(person))
respond("", "{0} is most talkative.")
respond("lol", "{0} laughs the most.")
respond(":D", "{0} is the happiest.")
respond(":P", "{0} sticks their tounge out the most.")
def on_help(self, bot, msg, reply):
reply("Display statistics.")
reply("Syntax: stats")
| import operator
class Plugin:
def __call__(self, bot):
bot.on_hear(r"(lol|:D|:P)", self.on_hear)
bot.on_respond(r"stats", self.on_respond)
bot.on_help("stats", self.on_help)
def on_hear(self, bot, msg, reply):
stats = bot.storage.get("stats", {})
for word in msg["match"]:
word_stats = stats.get(word, {})
word_stats[msg["sender"]] = word_stats.get(msg["sender"], 0) + 1
stats[word] = word_stats
bot.storage["stats"] = stats
def on_respond(self, bot, msg, reply):
def respond(word, description):
stats = bot.storage.get("stats", {}).get(word, {})
if stats:
person = max(stats.items(), key=operator.itemgetter(1))[0]
reply(description.format(person))
respond("lol", "{0} laughs the most.")
respond(":D", "{0} is the happiest.")
respond(":P", "{0} sticks their tounge out the most.")
def on_help(self, bot, msg, reply):
reply("Syntax: stats")
| mit | Python |
02f157652f5b93a5f8c7af8ad6e4caf31f038192 | bump version and fix logging issues | mistermatti/plugz,mistermatti/plugz | plugz/loading.py | plugz/loading.py | import os
import sys
import inspect
from collections import defaultdict
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from plugz import PluginTypeBase
import errors
""" Main storage of the plugins."""
_loaded_plugins = defaultdict(list)
def register_plugin(f):
""" Register the given class as plugin if valid.
Will be used as decorator in the framework. This function
also does basic sanity checks in order to reject invalid
plugins.
"""
# some basic sanity tests follow
# ------------------------------
if not issubclass(f, PluginTypeBase): # make sure that the plugin is of correct type
logger.warning('%s cannot be registered. It does not inherit from PluginTypeBase.' % f)
return f
if f.__abstractmethods__: # make sure all abstract methods have been implemented
methods = ','.join(f.__abstractmethods__)
logger.warning('%s cannot be registerd. It has unimplemented abstract methods: %s' % (f, methods))
return f
# register the plugin in the system
_loaded_plugins[getattr(f, 'plugintype', 'unsorted')].append(f)
# now just return the collable which is a plugin class in this case
return f
def load_plugins(paths, plugintype):
""" Load plugins of given type in given directories. """
# check if the given type is None
if not plugintype:
raise errors.NoValidPluginTypeError()
# check if the given PluginType really is a subclass
# of the provided PluginTypeBase
elif not issubclass(plugintype, PluginTypeBase):
raise errors.InvalidPluginTypeError()
# if no paths are given, complain.
elif not paths:
raise errors.NoPluginPathsProvided()
# if an invalid path is given, report that problem
else:
for path in paths:
if not os.path.isdir(path):
raise errors.InvalidPluginPath()
# we need to clear plugins that were loaded before
del _loaded_plugins[plugintype.plugintype][:]
# otherwise all data is valid for loading some plugins
for path in paths:
# find all the files and try to register them as plugins
sys.path.insert(0, path)
for pf in os.listdir(path):
if plugintype.is_valid_file(pf):
# as long as only files with extensions are used, this works.
base = os.path.basename(pf).split(os.path.extsep)[0]
_load_plugin(base)
sys.path.pop(0)
return _loaded_plugins[plugintype.plugintype]
def _load_plugin(plugin_name):
logger.debug('Loading %s...' % plugin_name)
p = __import__(plugin_name)
| import os
import sys
import inspect
from collections import defaultdict
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from plugz import PluginTypeBase
import errors
""" Main storage of the plugins."""
_loaded_plugins = defaultdict(list)
def register_plugin(f):
""" Register the given class as plugin if valid.
Will be used as decorator in the framework. This function
also does basic sanity checks in order to reject invalid
plugins.
"""
# some basic sanity tests follow
# ------------------------------
if not issubclass(f, PluginTypeBase): # make sure that the plugin is of correct type
logger.warning('%s cannot be registered. It does not inherit from PluginTypeBase.' % f}
return f
if f.__abstractmethods__: # make sure all abstract methods have been implemented
methods = ','.join(f.__abstractmethods__)
logger.warning('%s cannot be registerd. It has unimplemented abstract methods: %s' % (f, methods)
return f
# register the plugin in the system
_loaded_plugins[getattr(f, 'plugintype', 'unsorted')].append(f)
# now just return the collable which is a plugin class in this case
return f
def load_plugins(paths, plugintype):
""" Load plugins of given type in given directories. """
# check if the given type is None
if not plugintype:
raise errors.NoValidPluginTypeError()
# check if the given PluginType really is a subclass
# of the provided PluginTypeBase
elif not issubclass(plugintype, PluginTypeBase):
raise errors.InvalidPluginTypeError()
# if no paths are given, complain.
elif not paths:
raise errors.NoPluginPathsProvided()
# if an invalid path is given, report that problem
else:
for path in paths:
if not os.path.isdir(path):
raise errors.InvalidPluginPath()
# we need to clear plugins that were loaded before
del _loaded_plugins[plugintype.plugintype][:]
# otherwise all data is valid for loading some plugins
for path in paths:
# find all the files and try to register them as plugins
sys.path.insert(0, path)
for pf in os.listdir(path):
if plugintype.is_valid_file(pf):
# as long as only files with extensions are used, this works.
base = os.path.basename(pf).split(os.path.extsep)[0]
_load_plugin(base)
sys.path.pop(0)
return _loaded_plugins[plugintype.plugintype]
def _load_plugin(plugin_name):
logger.debug('Loading %s...' % plugin_name))
p = __import__(plugin_name)
| bsd-3-clause | Python |
d34f8e7a68b3c3b8023ac298f31a2ff52595ed80 | Fix some encoding problems | sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer | documents/management/commands/clean_old_versions.py | documents/management/commands/clean_old_versions.py | import shutil, tempfile, os, csv
from optparse import make_option
from daisyproducer.documents.models import Document
from django.core.management.base import BaseCommand
from django.utils.encoding import smart_str
class Command(BaseCommand):
args = ''
help = 'Clean out old versions from the database and the file system'
option_list = BaseCommand.option_list + (
make_option(
'--numberOfVersionsKept',
type="int",
dest='numberOfVersionsKept',
default=7,
help='Number of versions that should be kept for a document. If a document contains more versions than the specified number only said number of versions are kept. Older versions are removed.'),
)
def handle(self, *args, **options):
numberOfVersionsKept = options['numberOfVersionsKept']
verbosity = int(options['verbosity'])
for document in Document.objects.all():
if verbosity >= 2:
self.stdout.write('Removing excess versions for %s...\n' % smart_str(document.title))
document.remove_excess_versions(numberOfVersionsKept)
| import shutil, tempfile, os, csv
from optparse import make_option
from daisyproducer.documents.models import Document
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = ''
help = 'Clean out old versions from the database and the file system'
option_list = BaseCommand.option_list + (
make_option(
'--numberOfVersionsKept',
type="int",
dest='numberOfVersionsKept',
default=7,
help='Number of versions that should be kept for a document. If a document contains more versions than the specified number only said number of versions are kept. Older versions are removed.'),
)
def handle(self, *args, **options):
numberOfVersionsKept = options['numberOfVersionsKept']
verbosity = int(options['verbosity'])
for document in Document.objects.all():
if verbosity >= 2:
self.stdout.write('Removing excess versions for %s...\n' % document.title)
document.remove_excess_versions(numberOfVersionsKept)
| agpl-3.0 | Python |
517f9062be966c4374f2efbdfb636872f45024bd | remove https from redirect middleware | blcook223/bencook.info,blcook223/bencook.info,blcook223/bencook.info,blcook223/bencook.info | core/middleware.py | core/middleware.py | """
Middleware for core app.
"""
from django.contrib.sites.models import Site
from django.http import HttpResponsePermanentRedirect
from django.core.urlresolvers import resolve
from django.core import urlresolvers
from django.utils.http import urlquote
# http://eikke.com/django-domain-redirect-middleware/
class DomainRedirectMiddleware(object):
"""
Redirect to canonical domains.
"""
def process_request(self, request):
"""
Return permanent redirect to correct domain.
"""
host = request.get_host()
site = Site.objects.get_current()
if host == site.domain:
return None
try:
resolve(request.path)
except urlresolvers.Resolver404:
return None
new_uri = '%s://%s%s%s' % (
'http',
site.domain,
urlquote(request.path),
(request.method == 'GET' and len(request.GET) > 0) and
'?{0}'.format(request.GET.urlencode()) or
''
)
return HttpResponsePermanentRedirect(new_uri)
| """
Middleware for core app.
"""
from django.contrib.sites.models import Site
from django.http import HttpResponsePermanentRedirect
from django.core.urlresolvers import resolve
from django.core import urlresolvers
from django.utils.http import urlquote
# http://eikke.com/django-domain-redirect-middleware/
class DomainRedirectMiddleware(object):
"""
Redirect to canonical domains.
"""
def process_request(self, request):
"""
Return permanent redirect to correct domain.
"""
host = request.get_host()
site = Site.objects.get_current()
if host == site.domain:
return None
try:
resolve(request.path)
except urlresolvers.Resolver404:
return None
new_uri = '%s://%s%s%s' % (
request.is_secure() and 'https' or 'http',
site.domain,
urlquote(request.path),
(request.method == 'GET' and len(request.GET) > 0) and
'?{0}'.format(request.GET.urlencode()) or
''
)
return HttpResponsePermanentRedirect(new_uri)
| isc | Python |
d9b3f20eba5a18cffc9ae2022a22c06be5880bf5 | Update admin.py | ulule/django-courriers,ulule/django-courriers | courriers/admin.py | courriers/admin.py | from django.contrib import admin
from django.conf.urls.defaults import patterns, url
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from .models import Newsletter, NewsletterItem, NewsletterSubscriber, NewsletterList
class NewsletterAdmin(admin.ModelAdmin):
change_form_template = 'admin/courriers/newsletter/change_form.html'
list_display = ('name', 'headline', 'languages', 'published_at', 'status', 'newsletter_list',)
list_filter = ('published_at', 'status',)
def get_urls(self):
urls = super(NewsletterAdmin, self).get_urls()
my_urls = patterns(
'',
url(r'^send/(?P<newsletter_id>(\d+))/$',
self.send_newsletter,
name="send_newsletter")
)
return my_urls + urls
def send_newsletter(self, request, newsletter_id):
from courriers.backends import get_backend
backend_klass = get_backend()
backend = backend_klass()
newsletter = get_object_or_404(Newsletter, pk=newsletter_id)
backend.send_mails(newsletter)
self.message_user(request, _('The newsletter %s has been sent.') % newsletter)
return HttpResponseRedirect(reverse('admin:courriers_newsletter_change', args=(newsletter.id,)))
class NewsletterItemAdmin(admin.ModelAdmin):
list_display = ('description', 'content_type', 'newsletter',)
class NewsletterSubscriberAdmin(admin.ModelAdmin):
list_display = ('email', 'user', 'lang', 'is_unsubscribed',)
list_filter = ('is_unsubscribed',)
class NewsletterListAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'languages', 'created_at',)
admin.site.register(Newsletter, NewsletterAdmin)
admin.site.register(NewsletterItem, NewsletterItemAdmin)
admin.site.register(NewsletterSubscriber, NewsletterSubscriberAdmin)
admin.site.register(NewsletterList, NewsletterListAdmin)
| from django.contrib import admin
from django.conf.urls.defaults import patterns, url
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from .models import Newsletter, NewsletterItem, NewsletterSubscriber, NewsletterList
class NewsletterAdmin(admin.ModelAdmin):
change_form_template = 'admin/courriers/newsletter/change_form.html'
list_display = ('name', 'headline', 'languages', 'published_at', 'status', 'newsletter_list',)
list_filter = ('published_at', 'status',)
def get_urls(self):
urls = super(NewsletterAdmin, self).get_urls()
my_urls = patterns(
'',
url(r'^send/(?P<newsletter_id>(\d+))/$',
self.send_newsletter,
name="send_newsletter")
)
return my_urls + urls
def send_newsletter(self, request, newsletter_id):
from courriers.backends import get_backend
backend_klass = get_backend()
backend = backend_klass()
newsletter = get_object_or_404(Newsletter, pk=newsletter_id)
backend.send_mails(newsletter)
self.message_user(request, _('The newsletter %s has been sent.') % newsletter)
return HttpResponseRedirect(reverse('admin:courriers_newsletter_change', args=(newsletter.id,)))
class NewsletterItemAdmin(admin.ModelAdmin):
list_display = ('description', 'content_type', 'newsletter',)
class NewsletterSubscriberAdmin(admin.ModelAdmin):
list_display = ('email', 'user', 'lang', 'is_unsubscribed',)
list_filter = ('is_unsubscribed',)
class NewsletterListAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'languages', 'created_at',)
admin.site.register(Newsletter, Newsletter)
admin.site.register(NewsletterItem, NewsletterItemAdmin)
admin.site.register(NewsletterSubscriber, NewsletterSubscriberAdmin)
admin.site.register(NewsletterList, NewsletterListAdmin)
| mit | Python |
2d4cce26f8b6c45b0c92783475285273e6a0722c | Allow hardcoded error headers per #10 | nullism/pycnic,nullism/pycnic | pycnic/errors.py | pycnic/errors.py | from .data import STATUSES
class PycnicError(Exception):
pass
class HTTPError(PycnicError):
status_code = 0
status = None
message = None
data = None
headers = None
def __init__(self, status_code, message, data=None, headers=[]):
if self.status_code:
status_code = self.status_code
self.status_code = status_code
self.status = STATUSES[status_code]
self.message = message
self.data = data
if headers:
self.headers = headers
def response(self):
return {
"status": self.status,
"status_code": self.status_code,
"error":self.message,
"data":self.data
}
class HTTPNumeric(HTTPError):
status_code = 0
def __init__(self, message, data=None, headers=[]):
super(HTTPError, self).__init__(self.status_code, message, data, headers)
self.status = STATUSES[self.status_code]
self.message = message
self.data = data
self.headers = headers
class HTTP_400(HTTPNumeric):
status_code = 400
class HTTP_401(HTTPNumeric):
status_code = 401
class HTTP_403(HTTPNumeric):
status_code = 403
class HTTP_404(HTTPNumeric):
status_code = 404
class HTTP_405(HTTPNumeric):
status_code = 405
class HTTP_408(HTTPNumeric):
status_code = 408
class HTTP_500(HTTPNumeric):
status_code = 500
| from .data import STATUSES
class PycnicError(Exception):
pass
class HTTPError(PycnicError):
status_code = 0
status = None
message = None
data = None
headers = None
def __init__(self, status_code, message, data=None, headers=[]):
if self.status_code:
status_code = self.status_code
self.status_code = status_code
self.status = STATUSES[status_code]
self.message = message
self.data = data
self.headers = headers
def response(self):
return {
"status": self.status,
"status_code": self.status_code,
"error":self.message,
"data":self.data
}
class HTTPNumeric(HTTPError):
status_code = 0
def __init__(self, message, data=None, headers=[]):
super(HTTPError, self).__init__(self.status_code, message, data, headers)
self.status = STATUSES[self.status_code]
self.message = message
self.data = data
self.headers = headers
class HTTP_400(HTTPNumeric):
status_code = 400
class HTTP_401(HTTPNumeric):
status_code = 401
class HTTP_403(HTTPNumeric):
status_code = 403
class HTTP_404(HTTPNumeric):
status_code = 404
class HTTP_405(HTTPNumeric):
status_code = 405
class HTTP_408(HTTPNumeric):
status_code = 408
class HTTP_500(HTTPNumeric):
status_code = 500
| mit | Python |
f6ff634bc532585fefb7c30d33709857f6b6d336 | add kepsff to init | gully/PyKE,christinahedges/PyKE | pyke/__init__.py | pyke/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
from .version import __version__
from .keparray import *
from .kepbls import *
from .kepclip import *
from .kepconvert import *
from .kepcotrend import *
from .kepdetrend import *
from .kepdiffim import *
from .kepdraw import *
from .kepdynamic import *
from .kepextract import *
from .kepfilter import *
from .kepfit import *
from .kepflatten import *
from .kepfold import *
from .kepfourier import *
from .kepft import *
from .kepfunc import *
from .kephead import *
from .kepimages import *
from .kepio import *
from .kepkey import *
from .kepmask import *
from .kepmsg import *
from .kepoutlier import *
from .keppca import *
from .keppixseries import *
from .kepplot import *
from .kepprf import *
from .kepprfphot import *
from .kepsff import *
from .kepsmooth import *
from .kepstat import *
from .kepstddev import *
from .kepstitch import *
from .keptimefix import *
from .keptrial import *
from .keptrim import *
from .kepwindow import *
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
from .version import __version__
from .keparray import *
from .kepbls import *
from .kepclip import *
from .kepconvert import *
from .kepcotrend import *
from .kepdetrend import *
from .kepdiffim import *
from .kepdraw import *
from .kepdynamic import *
from .kepextract import *
from .kepfilter import *
from .kepfit import *
from .kepflatten import *
from .kepfold import *
from .kepfourier import *
from .kepft import *
from .kepfunc import *
from .kephead import *
from .kepimages import *
from .kepio import *
from .kepkey import *
from .kepmask import *
from .kepmsg import *
from .kepoutlier import *
from .keppca import *
from .keppixseries import *
from .kepplot import *
from .kepprf import *
from .kepprfphot import *
from .kepsmooth import *
from .kepstat import *
from .kepstddev import *
from .kepstitch import *
from .keptimefix import *
from .keptrial import *
from .keptrim import *
from .kepwindow import *
| mit | Python |
5260a603965dbe5a36d6b1737692c5fd093b7944 | Make kmer positions 1-offset. | drtconway/pykmer | pykmer/basics.py | pykmer/basics.py | from pykmer.bits import ffs, rev, popcnt, m1
nuc = { 'A':0, 'a':0, 'C':1, 'c':1, 'G':2, 'g':2, 'T':3, 't':3 }
def kmer(seq):
"Turn a string in to an integer k-mer"
r = 0
for c in seq:
if c not in nuc:
return None
r = (r << 2) | nuc[c]
return r
def render(k, x):
"Turn an integer k-mer in to a string"
r = []
for i in range(k):
r.append("ACGT"[x&3])
x >>= 2
return ''.join(r[::-1])
fas = [
'*', # 0000
'A', # 0001
'C', # 0010
'M', # 0011
'G', # 0100
'R', # 0101
'S', # 0110
'V', # 0111
'T', # 1000
'W', # 1001
'Y', # 1010
'H', # 1011
'K', # 1100
'D', # 1101
'B', # 1110
'N' # 1111
]
def fasta(ind):
return fas[ind]
def rc(k, x):
"Compute the reverse complement of a k-mer"
return rev(~x) >> (64 - 2*k)
def ham(x, y):
"Compute the hamming distance between two k-mers."
z = x ^ y
# NB: if k > 32, the constant below will need extending.
v = (z | (z >> 1)) & m1
return popcnt(v)
def lcp(k, x, y):
"Find the length of the common prefix between 2 k-mers"
z = x ^ y
if z == 0:
return k
v = 1 + ffs(z) // 2
return k - v
def kmers(k, str, bothStrands=False):
"Extract k-mers from a string sequence"
for i in range(len(str) - k + 1):
x = kmer(str[i:i+k])
if x:
yield x
if bothStrands:
yield rc(k, x)
def kmersWithPos(k, str, bothStrands=False):
"Extract k-mers and positions (1-based, negative denoting rc-strand) from a string sequence"
for i in range(len(str) - k + 1):
j = i + 1
x = kmer(str[i:i+k])
if x:
yield (x, j)
if bothStrands:
yield (rc(k, x), -j)
| from pykmer.bits import ffs, rev, popcnt, m1
nuc = { 'A':0, 'a':0, 'C':1, 'c':1, 'G':2, 'g':2, 'T':3, 't':3 }
def kmer(seq):
"Turn a string in to an integer k-mer"
r = 0
for c in seq:
if c not in nuc:
return None
r = (r << 2) | nuc[c]
return r
def render(k, x):
"Turn an integer k-mer in to a string"
r = []
for i in range(k):
r.append("ACGT"[x&3])
x >>= 2
return ''.join(r[::-1])
fas = [
'*', # 0000
'A', # 0001
'C', # 0010
'M', # 0011
'G', # 0100
'R', # 0101
'S', # 0110
'V', # 0111
'T', # 1000
'W', # 1001
'Y', # 1010
'H', # 1011
'K', # 1100
'D', # 1101
'B', # 1110
'N' # 1111
]
def fasta(ind):
return fas[ind]
def rc(k, x):
"Compute the reverse complement of a k-mer"
return rev(~x) >> (64 - 2*k)
def ham(x, y):
"Compute the hamming distance between two k-mers."
z = x ^ y
# NB: if k > 32, the constant below will need extending.
v = (z | (z >> 1)) & m1
return popcnt(v)
def lcp(k, x, y):
"Find the length of the common prefix between 2 k-mers"
z = x ^ y
if z == 0:
return k
v = 1 + ffs(z) // 2
return k - v
def kmers(k, str, bothStrands=False):
"Extract k-mers from a string sequence"
for i in range(len(str) - k + 1):
x = kmer(str[i:i+k])
if x:
yield x
if bothStrands:
yield rc(k, x)
def kmersWithPos(k, str, bothStrands=False):
"Extract k-mers and positions from a string sequence"
for i in range(len(str) - k + 1):
x = kmer(str[i:i+k])
if x:
yield (x, i)
if bothStrands:
yield (rc(k, x), -i)
| apache-2.0 | Python |
cf0223f6a725641431fa9f704cab47d96d4783a2 | Add method for parsing articles from archive | HIIT/mediacollection | sites/ksml.py | sites/ksml.py | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import processor
from datetime import datetime
def parse( url ):
r = requests.get( url )
if r.status_code == 404:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
r.encoding = 'UTF-8'
soup = BeautifulSoup( r.text, "html.parser" )
article = soup.find( role = 'main' )
if article == None:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
processor.decompose_all( article.find_all( 'script' ) )
categories = processor.collect_categories( article.find_all( class_ = 'article__section' ) )
datetime_list = processor.collect_datetime( article.find( class_ = 'article__published' ) )
author = processor.collect_text( article.find( class_ = 'article__author' ) )
title = processor.collect_text( article.find( class_ = 'article__title' ) )
ingress = processor.collect_text( article.find( class_ = 'article__summary' ) )
text = processor.collect_text( article.find( class_ = 'article__body' ) )
images = processor.collect_images_by_parent( article.find_all( class_ = 'article__images' ), '' )
captions = processor.collect_image_captions( article.find_all( itemprop = 'caption description' ) )
return processor.create_dictionary('Keskisuomalainen', url, r.status_code, categories, datetime_list, author, title, ingress, text, images, captions)
def parse_from_archive(url, content):
article = BeautifulSoup( content, "html.parser" )
if article == None:
return processor.create_dictionary('', url, 404, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
processor.decompose_all( article.find_all( 'script' ) )
meta = article.find( class_ = 'date' )
categories = [processor.collect_text(meta).split(' ')[0]]
datetime_list = str(processor.collect_datetime( meta ))
author = processor.collect_text( article.find( class_ = 'author'), True )
processor.decompose( meta )
title_parts = article.find_all('h2')
title = ''
for part in title_parts:
title += processor.collect_text(part, True) + ' '
title = title.strip()
ingress_parts = article.find_all('h4')
ingress = ''
for part in ingress_parts:
ingress += processor.collect_text(part, True) + ' '
ingress = ingress.strip()
processor.decompose( article.find_all( 'p' )[-1] )
text = processor.collect_text( article )
return processor.create_dictionary('Keskisuomalainen', url, 200, categories, datetime_list, author, title, ingress, text, [u''], [u''])
if __name__ == '__main__':
parse("http://www.ksml.fi/uutiset/ulkomaat/kalifornian-ennatyskuivuus-paattyi-rankkasateisiin/1944276", file('keski.txt', 'w'))
| # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import processor
from datetime import datetime
def parse( url ):
r = requests.get( url )
if r.status_code == 404:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
r.encoding = 'UTF-8'
soup = BeautifulSoup( r.text, "html.parser" )
article = soup.find( role = 'main' )
if article == None:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
processor.decompose_all( article.find_all( 'script' ) )
categories = processor.collect_categories( article.find_all( class_ = 'article__section' ) )
datetime_list = processor.collect_datetime( article.find( class_ = 'article__published' ) )
author = processor.collect_text( article.find( class_ = 'article__author' ) )
title = processor.collect_text( article.find( class_ = 'article__title' ) )
ingress = processor.collect_text( article.find( class_ = 'article__summary' ) )
text = processor.collect_text( article.find( class_ = 'article__body' ) )
images = processor.collect_images_by_parent( article.find_all( class_ = 'article__images' ), '' )
captions = processor.collect_image_captions( article.find_all( itemprop = 'caption description' ) )
return processor.create_dictionary('Keskisuomalainen', url, r.status_code, categories, datetime_list, author, title, ingress, text, images, captions)
if __name__ == '__main__':
parse("http://www.ksml.fi/uutiset/ulkomaat/kalifornian-ennatyskuivuus-paattyi-rankkasateisiin/1944276", file('keski.txt', 'w'))
| mit | Python |
25904df895e495570d8fab05619809c84b7d45e1 | Comment B Combinator | esehara/skiski | skiski/bfc.py | skiski/bfc.py | from ski import S, K, I
from helper import Typename
class B(metaclass=Typename("B")):
"""
which composes two function
>>> B(lambda x: x).dot(lambda x: x + 5).dot(5).b()
10
"""
is_class = True
def __init__(self, x):
self.x = x
self.is_class = False
def dot(self, y):
return B2(self.x, y)
def __str__(self):
return "(B " + str(self.x) + ")"
def __repr__(self):
return "<" + self.__str__() + ">"
@classmethod
def to_ski(cls):
return S(K(S)).dot(K)
class B2(metaclass=Typename("B")):
is_class = True
def __init__(self, x, y):
self.x = x
self.y = y
self.is_class = False
def dot(self, y):
return B3(self.x, self.y, z)
def __str__(self):
return "(B " + str(self.x) + " " + str(self.y) + ")"
def __repr__(self):
return "<" + self.__str__() + ">"
class B3(metaclass=Typename("B")):
is_class = True
def __init__(self, x, y):
self.x = x
self.y = y
self.z = z
self.is_class = False
def b(self):
return x(y(z))
def __str__(self):
return "(B " + str(self.x) + " " + str(self.y) + str(self.z) + ")"
def __repr__(self):
return "<" + self.__str__() + ">"
| from ski import S, K, I
from helper import Typename
class B(metaclass=Typename("B")):
is_class = True
def __init__(self, x):
self.x = x
self.is_class = False
def dot(self, y):
return B2(self.x, y)
def __str__(self):
return "(B " + str(self.x) + ")"
def __repr__(self):
return "<" + self.__str__() + ">"
@classmethod
def to_ski(cls):
return S(K(S)).dot(K)
class B2(metaclass=Typename("B")):
is_class = True
def __init__(self, x, y):
self.x = x
self.y = y
self.is_class = False
def dot(self, y):
return B3(self.x, self.y, z)
def __str__(self):
return "(B " + str(self.x) + " " + str(self.y) + ")"
def __repr__(self):
return "<" + self.__str__() + ">"
class B3(metaclass=Typename("B")):
is_class = True
def __init__(self, x, y):
self.x = x
self.y = y
self.z = z
self.is_class = False
def b(self):
return x(y(z))
def __str__(self):
return "(B " + str(self.x) + " " + str(self.y) + str(self.z) + ")"
def __repr__(self):
return "<" + self.__str__() + ">"
| mit | Python |
13981e78604234a0f736d52e5d1dacc92e95882b | update system encoding | antoine-tran/Wikisearch-example | python/search.py | python/search.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interface to ElasticSearch for serving search requests
#
# Tuan Tran (ttran@l3s.de)
#
import sys
from sys import argv
from conn import connect,close as es_close
QUERY = { "query":{ "multi_match" : {"query": "%s", "fields": [ "text", "title", "contributor" ]}}}
def simplesearch(client, term, k):
'''
Get the top-k results from ES using default ranking function. The results are cached into
main memory
'''
res = client.search(index='wiki',body= {
"query": {
"multi_match" : {
"query": "%s" % term,
"fields": [ "text", "title", "contributor" ]
}
}
})
if res == None or len(res) == 0: return [];
for hit in res['hits']['hits']:
yield hit["_source"]
def rerankedsearch(client, term, k):
'''
Get the results and rerank
'''
return
if __name__ == "__main__":
if argv[1] == 'simple':
try:
client = connect()
for hit in simplesearch(client,argv[2],int(argv[3])):
print(hit.encode('utf-8'))
finally:
es_close(client)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interface to ElasticSearch for serving search requests
#
# Tuan Tran (ttran@l3s.de)
#
import sys
from sys import argv
from conn import connect,close as es_close
QUERY = { "query":{ "multi_match" : {"query": "%s", "fields": [ "text", "title", "contributor" ]}}}
def simplesearch(client, term, k):
'''
Get the top-k results from ES using default ranking function. The results are cached into
main memory
'''
res = client.search(index='wiki',body= {
"query": {
"multi_match" : {
"query": "%s" % term,
"fields": [ "text", "title", "contributor" ]
}
}
})
if res == None or len(res) == 0: return [];
for hit in res['hits']['hits']:
yield hit["_source"]
def rerankedsearch(client, term, k):
'''
Get the results and rerank
'''
return
if __name__ == "__main__":
if argv[1] == 'simple':
try:
client = connect()
for hit in simplesearch(client,argv[2],int(argv[3])):
print(hit)
finally:
es_close(client)
| apache-2.0 | Python |
6333ccb825b026bdb805672ff14f99123d8a381e | remove gen | connyay/speed,connyay/speed | python/server.py | python/server.py | import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
application = tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
| import tornado.ioloop
import tornado.web
from tornado import gen
class MainHandler(tornado.web.RequestHandler):
@gen.coroutine
def get(self):
self.write("Hello, world")
application = tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
| mit | Python |
2ac0509e6a6e1a92ee04def45cf502664b1bd101 | Fix storemagic test for new alias API | ipython/ipython,ipython/ipython | IPython/extensions/tests/test_storemagic.py | IPython/extensions/tests/test_storemagic.py | import tempfile, os
from IPython.config.loader import Config
import nose.tools as nt
ip = get_ipython()
ip.magic('load_ext storemagic')
def test_store_restore():
ip.user_ns['foo'] = 78
ip.magic('alias bar echo "hello"')
tmpd = tempfile.mkdtemp()
ip.magic('cd ' + tmpd)
ip.magic('store foo')
ip.magic('store bar')
# Check storing
nt.assert_equal(ip.db['autorestore/foo'], 78)
nt.assert_in('bar', ip.db['stored_aliases'])
# Remove those items
ip.user_ns.pop('foo', None)
ip.alias_manager.undefine_alias('bar')
ip.magic('cd -')
ip.user_ns['_dh'][:] = []
# Check restoring
ip.magic('store -r')
nt.assert_equal(ip.user_ns['foo'], 78)
assert ip.alias_manager.is_alias('bar')
nt.assert_in(os.path.realpath(tmpd), ip.user_ns['_dh'])
os.rmdir(tmpd)
def test_autorestore():
ip.user_ns['foo'] = 95
ip.magic('store foo')
del ip.user_ns['foo']
c = Config()
c.StoreMagics.autorestore = False
orig_config = ip.config
try:
ip.config = c
ip.extension_manager.reload_extension('storemagic')
nt.assert_not_in('foo', ip.user_ns)
c.StoreMagics.autorestore = True
ip.extension_manager.reload_extension('storemagic')
nt.assert_equal(ip.user_ns['foo'], 95)
finally:
ip.config = orig_config
| import tempfile, os
from IPython.config.loader import Config
import nose.tools as nt
ip = get_ipython()
ip.magic('load_ext storemagic')
def test_store_restore():
ip.user_ns['foo'] = 78
ip.magic('alias bar echo "hello"')
tmpd = tempfile.mkdtemp()
ip.magic('cd ' + tmpd)
ip.magic('store foo')
ip.magic('store bar')
# Check storing
nt.assert_equal(ip.db['autorestore/foo'], 78)
nt.assert_in('bar', ip.db['stored_aliases'])
# Remove those items
ip.user_ns.pop('foo', None)
ip.alias_manager.undefine_alias('bar')
ip.magic('cd -')
ip.user_ns['_dh'][:] = []
# Check restoring
ip.magic('store -r')
nt.assert_equal(ip.user_ns['foo'], 78)
nt.assert_in('bar', ip.alias_manager.alias_table)
nt.assert_in(os.path.realpath(tmpd), ip.user_ns['_dh'])
os.rmdir(tmpd)
def test_autorestore():
ip.user_ns['foo'] = 95
ip.magic('store foo')
del ip.user_ns['foo']
c = Config()
c.StoreMagics.autorestore = False
orig_config = ip.config
try:
ip.config = c
ip.extension_manager.reload_extension('storemagic')
nt.assert_not_in('foo', ip.user_ns)
c.StoreMagics.autorestore = True
ip.extension_manager.reload_extension('storemagic')
nt.assert_equal(ip.user_ns['foo'], 95)
finally:
ip.config = orig_config
| bsd-3-clause | Python |
18a6b18f6668a059dcc337c7ab175497f1448a29 | Update honeypot.py | FabioChiodini/HoneypotCF | honeypot.py | honeypot.py | from flask import Flask, jsonify, request
import os
import requests
from pprint import pprint
import json
import logging
import logstash
#added this K
#from cfenv import AppEnv
#added this K
#env = AppEnv()
#env.LOG_HOST # 'test-app'
#env.LOG_PORT # 5000
if 'LOG_HOST' not in os.environ or 'LOG_PORT' not in os.environ:
raise(Exception("LOG_HOST OR LOG_PORT NOT DEFINED"))
POST_URL = "http://{host}:{port}/log".format(host=os.environ['LOG_HOST'],port=os.environ['LOG_PORT'])
host = os.environ['LOG_HOST']
test_logger = logging.getLogger('python-logstash-logger')
test_logger.setLevel(logging.INFO)
test_logger.addHandler(logstash.TCPLogstashHandler(host, 5000, version=1))
app = Flask(__name__)
def log_request(req):
extra = {
'ip': request.environ.get('X-Forwarded-For', request.remote_addr),
'url': req.full_path,
}
test_logger.info('honeypotCF: ', extra=extra)
#data to log
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def honey(path):
log_request(request)
return jsonify({'CF result': 'ok'})
if __name__ == '__main__':
app.run(host="0.0.0.0",port=8080)
| from flask import Flask, jsonify, request
import os
import requests
from pprint import pprint
import json
import logging
import logstash
#added this K
#from cfenv import AppEnv
#added this K
#env = AppEnv()
#env.LOG_HOST # 'test-app'
#env.LOG_PORT # 5000
if 'LOG_HOST' not in os.environ or 'LOG_PORT' not in os.environ:
raise(Exception("LOG_HOST OR LOG_PORT NOT DEFINED"))
POST_URL = "http://{host}:{port}/log".format(host=os.environ['LOG_HOST'],port=os.environ['LOG_PORT'])
host = os.environ['LOG_HOST']
test_logger = logging.getLogger('python-logstash-logger')
test_logger.setLevel(logging.INFO)
test_logger.addHandler(logstash.TCPLogstashHandler(host, 5000, version=1))
app = Flask(__name__)
def log_request(req):
extra = {
'ip': request.environ.get('X-Forwarded-For', request.remote_addr),
'url': req.full_path,
}
test_logger.info('honeypot: ', extra=extra)
#data to log
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def honey(path):
log_request(request)
return jsonify({'CF result': 'ok'})
if __name__ == '__main__':
app.run(host="0.0.0.0",port=8080)
| mit | Python |
e40736c1055747cab45adb8ce6a511b9889000fd | test case for quotation | Aptitudetech/ERPNext,indictranstech/erpnext,gsnbng/erpnext,njmube/erpnext,anandpdoshi/erpnext,geekroot/erpnext,anandpdoshi/erpnext,njmube/erpnext,gsnbng/erpnext,geekroot/erpnext,geekroot/erpnext,anandpdoshi/erpnext,geekroot/erpnext,gsnbng/erpnext,indictranstech/erpnext,anandpdoshi/erpnext,njmube/erpnext,njmube/erpnext,indictranstech/erpnext,gsnbng/erpnext,indictranstech/erpnext | erpnext/selling/doctype/quotation/test_quotation.py | erpnext/selling/doctype/quotation/test_quotation.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.utils import flt
import unittest
test_dependencies = ["Product Bundle"]
class TestQuotation(unittest.TestCase):
def test_make_sales_order(self):
from erpnext.selling.doctype.quotation.quotation import make_sales_order
quotation = frappe.copy_doc(test_records[0])
quotation.insert()
self.assertRaises(frappe.ValidationError, make_sales_order, quotation.name)
quotation.submit()
sales_order = make_sales_order(quotation.name)
self.assertEquals(sales_order.doctype, "Sales Order")
self.assertEquals(len(sales_order.get("items")), 1)
self.assertEquals(sales_order.get("items")[0].doctype, "Sales Order Item")
self.assertEquals(sales_order.get("items")[0].prevdoc_docname, quotation.name)
self.assertEquals(sales_order.customer, "_Test Customer")
sales_order.delivery_date = "2014-01-01"
sales_order.naming_series = "_T-Quotation-"
sales_order.transaction_date = "2013-05-12"
sales_order.insert()
def test_create_quotation_with_margin(self):
from erpnext.selling.doctype.quotation.quotation import make_sales_order
from erpnext.selling.doctype.sales_order.sales_order \
import make_material_request, make_delivery_note, make_sales_invoice
test_records[0]['items'][0]['price_list_rate'] = 1500
test_records[0]['items'][0]['type'] = 'Percentage'
test_records[0]['items'][0]['rate_or_amount'] = 20
quotation = frappe.copy_doc(test_records[0])
quotation.insert()
self.assertRaises(frappe.ValidationError, make_sales_order, quotation.name)
quotation.submit()
sales_order = make_sales_order(quotation.name)
sales_order.delivery_date = "2016-01-02"
sales_order.naming_series = "_T-Quotation-"
sales_order.transaction_date = "2016-01-01"
sales_order.insert()
sales_order.submit()
dn = make_delivery_note(sales_order.name)
dn.save()
si = make_sales_invoice(sales_order.name)
si.save()
test_records = frappe.get_test_records('Quotation')
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.utils import flt
import unittest
test_dependencies = ["Product Bundle"]
class TestQuotation(unittest.TestCase):
def test_make_sales_order(self):
from erpnext.selling.doctype.quotation.quotation import make_sales_order
quotation = frappe.copy_doc(test_records[0])
quotation.insert()
self.assertRaises(frappe.ValidationError, make_sales_order, quotation.name)
quotation.submit()
sales_order = make_sales_order(quotation.name)
self.assertEquals(sales_order.doctype, "Sales Order")
self.assertEquals(len(sales_order.get("items")), 1)
self.assertEquals(sales_order.get("items")[0].doctype, "Sales Order Item")
self.assertEquals(sales_order.get("items")[0].prevdoc_docname, quotation.name)
self.assertEquals(sales_order.customer, "_Test Customer")
sales_order.delivery_date = "2014-01-01"
sales_order.naming_series = "_T-Quotation-"
sales_order.transaction_date = "2013-05-12"
sales_order.insert()
test_records = frappe.get_test_records('Quotation')
| agpl-3.0 | Python |
5efca5a3e8fb978cb47e986b1bd7296fe2cae3ce | Add some spaces to get_list_as_english | hawkrives/gobbldygook,hawkrives/gobbldygook,hawkrives/gobbldygook | helpers.py | helpers.py | def get_readable_list(passed_list, sep=', '):
output = ""
for i, item in enumerate(passed_list):
if len(passed_list) is 1:
output += str(item)
else:
if i is not (len(passed_list) - 1):
output += str(item) + sep
else:
output += str(item)
return output
def get_list_as_english(passed_list):
output = ""
for i, item in enumerate(passed_list):
if len(passed_list) is 1:
output += str(item) + ' '
elif len(passed_list) is 2:
output += str(item)
if i is not (len(passed_list) - 1):
output += " and "
else:
output += " "
else:
if i is not (len(passed_list) - 1):
output += str(item) + ", "
else:
output += "and " + str(item) + ", "
return output
| def get_readable_list(passed_list, sep=', '):
output = ""
for i, item in enumerate(passed_list):
if len(passed_list) is 1:
output += str(item)
else:
if i is not (len(passed_list) - 1):
output += str(item) + sep
else:
output += str(item)
return output
def get_list_as_english(passed_list):
output = ""
for i, item in enumerate(passed_list):
if len(passed_list) is 1:
output += str(item)
elif len(passed_list) is 2:
output += str(item)
if i is not (len(passed_list) - 1):
output += " and "
else:
output += ""
else:
if i is not (len(passed_list) - 1):
output += str(item) + ", "
else:
output += "and " + str(item) + ", "
return output
| agpl-3.0 | Python |
05de768e2b62cec0808f9bd6530f82984e9ad438 | Define resource, send the transport to the resource as a parameter | harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,harrissoerja/vumi,TouK/vumi,vishwaprakashmishra/xmatrix,vishwaprakashmishra/xmatrix,TouK/vumi | vumi/transports/mtn_rwanda/mtn_rwanda_ussd.py | vumi/transports/mtn_rwanda/mtn_rwanda_ussd.py | from twisted.internet import reactor
from twisted.web import xmlrpc, server
from twisted.internet.defer import inlineCallbacks
from vumi import log
from vumi.transports.base import Transport
from vumi.config import ConfigText
class MTNRwandaUSSDTransportConfig(Transport.CONFIG_CLASS):
"""
MTN Rwanda USSD transport configuration.
"""
# TODO: Configure elements described in chapter 8 of the spec
class MTNRwandaUSSDTransport(Transport):
"""
"""
transport_type = 'ussd'
xmlrpc_server = None
CONFIG_CLASS = MTNRwandaUSSDTransportConfig
def validate_config(self):
# Hard-coded for now.
# TODO: self.config.get()
self.port = 7080
@inlineCallbacks
def setup_transport(self):
"""
Transport specific setup - it initiates things, sets up a
connection, for example.
:self.xmlrpc_server: An IListeningPort instance.
"""
r = MTNRwandaXMLRPCResource(self)
factory = server.Site(r)
self.xmlrpc_server = yield reactor.listenTCP(self.port, factory)
@inlineCallbacks
def teardown_transport(self):
"""
Clean-up of setup done in setup_transport.
"""
if self.xmlrpc_server is not None:
yield self.xmlrpc_server.stopListening()
def handle_outbound_message(self, message):
"""
Read outbound message and do what needs to be done with them.
"""
def handle_raw_inbound_message(self):
"""
Called by the XML-RPC server when it receives a payload that
needs processing.
"""
class MTNRwandaXMLRPCResource(xmlrpc.XMLRPC):
"""
A Resource object implementing XML-RPC, can be published using
twisted.web.server.Site.
"""
def __init__(self, transport):
self.transport = transport
xmlrpc.XMLRPC.__init__(self)
def xmlrpc_(self, request, request_id=None):
request_id = request_id or Transport.generate_message_id()
request.setHeader("content-type", self.transport.content_type)
self.transport.set_request(request_id, request)
self.transport.handle_raw_inbound_message(request_id, request)
return server.NOT_DONE_YET
def xmlrpc_healthResource(self, request):
request.setResponseCode(http.OK)
request.do_not_log = True
return self.transport.get_health_response()
| from twisted.internet import reactor
from twisted.web import xmlrpc, server
from vumi import log
from vumi.transports.base import Transport
from vumi.config import ConfigText
class MTNRwandaUSSDTransportConfig(Transport.CONFIG_CLASS):
"""
MTN Rwanda USSD transport configuration.
"""
# TODO: Configure elements described in chapter 8 of the spec
class MTNRwandaUSSDTransport(Transport):
"""
"""
transport_type = 'ussd'
xmlrpc_server = None
CONFIG_CLASS = MTNRwandaUSSDTransportConfig
def validate_config(self):
# Hard-coded for now.
# TODO: self.config.get()
self.port = 7080
@inlineCallbacks
def setup_transport(self):
"""
Transport specific setup - it initiates things, sets up a
connection, for example.
:self.xmlrpc_server: An IListeningPort instance.
"""
r = MTNRwandaXMLRPCResource()
factory = server.Site(r)
self.xmlrpc_server = yield reactor.listenTCP(self.port, factory)
@inlineCallbacks
def teardown_transport(self):
"""
Clean-up of setup done in setup_transport.
"""
if self.xmlrpc_server is not None:
yield self.xmlrpc_server.stopListening()
def handle_outbound_message(self, message):
"""
Read outbound message and do what needs to be done with them.
"""
def handle_raw_inbound_message(self):
"""
Called by the XML-RPC server when it receives a payload that
needs processing.
"""
class MTNRwandaXMLRPCResource(xmlrpc.XMLRPC):
"""
A Resource object implementing XML-RPC, can be published using
twisted.web.server.Site.
"""
def __init__(self, transport):
xmlrpc.XMLRPC.__init__(self)
| bsd-3-clause | Python |
9ebd20e98bc196885885fd8cdfc121f170fd0fa2 | make command_line.py actually work as a standalone wrapper | MatthewCox/colour-valgrind | colourvalgrind/command_line.py | colourvalgrind/command_line.py | #!/usr/bin/env python
from colourvalgrind import colour_valgrind
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-t", "--test",
help="valgrind log file to run through colour filters",
default=None)
args, valgrind_args = parser.parse_known_args()
if args.test:
with open(args.test) as f:
for line in f:
print(colour_valgrind(line))
else:
cmd = ['valgrind']
cmd.extend(valgrind_args)
s = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(s.stdout.readline, b''):
print(colour_valgrind(line.rstrip('\n')))
if __name__ == "__main__":
main()
| #!/usr/bin/env python
from colourvalgrind import colour_valgrind
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input",
help="valgrind log file to run through colour filters",
required=True)
args = parser.parse_args()
with open(args.input) as f:
for line in f:
print(colour_valgrind(line))
if __name__ == "__main__":
main()
| mit | Python |
f081cddddc4bc92775afb152aba22b2b5245a164 | fix errors reported by http://validator.w3.org/ | ellson/graphviz-web-static,MjAbuz/graphviz-web-static,ellson/graphviz-web-static,MjAbuz/graphviz-web-static,MjAbuz/graphviz-web-static,ellson/graphviz-web-static | ht2html.py | ht2html.py | #!/usr/bin/python
import sys
if len(sys.argv) < 2:
exit
pageset = sys.argv[1].split()
source = sys.argv[2]
basename = source.split('.')[0]
fout = open(basename + '.html', 'w')
fout.write('''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<!--
This is a generated document. Please edit "''' + basename + '''.ht" instead
and then type "make".
-->
<html>
<head>
<title>GraphViz</title>
</head>
<body bgcolor="white">
<table cellspacing="20">
<tr><td>
<!-- icon -->
<img src="doc.png" alt="">
</td><td>
<!-- header -->
<h2>GraphViz - Graph Drawing Tools</h2>
<p>
<h1>''' + basename + '''</h1>
</td></tr>
<tr><td valign="top">
<!-- menu -->
\t<table bgcolor="#c0c0ff">\n''')
for page in pageset:
menuitem = page.split('.')[0]
if len(menuitem.split('_')) > 1:
menuname = menuitem.split('_')[1]
indent = ' '
else:
menuname = menuitem
indent = ''
if basename == menuitem:
fout.write('\t<tr><td bgcolor="#c0ffc0">' + indent + menuname + '</td></tr>\n')
else:
fout.write('\t<tr><td>' + indent + '<a href="' + menuitem + '.html">' + menuname + '</a></td></tr>\n')
fout.write('''\t</table>
</td><td valign="top">
<!-- body -->\n''')
fin = open(source, 'r')
fout.write(fin.read())
fin.close
fout.write('''</td></tr>
</table>
</body>
</html>\n''')
fout.close
| #!/usr/bin/python
import sys
if len(sys.argv) < 2:
exit
pageset = sys.argv[1].split()
source = sys.argv[2]
basename = source.split('.')[0]
fout = open(basename + '.html', 'w')
fout.write('''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<!--
This is a generated document. Please edit "''' + basename + '''.ht" instead
and then type "make".
-->
<html>
<head>
<title>GraphViz</title>
</head>
<body bgcolor="white">
<table cellspacing="20">
<tr><td>
<!-- icon -->
<img src="doc.png" alt="">
</td><td>
<!-- header -->
<h2>GraphViz - Graph Drawing Tools</h1>
<p>
<h1>''' + basename + '''</h1>
</td></tr>
<tr><td valign="top">
<!-- menu -->
\t<table bgcolor="#c0c0ff">\n''')
for page in pageset:
menuitem = page.split('.')[0]
if len(menuitem.split('_')) > 1:
menuname = menuitem.split('_')[1]
indent = ' '
else:
menuname = menuitem
indent = ''
if basename == menuitem:
fout.write('\t<tr><td bgcolor="#c0ffc0">' + indent + menuname + '</td></tr>\n')
else:
fout.write('\t<tr><td>' + indent + '<a href="' + menuitem + '.html">' + menuname + '</a></td></tr>\n')
fout.write('''\t</table>
</td><td valign="top">
<!-- body -->\n''')
fin = open(source, 'r')
fout.write(fin.read())
fin.close
fout.write('''</td></tr>
</table>
</body>
</html>\n''')
fout.close
| epl-1.0 | Python |
590e27f30cb58da535b8cd6bc0e9e48011409a18 | Add reference to actual bug | tmobile/jazz-installer,tmobile/jazz-installer,tmobile/jazz-installer,tmobile/jazz-installer | feature-extensions/apigee/terraformBugWorkaround.py | feature-extensions/apigee/terraformBugWorkaround.py | import json
import subprocess
# Exists because of:
# https://github.com/terraform-providers/terraform-provider-aws/issues/5742
# and can be nuked if that is ever fixed.
terraformNewGatewayRoleOutput = "apigee-lambda-gateway-role-arn"
terraformPreviousRoleOutput = "previous-role-arn"
# Replaces function's role with one created by Terraform
def linkNewRoleToExistingFunctionWithCLI(functionName):
updateFunctionRole(functionName, getRoleArnFromTerraform())
# Restores function's original role to what it was before
def restoreOldRoleToExistingFunctionWithCLI(functionName):
updateFunctionRole(functionName, getPreviousRoleArnFromTerraform())
def getFunctionArn(gatewayFuncName):
return getFunctionConfig(gatewayFuncName)['FunctionArn']
def getFunctionRole(gatewayFuncName):
return getFunctionConfig(gatewayFuncName)['Role']
def getRoleArnFromTerraform():
return getTerraformOutput(terraformNewGatewayRoleOutput)
def getPreviousRoleArnFromTerraform():
return getTerraformOutput(terraformPreviousRoleOutput)
def getTerraformOutput(outputVarName):
return subprocess.check_output(
[
'terraform',
'output',
outputVarName
],
cwd='./terraform').rstrip()
def updateFunctionRole(functionName, roleArn):
subprocess.check_call([
'aws',
'lambda',
'update-function-configuration',
'--function-name',
functionName,
'--role',
str(roleArn)
])
def getFunctionConfig(gatewayFuncName):
return json.loads(subprocess.check_output([
'aws',
'lambda',
'get-function-configuration',
'--function-name',
gatewayFuncName,
'--output',
'json'
]).rstrip())
| import json
import subprocess
terraformNewGatewayRoleOutput = "apigee-lambda-gateway-role-arn"
terraformPreviousRoleOutput = "previous-role-arn"
# Replaces function's role with one created by Terraform
def linkNewRoleToExistingFunctionWithCLI(functionName):
updateFunctionRole(functionName, getRoleArnFromTerraform())
# Restores function's original role to what it was before
def restoreOldRoleToExistingFunctionWithCLI(functionName):
updateFunctionRole(functionName, getPreviousRoleArnFromTerraform())
def getFunctionArn(gatewayFuncName):
return getFunctionConfig(gatewayFuncName)['FunctionArn']
def getFunctionRole(gatewayFuncName):
return getFunctionConfig(gatewayFuncName)['Role']
def getRoleArnFromTerraform():
return getTerraformOutput(terraformNewGatewayRoleOutput)
def getPreviousRoleArnFromTerraform():
return getTerraformOutput(terraformPreviousRoleOutput)
def getTerraformOutput(outputVarName):
return subprocess.check_output(
[
'terraform',
'output',
outputVarName
],
cwd='./terraform').rstrip()
def updateFunctionRole(functionName, roleArn):
subprocess.check_call([
'aws',
'lambda',
'update-function-configuration',
'--function-name',
functionName,
'--role',
str(roleArn)
])
def getFunctionConfig(gatewayFuncName):
return json.loads(subprocess.check_output([
'aws',
'lambda',
'get-function-configuration',
'--function-name',
gatewayFuncName,
'--output',
'json'
]).rstrip())
| apache-2.0 | Python |
fd237a2208c419ade371640f947dcc9c70af344c | add get parameters function | shl198/Projects,shl198/Projects,shl198/Projects,shl198/Pipeline,shl198/Pipeline,shl198/Projects,shl198/Pipeline,shl198/Pipeline | Modules/FileProcess.py | Modules/FileProcess.py | import subprocess
def remove(files):
"""
this function can remove files provided
"""
if isinstance(files,str):
subprocess.call('rm {file}'.format(file=files),shell=True)
if isinstance(files,list):
cmd = ''
for f in files:
cmd = cmd + 'rm {file} & '.format(file=f)
subprocess.call(cmd[:-3],shell=True)
def get_parameters(parFile):
"""
This function list all parameters for all pipelines.
And return a dictionary
"""
res = open(parFile)
dic = {}
for line in res:
if line[0].isalpha():
item = line.split('\t')
value = item[1] # combine all values into a single string
if ',' in value:
rg = value.split(',')
rg[-1] = rg[-1][:-1]
dic[item[0]] = rg
else:
dic[item[0]] = item[1][:-1]
else:
continue
if isinstance(dic['readGroup'],str):
dic['readGroup'] = [dic['readGroup'][:-1]]
return dic
| import subprocess
def remove(files):
"""
this function can remove files provided
"""
if isinstance(files,str):
subprocess.call('rm {file}'.format(file=files),shell=True)
if isinstance(files,list):
cmd = ''
for f in files:
cmd = cmd + 'rm {file} & '.format(file=f)
subprocess.call(cmd[:-2],shell=True)
| mit | Python |
f431943bf8f94f8fe5229c8c55e9d341bb417015 | fix for issue #11 | arq5x/poretools,arq5x/poretools | poretools/formats.py | poretools/formats.py | class Fastq(object):
def __init__(self, s):
self.s = s
self.parse()
def parse(self):
(self.name, self.seq, self.sep, self.qual) = self.s.strip().split('\n')
def __repr__(self):
return '\n'.join([self.name, self.seq, self.sep, self.qual])
class Fasta(object):
def __init__(self, s):
self.s = s
self.parse()
def parse(self):
(self.name, self.seq, self.sep, self.qual) = self.s.strip().split('\n')
self.name = self.name.lstrip('@')
def __repr__(self):
return '\n'.join(['>'+self.name, self.seq]) | class Fastq(object):
def __init__(self, s):
self.s = s
self.parse()
def parse(self):
(self.name, self.seq, self.sep, self.qual) = self.s.strip().split('\n')
def __repr__(self):
return '\n'.join([self.name, self.seq, self.sep, self.qual])
class Fasta(object):
def __init__(self, s):
self.s = s
self.parse()
def parse(self):
(self.name, self.seq, self.sep, self.qual) = self.s.strip().split('\n')
def __repr__(self):
return '\n'.join(['>'+self.name, self.seq]) | mit | Python |
f591fa8d337db4487d4485f943aef67c97e49b30 | Fix about.py | Akuli/the-simple-noteprogram,Akuli/the-simple-noteprogram | install-prefix/lib/the-simple-noteprogram/the_simple_noteprogram/about.py | install-prefix/lib/the-simple-noteprogram/the_simple_noteprogram/about.py | """An about dialog and information about this program"""
from gettext import gettext as _
# Information about authors, add your name here if you've helped with
# making this program but your name is not here yet
AUTHORS = ["Akuli"]
TRANSLATORS = {
_("Finnish"): "Akuli",
}
# General information
SHORT_DESCRIPTION = "Simple GTK+ 3 application for taking notes"
LONG_DESCRIPTION = "This is a simple note-taking program written in \
Python 3 with GTK+ 3 aimed at GNU/Linux users that displays a note \
icon in the system tray. The tray icon can be clicked and notes with a \
title and a description can be easily made. The notes are always saved \
automatically."
VERSION = '1.0'
KEYWORDS = ["notes", "Gtk+ 3"]
def about(*ign):
"""Shows an about dialog"""
# This is not a module-level import because that way this file can
# be used without having Gtk installed
from gi.repository import Gtk, GdkPixbuf
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
Gtk.IconTheme.get_default().lookup_icon(
'the-simple-noteprogram', 48,
Gtk.IconLookupFlags.NO_SVG,
).get_filename(),
48, 48,
)
# This may result in a warning about setting a transient parent but
# the application doesn't have any kind of main window to set as the
# parent
dialog = Gtk.AboutDialog(
program_name="The Simple Noteprogram",
version=VERSION,
comments=SHORT_DESCRIPTION,
license_type=Gtk.License.MIT_X11,
authors=AUTHORS,
logo=pixbuf,
translator_credits="\n".join(
": ".join(item) for item in TRANSLATORS.items()
),
)
dialog.run()
dialog.destroy()
| """An about dialog and information about this program"""
from gettext import gettext as _
# Information about authors, add your name here if you've helped with
# making this program but your name is not here yet
AUTHORS = ["Akuli"]
TRANSLATORS = {
_("Finnish"): "Akuli",
}
# General information
SHORT_DESCRIPTION = "Simple GTK+ 3 application for taking notes"
LONG_DESCRIPTION = "This is a simple note-taking program written in \
Python 3 with GTK+ 3 aimed at GNU/Linux users that displays a note \
icon in the system tray. The tray icon can be clicked and notes with a \
title and a description can be easily made. The notes are always saved \
automatically."
VERSION = '1.0'
KEYWORDS = ["notes", "Gtk+ 3"]
def about(*ign):
"""Shows an about dialog"""
# This is not a module-level import because that way this file can
# be used without having Gtk installed
from gi.repository import Gtk, GdkPixbuf
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
Gtk.IconTheme.get_default().lookup_icon(
DASHES,
48,
Gtk.IconLookupFlags.NO_SVG,
).get_filename(),
48,
48,
)
# This may result in a warning about setting a transient parent but
# the application doesn't have any kind of main window to set as the
# parent
dialog = Gtk.AboutDialog(
program_name="The Simple Noteprogram",
version=VERSION,
comments=SHORT_DESCRIPTION,
license_type=Gtk.License.MIT_X11,
authors=AUTHORS,
logo=pixbuf,
translator_credits="\n".join(
": ".join(item) for item in TRANSLATORS.items()
),
)
dialog.run()
dialog.destroy()
| mit | Python |
2d92120b292f23bcd3c927e50d2be4bf27616722 | fix mistake in template tag | saippuakauppias/django-simple-open-graph | simple_open_graph/templatetags/simple_open_graph.py | simple_open_graph/templatetags/simple_open_graph.py | from django import template
from ..utils import string_to_dict
register = template.Library()
@register.tag
def opengraph_meta(parser, token):
try:
tag_name, properties = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
"%r tag requires two arguments" % token.contents.split()[0]
)
properties = string_to_dict(properties[1:-1])
return OpenGraphNode(properties)
class OpenGraphNode(template.Node):
def __init__(self, properties):
self.properties = properties
def render(self, context):
og_layout = u'<meta property="og:{0}" content="{1}" />'
result_list = []
for key, value in self.properties.items():
value = template.Variable(value).resolve(context)
value = value.replace('"', ' ')
key = key.replace('"', '')
og_formatted = og_layout.format(key, value)
result_list.append(og_formatted)
return u'\n'.join(result_list)
| from django import template
from ..utils import string_to_dict
register = template.Library()
@register.tag
def opengraph_meta(parser, token):
try:
tag_name, properties = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
"%r tag requires two arguments" % token.contents.split()[0]
)
properties = string_to_dict(properties[1:-1])
return OpenGraphNode(properties)
class OpenGraphNode(template.Node):
def __init__(self, properties):
self.properties = properties
def render(self, context):
og_layout = u'<meta property="og:{0}" content="{1}" />'
result_list = []
for key, value in self.properties.items():
value = template.Variable(value).resolve(context)
value = value.replace('"', ' ')
key = value.replace('"', '')
og_formatted = og_layout.format(key, value)
result_list.append(og_formatted)
return u'\n'.join(result_list)
| isc | Python |
f2fd7fa693b5be7ae37445fc185611e80aacddf3 | Fix bug to actually find the sdk | pebble/libpebble,pebble/libpebble,pebble/libpebble,pebble/libpebble | pebble/PblBuildCommand.py | pebble/PblBuildCommand.py | import sh, os
from PblCommand import PblCommand
class PblBuildCommand(PblCommand):
name = 'build'
help = 'Build your Pebble project'
def configure_subparser(self, parser):
parser.add_argument('--sdk', help='Path to Pebble SDK (ie: ~/pebble-dev/PebbleSDK-2.X/)')
def run(self, args):
waf_path = os.path.join(os.path.join(self.sdk_path(args), 'Pebble'), 'waf')
print "Path to waf: {}".format(waf_path)
os.system(waf_path + " configure build")
def sdk_path(self, args):
"""
Tries to guess the location of the Pebble SDK
"""
if args.sdk:
return args.sdk
else:
return os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..'))
| import sh, os
from PblCommand import PblCommand
class PblBuildCommand(PblCommand):
name = 'build'
help = 'Build your Pebble project'
def configure_subparser(self, parser):
parser.add_argument('--sdk', help='Path to Pebble SDK (ie: ~/pebble-dev/PebbleSDK-2.X/)')
def run(self, args):
waf_path = os.path.join(os.path.join(self.sdk_path(args), 'Pebble'), 'waf')
print "Path to waf: {}".format(waf_path)
os.system(waf_path + " configure build")
def sdk_path(self, args):
"""
Tries to guess the location of the Pebble SDK
"""
if args.sdk:
return args.sdk
else:
return os.path.normpath(os.path.join(os.path.dirname(__file__), os.path.join('..', '..', '..')))
| mit | Python |
09450c14e4d3cdfb645e1284eaf80350204a6f9e | fix warning | galaxy-genome-annotation/python-apollo,erasche/python-apollo,galaxy-genome-annotation/python-apollo | arrow/config.py | arrow/config.py | from __future__ import absolute_import
import os
import yaml
DEFAULT_CONFIG = {
}
def global_config_path():
config_path = os.environ.get(
"ARROW_GLOBAL_CONFIG_PATH",
"~/.apollo-arrow.yml"
)
config_path = os.path.expanduser(config_path)
return config_path
def read_global_config():
config_path = global_config_path()
if not os.path.exists(config_path):
return DEFAULT_CONFIG
with open(config_path) as f:
return yaml.safe_load(f)
| from __future__ import absolute_import
import os
import yaml
DEFAULT_CONFIG = {
}
def global_config_path():
config_path = os.environ.get(
"ARROW_GLOBAL_CONFIG_PATH",
"~/.apollo-arrow.yml"
)
config_path = os.path.expanduser(config_path)
return config_path
def read_global_config():
config_path = global_config_path()
if not os.path.exists(config_path):
return DEFAULT_CONFIG
with open(config_path) as f:
return yaml.load(f, Loader=yaml.safe_load)
| mit | Python |
ade326e47be07abb634c61632aa913b641780a59 | fix the field names in serializers | jonboiser/content-curation,jayoshih/content-curation,DXCanas/content-curation,fle-internal/content-curation,jayoshih/content-curation,fle-internal/content-curation,DXCanas/content-curation,aronasorman/content-curation,jonboiser/content-curation,fle-internal/content-curation,DXCanas/content-curation,jonboiser/content-curation,aronasorman/content-curation,aronasorman/content-curation,jayoshih/content-curation,DXCanas/content-curation,jayoshih/content-curation,fle-internal/content-curation,jonboiser/content-curation | contentcuration/contentcuration/serializers.py | contentcuration/contentcuration/serializers.py | from contentcuration.models import * # TODO: Change this later?
from rest_framework import serializers
from rest_framework_bulk import BulkListSerializer, BulkSerializerMixin
class LicenseSerializer(serializers.ModelSerializer):
class Meta:
model = License
fields = ('license_name', 'exists', 'id')
class ChannelSerializer(serializers.ModelSerializer):
class Meta:
model = Channel
fields = ('name', 'description', 'editors', 'id', 'draft', 'clipboard', 'deleted', 'published','channel_id')
class TopicTreeSerializer(serializers.ModelSerializer):
class Meta:
model = TopicTree
fields = ('name', 'channel', 'root_node', 'id')
class FileSerializer(serializers.ModelSerializer):
content_copy = serializers.FileField(use_url=False)
def get(*args, **kwargs):
return super.get(*args, **kwargs)
class Meta:
model = File
fields = ('id', 'checksum', 'file_size', 'content_copy', 'contentmetadata', 'file_format', 'preset', 'lang')
class FormatPresetSerializer(serializers.ModelSerializer):
# files = FileSerializer(many=True, read_only=True)
class Meta:
model = FormatPreset
fields = ('id', 'readable_name', 'multi_language', 'supplementary', 'order', 'kind', 'allowed_formats')
class ContentNodeSerializer(BulkSerializerMixin, serializers.ModelSerializer):
children = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
preset = FormatPresetSerializer(many=True, read_only=True)
class Meta:
model = ContentNode
fields = ('title', 'published', 'total_file_size', 'id', 'description', 'published',
'sort_order', 'license_owner', 'license', 'kind', 'children', 'parent', 'content_id',
'preset', 'original_filename')
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = ContentTag
fields = ('tag_name', 'tag_type', 'id')
class ExerciseSerializer(serializers.ModelSerializer):
class Meta:
model = Exercise
fields = ('title', 'description', 'id')
class AssessmentItemSerializer(BulkSerializerMixin, serializers.ModelSerializer):
exercise = serializers.PrimaryKeyRelatedField(queryset=Exercise.objects.all())
class Meta:
model = AssessmentItem
fields = ('question', 'type', 'answers', 'id', 'exercise')
list_serializer_class = BulkListSerializer
| from contentcuration.models import * # TODO: Change this later?
from rest_framework import serializers
from rest_framework_bulk import BulkListSerializer, BulkSerializerMixin
class LicenseSerializer(serializers.ModelSerializer):
class Meta:
model = License
fields = ('license_name', 'exists', 'id')
class ChannelSerializer(serializers.ModelSerializer):
class Meta:
model = Channel
fields = ('name', 'description', 'editors', 'id', 'draft', 'clipboard', 'deleted', 'published','channel_id')
class TopicTreeSerializer(serializers.ModelSerializer):
class Meta:
model = TopicTree
fields = ('name', 'channel', 'root_node', 'id')
class FileSerializer(serializers.ModelSerializer):
content_copy = serializers.FileField(use_url=False)
def get(*args, **kwargs):
return super.get(*args, **kwargs)
class Meta:
model = File
fields = ('checksum', 'extension', 'file_size', 'content_copy', 'id', 'available', 'format')
class FormatPresetSerializer(serializers.ModelSerializer):
# files = FileSerializer(many=True, read_only=True)
class Meta:
model = Format
fields = ('id', 'readable_name', 'multi_language', 'supplementary', 'order', 'kind', 'allowed_formats')
class ContentNodeSerializer(BulkSerializerMixin, serializers.ModelSerializer):
children = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
preset = FormatPresetSerializer(many=True, read_only=True)
class Meta:
model = ContentNode
fields = ('title', 'published', 'total_file_size', 'id', 'description', 'published',
'sort_order', 'license_owner', 'license', 'kind', 'children', 'parent', 'content_id',
'preset', 'original_filename')
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = ContentTag
fields = ('tag_name', 'tag_type', 'id')
class ExerciseSerializer(serializers.ModelSerializer):
class Meta:
model = Exercise
fields = ('title', 'description', 'id')
class AssessmentItemSerializer(BulkSerializerMixin, serializers.ModelSerializer):
exercise = serializers.PrimaryKeyRelatedField(queryset=Exercise.objects.all())
class Meta:
model = AssessmentItem
fields = ('question', 'type', 'answers', 'id', 'exercise')
list_serializer_class = BulkListSerializer
| mit | Python |
c961b2bde6f5ca13888d2eeae2ba0bdaf8e494f9 | Fix homepage migration | ljean/coop_cms,ljean/coop_cms,ljean/coop_cms | coop_cms/migrations/0004_auto_20160620_1310.py | coop_cms/migrations/0004_auto_20160620_1310.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.core.urlresolvers import reverse
def set_homepage(apps, schema_editor):
# Move from homepage_for_site to SiteSettings
site_class = apps.get_model("sites", "Site")
for site in site_class.objects.all():
try:
homepage_article = site.homepage_article.all()[0]
except (models.ObjectDoesNotExist, IndexError):
homepage_article = None
if homepage_article:
site_settings_class = apps.get_model("coop_cms", "SiteSettings")
site_settings = site_settings_class.objects.get_or_create(site=site)[0]
if not site_settings.homepage_url:
homepage_url = reverse('coop_cms_view_article', args=[homepage_article.slug])
site_settings.homepage_url = homepage_url
site_settings.save()
class Migration(migrations.Migration):
dependencies = [
('coop_cms', '0003_auto_20160204_1540'),
]
operations = [
migrations.RunPython(set_homepage),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def set_homepage(apps, schema_editor):
# We can't import the Person model directly as it may be a newer
# version than this migration expects. We use the historical version.
site_class = apps.get_model("sites", "Site")
for site in site_class.objects.all():
try:
homepage_article = site.homepage_article
except models.ObjectDoesNotExist:
homepage_article = None
if homepage_article:
site_settings_class = apps.get_model("coop_cms", "SiteSettings")
site_settings = site_settings_class.objects.get_or_create(site=site)[0]
site_settings.homepage_url = homepage_article.get_absolute_url()
site_settings.save()
class Migration(migrations.Migration):
dependencies = [
('coop_cms', '0003_auto_20160204_1540'),
]
operations = [
migrations.RunPython(set_homepage),
]
| bsd-3-clause | Python |
e8a28f4bda8092e8b8ad3ee8ee0bfaac200e3334 | Update plot_regress_continuous.py | cjayb/mne-python,olafhauk/mne-python,antiface/mne-python,cjayb/mne-python,larsoner/mne-python,larsoner/mne-python,bloyl/mne-python,alexandrebarachant/mne-python,drammock/mne-python,mne-tools/mne-python,wmvanvliet/mne-python,drammock/mne-python,mne-tools/mne-python,adykstra/mne-python,olafhauk/mne-python,leggitta/mne-python,yousrabk/mne-python,kingjr/mne-python,Teekuningas/mne-python,Teekuningas/mne-python,dimkal/mne-python,yousrabk/mne-python,pravsripad/mne-python,jaeilepp/mne-python,rkmaddox/mne-python,kambysese/mne-python,mne-tools/mne-python,drammock/mne-python,wronk/mne-python,cmoutard/mne-python,ARudiuk/mne-python,teonlamont/mne-python,rkmaddox/mne-python,Teekuningas/mne-python,jaeilepp/mne-python,alexandrebarachant/mne-python,wmvanvliet/mne-python,kingjr/mne-python,teonlamont/mne-python,trachelr/mne-python,jmontoyam/mne-python,jniediek/mne-python,olafhauk/mne-python,Eric89GXL/mne-python,larsoner/mne-python,nicproulx/mne-python,cmoutard/mne-python,Eric89GXL/mne-python,adykstra/mne-python,lorenzo-desantis/mne-python,kambysese/mne-python,dimkal/mne-python,pravsripad/mne-python,trachelr/mne-python,kingjr/mne-python,leggitta/mne-python,wronk/mne-python,jmontoyam/mne-python,lorenzo-desantis/mne-python,pravsripad/mne-python,jniediek/mne-python,ARudiuk/mne-python,nicproulx/mne-python,wmvanvliet/mne-python,andyh616/mne-python,bloyl/mne-python,antiface/mne-python,andyh616/mne-python | examples/stats/plot_regress_continuous.py | examples/stats/plot_regress_continuous.py | """
=========================================
Regression on continuous data (rER[P/F])
=========================================
This demonstrates how rERPs/regressing the continuous data is a
generalisation of traditional averaging. If all preprocessing steps
are the same and if no overlap between epochs exists and if all
predictors are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate, including those of
continuous predictors.
References
-------------
See Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
Authors: Jona Sassenhagen <jona.sassenhagen@gmail.de>
License: BSD (3-clause)
"""
import numpy as np
import mne
from mne.datasets import spm_face
from mne.stats.regression import linear_regression_raw
mne.set_log_level(False)
# Preprocess data
data_path = spm_face.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
raw = mne.io.Raw(raw_fname % 1, preload=True) # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 45, method='iir')
events = mne.find_events(raw, stim_channel='UPPT001')
event_id = {"faces": 1, "scrambled": 2}
tmin, tmax = -.1, .5
raw.pick_types(meg=True)
# regular epoching
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False)
# rERF
evoked_dict = linear_regression_raw(raw, events=events, event_id=event_id,
reject=False, tmin=tmin, tmax=tmax)
# plot both results
cond = "faces"
print("traditional ERF:")
epochs[cond].average().plot()
print("rERF:")
evoked_dict[cond].plot()
# check if results are virtually identical
# (as they should be, in the case of no overlap)
print("Are the two methods virtually identical? ",
np.allclose(epochs[cond].average().data * 1e+15,
evoked_dict[cond].data * 1e+15))
| """
=========================================
Regression on continuous data (rER[P/F])
=========================================
This demonstrates how rERPs/regressing the continuous data is a
generalisation of traditional averaging. If all preprocessing steps
are the same and if no overlap between epochs exists and if all
predictors are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate, including those of
continuous predictors.
References
-------------
See Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
Authors: Jona Sassenhagen <jona.sassenhagen@gmail.de>
License: BSD (3-clause)
"""
import numpy as np
import mne
from mne.datasets import spm_face
from mne.stats.regression import linear_regression_raw
mne.set_log_level(False)
# Preprocess data
data_path = spm_face.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
raw = mne.io.Raw(raw_fname % 1, preload=True) # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 45, method='iir')
events = mne.find_events(raw, stim_channel='UPPT001')
event_id = {"faces": 1, "scrambled": 2}
tmin, tmax = -.1, .5
raw.pick_types(meg=True)
# regular epoching
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False)
# rERF
evoked_dict = linear_regression_raw(raw, events=events, event_id=event_id,
reject=False, tmin=tmin, tmax=tmax)
# plot both results
cond = "faces"
print("traditional ERF:")
epochs[cond].average().plot()
print("rERF:")
evoked_dict[cond].plot()
# check if results are virtually identical
# (as they should be, in the case of no overlap)
print("Are the two methods virtually identical? ",
np.allclose(epochs[cond].average().data * 1e+15,
evoked_dict[cond].data * 1e+15))
| bsd-3-clause | Python |
b3f9583047c93aa0c07529a53c1bec6ec3820196 | Make image.Image class abstract | alvarolopez/atrope | atrope/image.py | atrope/image.py | # -*- coding: utf-8 -*-
# Copyright 2014 Alvaro Lopez Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os.path
import requests
from atrope import exception
class BaseImage(object):
__metaclass__ = abc.ABCMeta
uri = sha512 = identifier = None
@abc.abstractmethod
def __init__(self, image_info):
pass
@abc.abstractmethod
def download(self, dest):
"""
Download the image.
:param dest: destionation directory.
"""
class VMCasterImage(BaseImage):
# TODO(aloga): are all of this really required?
required_fields = (
"ad:group",
"ad:mpuri",
"ad:user:fullname",
"ad:user:guid",
"ad:user:uri",
"dc:description",
"dc:identifier",
"dc:title",
"hv:hypervisor",
"hv:format",
"hv:size",
"hv:uri",
"hv:version",
"sl:arch",
"sl:checksum:sha512",
"sl:comments",
"sl:os",
"sl:osname",
"sl:osversion",
)
def __init__(self, image_info):
super(VMCasterImage, self).__init__(image_info)
image_dict = image_info.get("hv:image", {})
keys = image_dict.keys()
if not all(i in keys for i in self.required_fields):
raise exception.InvalidImageList(
reason="Invalid image definition."
)
self.uri = image_dict.get("hv:uri")
self.sha512 = image_dict.get("sl:checksum:sha512")
self.identifier = image_dict.get("dc:identifier")
def download(self, basedir):
dest = os.path.join(basedir, self.identifier)
with open(dest, 'wb') as f:
response = requests.get(self.uri, stream=True)
if not response.ok:
# FIXME(aloga)
pass
for block in response.iter_content(1024):
if block:
f.write(block)
f.flush()
| # -*- coding: utf-8 -*-
# Copyright 2014 Alvaro Lopez Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import requests
from atrope import exception
class BaseImage(object):
pass
class VMCasterImage(BaseImage):
# TODO(aloga): are all of this really required?
required_fields = (
"ad:group",
"ad:mpuri",
"ad:user:fullname",
"ad:user:guid",
"ad:user:uri",
"dc:description",
"dc:identifier",
"dc:title",
"hv:hypervisor",
"hv:format",
"hv:size",
"hv:uri",
"hv:version",
"sl:arch",
"sl:checksum:sha512",
"sl:comments",
"sl:os",
"sl:osname",
"sl:osversion",
)
def __init__(self, image_dict):
image_dict = image_dict.get("hv:image", {})
keys = image_dict.keys()
if not all(i in keys for i in self.required_fields):
raise exception.InvalidImageList(
reason="Invalid image definition."
)
self.uri = image_dict.get("hv:uri")
self.sha512 = image_dict.get("sl:checksum:sha512")
self.identifier = image_dict.get("dc:identifier")
def download(self, basedir):
dest = os.path.join(basedir, self.identifier)
with open(dest, 'wb') as f:
response = requests.get(self.uri, stream=True)
if not response.ok:
# FIXME(aloga)
pass
for block in response.iter_content(1024):
if block:
f.write(block)
f.flush()
| apache-2.0 | Python |
173ec0e1488cc4697ec8ca949433d7f49a216344 | fix bug | spencerpomme/coconuts-on-fire,spencerpomme/coconuts-on-fire,spencerpomme/coconuts-on-fire,spencerpomme/coconuts-on-fire,spencerpomme/coconuts-on-fire,spencerpomme/coconuts-on-fire | auto_upgrade.py | auto_upgrade.py | #! /usr/local/bin/python3
# can not be used on windows due to line end difference.
import pip
from subprocess import call
for dist in pip.get_installed_distributions():
call("pip3 install --upgrade --no-cache-dir " + dist.project_name, shell=True)
| #! /usr/local/bin/python3
# can not be used on windows due to line end difference.
import pip
from subprocess import call
for dist in pip.get_installed_distributions():
call("pip3 install --upgrade --no-cache-dir" + dist.project_name, shell=True)
| apache-2.0 | Python |
9d401d7f68c3b145f73bd7ec029361e1e85d6483 | Fix import order | thinkopensolutions/server-tools,thinkopensolutions/server-tools | external_file_location/models/__init__.py | external_file_location/models/__init__.py | # -*- coding: utf-8 -*-
from . import task
from . import attachment
from . import location
| # -*- coding: utf-8 -*-
from . import attachment
from . import location
from . import task
| agpl-3.0 | Python |
3309450ca3cc410626c98e7c599817c54f8d30b3 | Remove unused import | faide/py3o.template,faide/py3o.template | py3o/template/tests/test_templates.py | py3o/template/tests/test_templates.py | __author__ = 'faide'
import unittest
from py3o.template.main import Template
import lxml.etree
import pkg_resources
from pyjon.utils import get_secure_filename
import zipfile
from io import BytesIO
import os
from lxml.etree import XMLSyntaxError
class TestTemplate(unittest.TestCase):
def tearDown(self):
pass
def setUp(self):
pass
def test_list_duplicate(self):
"""test duplicated listed get a unique id"""
template_name = pkg_resources.resource_filename(
'py3o.template',
'tests/templates/py3o_list_template.odt'
)
outname = get_secure_filename()
template = Template(template_name, outname)
class Item(object):
def __init__(self, val):
self.val = val
data_dict = {
"items": [Item(1), Item(2), Item(3), Item(4)]
}
error = False
template.set_image_path('logo', pkg_resources.resource_filename(
'py3o.template',
'tests/templates/images/new_logo.png'
))
template.render(data_dict)
outodt = zipfile.ZipFile(outname, 'r')
try:
content_trees = [
lxml.etree.parse(BytesIO(outodt.read(filename)))
for filename in template.templated_files
]
except XMLSyntaxError as e:
error = True
print(
"List is were not deduplicated->{}".format(e)
)
# remove end file
os.unlink(outname)
assert error is False
| __author__ = 'faide'
import unittest
from py3o.template.main import move_siblings
from py3o.template.main import Template
import lxml.etree
import pkg_resources
from pyjon.utils import get_secure_filename
import zipfile
from io import BytesIO
import os
from lxml.etree import XMLSyntaxError
class TestTemplate(unittest.TestCase):
def tearDown(self):
pass
def setUp(self):
pass
def test_list_duplicate(self):
"""test duplicated listed get a unique id"""
template_name = pkg_resources.resource_filename(
'py3o.template',
'tests/templates/py3o_list_template.odt'
)
outname = get_secure_filename()
template = Template(template_name, outname)
class Item(object):
def __init__(self, val):
self.val = val
data_dict = {
"items": [Item(1), Item(2), Item(3), Item(4)]
}
error = False
template.set_image_path('logo', pkg_resources.resource_filename(
'py3o.template',
'tests/templates/images/new_logo.png'
))
template.render(data_dict)
outodt = zipfile.ZipFile(outname, 'r')
try:
content_trees = [
lxml.etree.parse(BytesIO(outodt.read(filename)))
for filename in template.templated_files
]
except XMLSyntaxError as e:
error = True
print(
"List is were not deduplicated->{}".format(e)
)
# remove end file
os.unlink(outname)
assert error is False
| mit | Python |
d133913d11e5384eddc472bda63ca781d3adc532 | Handle basic ISBN | rootulp/exercism,rootulp/exercism,rootulp/exercism,rootulp/exercism,rootulp/exercism,rootulp/exercism,rootulp/exercism,rootulp/exercism | python/isbn-verifier/isbn_verifier.py | python/isbn-verifier/isbn_verifier.py | class IsbnVerifier(object):
def __init__(self, string):
self.string = string
def is_valid(self):
sum_so_far = 0
for i, c in enumerate(IsbnVerifier.remove_slashes(self.string)):
sum_so_far += IsbnVerifier.convert_char_to_int(c) * (10 - i)
return sum_so_far % 11 == 0
@staticmethod
def remove_slashes(string):
return "".join(filter(lambda char: char != "-", string))
@staticmethod
def convert_char_to_int(char):
return int(IsbnVerifier.convert_x_to_ten(char))
@staticmethod
def convert_x_to_ten(char):
return 10 if char == 'X' else char
def verify(isbn):
return IsbnVerifier(isbn).is_valid()
| class IsbnVerifier(object):
def __init__(self, string):
self.string = string
def is_valid(self):
return True
def verify(isbn):
return IsbnVerifier(isbn).is_valid()
| mit | Python |
7a116c18ae8d63a18d646aed899b3f21893501c6 | Update Python 2 deprecation error message | JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp | python/sparknlp/annotator/__init__.py | python/sparknlp/annotator/__init__.py | # Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all available Annotators of Spark NLP and their base
classes.
"""
# New Annotators need to be imported here
from sparknlp.annotator.classifier_dl import *
from sparknlp.annotator.embeddings import *
from sparknlp.annotator.er import *
from sparknlp.annotator.keyword_extraction import *
from sparknlp.annotator.ld_dl import *
from sparknlp.annotator.matcher import *
from sparknlp.annotator.ner import *
from sparknlp.annotator.dependency import *
from sparknlp.annotator.pos import *
from sparknlp.annotator.sentence import *
from sparknlp.annotator.sentiment import *
from sparknlp.annotator.seq2seq import *
from sparknlp.annotator.spell_check import *
from sparknlp.annotator.token import *
from sparknlp.annotator.ws import *
from sparknlp.annotator.chunker import *
from sparknlp.annotator.document_normalizer import *
from sparknlp.annotator.graph_extraction import *
from sparknlp.annotator.lemmatizer import *
from sparknlp.annotator.n_gram_generator import *
from sparknlp.annotator.normalizer import *
from sparknlp.annotator.stemmer import *
from sparknlp.annotator.stop_words_cleaner import *
if sys.version_info[0] == 2:
raise ImportError(
"Spark NLP only supports Python 3.6 and above. "
"Please use Python 3.6 or above that is compatible with both Spark NLP and PySpark"
)
else:
__import__("com.johnsnowlabs.nlp")
annotators = sys.modules[__name__]
pos = sys.modules[__name__]
pos.perceptron = sys.modules[__name__]
ner = sys.modules[__name__]
ner.crf = sys.modules[__name__]
ner.dl = sys.modules[__name__]
regex = sys.modules[__name__]
sbd = sys.modules[__name__]
sbd.pragmatic = sys.modules[__name__]
sda = sys.modules[__name__]
sda.pragmatic = sys.modules[__name__]
sda.vivekn = sys.modules[__name__]
spell = sys.modules[__name__]
spell.norvig = sys.modules[__name__]
spell.symmetric = sys.modules[__name__]
spell.context = sys.modules[__name__]
parser = sys.modules[__name__]
parser.dep = sys.modules[__name__]
parser.typdep = sys.modules[__name__]
embeddings = sys.modules[__name__]
classifier = sys.modules[__name__]
classifier.dl = sys.modules[__name__]
ld = sys.modules[__name__]
ld.dl = sys.modules[__name__]
keyword = sys.modules[__name__]
keyword.yake = sys.modules[__name__]
sentence_detector_dl = sys.modules[__name__]
seq2seq = sys.modules[__name__]
ws = sys.modules[__name__]
er = sys.modules[__name__]
| # Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all available Annotators of Spark NLP and their base
classes.
"""
# New Annotators need to be imported here
from sparknlp.annotator.classifier_dl import *
from sparknlp.annotator.embeddings import *
from sparknlp.annotator.er import *
from sparknlp.annotator.keyword_extraction import *
from sparknlp.annotator.ld_dl import *
from sparknlp.annotator.matcher import *
from sparknlp.annotator.ner import *
from sparknlp.annotator.dependency import *
from sparknlp.annotator.pos import *
from sparknlp.annotator.sentence import *
from sparknlp.annotator.sentiment import *
from sparknlp.annotator.seq2seq import *
from sparknlp.annotator.spell_check import *
from sparknlp.annotator.token import *
from sparknlp.annotator.ws import *
from sparknlp.annotator.chunker import *
from sparknlp.annotator.document_normalizer import *
from sparknlp.annotator.graph_extraction import *
from sparknlp.annotator.lemmatizer import *
from sparknlp.annotator.n_gram_generator import *
from sparknlp.annotator.normalizer import *
from sparknlp.annotator.stemmer import *
from sparknlp.annotator.stop_words_cleaner import *
if sys.version_info[0] == 2:
raise ImportError(
"Spark NLP for Python 2.x is deprecated since version >= 4.0. "
"Please use an older versions to use it with this Python version."
)
else:
__import__("com.johnsnowlabs.nlp")
annotators = sys.modules[__name__]
pos = sys.modules[__name__]
pos.perceptron = sys.modules[__name__]
ner = sys.modules[__name__]
ner.crf = sys.modules[__name__]
ner.dl = sys.modules[__name__]
regex = sys.modules[__name__]
sbd = sys.modules[__name__]
sbd.pragmatic = sys.modules[__name__]
sda = sys.modules[__name__]
sda.pragmatic = sys.modules[__name__]
sda.vivekn = sys.modules[__name__]
spell = sys.modules[__name__]
spell.norvig = sys.modules[__name__]
spell.symmetric = sys.modules[__name__]
spell.context = sys.modules[__name__]
parser = sys.modules[__name__]
parser.dep = sys.modules[__name__]
parser.typdep = sys.modules[__name__]
embeddings = sys.modules[__name__]
classifier = sys.modules[__name__]
classifier.dl = sys.modules[__name__]
ld = sys.modules[__name__]
ld.dl = sys.modules[__name__]
keyword = sys.modules[__name__]
keyword.yake = sys.modules[__name__]
sentence_detector_dl = sys.modules[__name__]
seq2seq = sys.modules[__name__]
ws = sys.modules[__name__]
er = sys.modules[__name__]
| apache-2.0 | Python |
e19b93477ab8d8d7def9561076e69eff43bf2ca6 | Sort the archive box so most recent year is at the top. | grundleborg/mesosphere | mesoblog/boxes.py | mesoblog/boxes.py | # This file contains the functions for MesoBoxes
# See mesobox/boxes.py for the enabling code.
from mesoblog.models import Article, Category
import calendar
import functools
BOX_INCLUDES = (
'categories',
'dates',
)
def categories(request):
result = {}
if request.resolver_match.app_name is "mesoblog":
c = Category.objects.all().order_by('name')
result = { "boxes": {"left": ['mesoblog/boxes/category-list.html',]}, "context": {"all_categories": c}}
return result
class ArchiveMonth:
def __init__(self, month):
self.month = month
self.name = calendar.month_name[month]
self.count = 0
self.nameNumber = str(month).zfill(2)
def inc(self):
self.count = self.count + 1
@functools.total_ordering
class ArchiveYear:
def __init__(self, year):
self.year = year
self.name = str(year).zfill(4)
self.months = []
for n in range(1,12):
self.months.append(ArchiveMonth(n))
def __eq__(self, other):
return self.year == other
def __lt__(self, other):
if self.year < other.year:
return True
else:
return False
def dates(request):
result = {}
if request.resolver_match.app_name is "mesoblog":
articles = Article.objects.all()
d = {}
for a in articles:
year = None
for y in d.values():
if y.year == a.date_published.year:
year = y
break
if year is None:
year = ArchiveYear(a.date_published.year)
month = year.months[a.date_published.month-1]
month.inc()
year.months[a.date_published.month-1] = month
d[a.date_published.year] = year
d = sorted(d.values(), reverse=True)
result = { "boxes": {"right": ['mesoblog/boxes/dates-list.html',]}, "context": {"dates": d}}
return result
| # This file contains the functions for MesoBoxes
# See mesobox/boxes.py for the enabling code.
from mesoblog.models import Article, Category
import calendar
BOX_INCLUDES = (
'categories',
'dates',
)
def categories(request):
result = {}
if request.resolver_match.app_name is "mesoblog":
c = Category.objects.all().order_by('name')
result = { "boxes": {"left": ['mesoblog/boxes/category-list.html',]}, "context": {"all_categories": c}}
return result
class ArchiveMonth:
def __init__(self, month):
self.month = month
self.name = calendar.month_name[month]
self.count = 0
self.nameNumber = str(month).zfill(2)
def inc(self):
self.count = self.count + 1
class ArchiveYear:
def __init__(self, year):
self.year = year
self.name = str(year).zfill(4)
self.months = []
for n in range(1,12):
self.months.append(ArchiveMonth(n))
def __eq__(self, other):
return self.year == other
def dates(request):
result = {}
if request.resolver_match.app_name is "mesoblog":
articles = Article.objects.all().order_by("-date_published")
d = {}
for a in articles:
year = None
for y in d.values():
if y.year == a.date_published.year:
year = y
break
if year is None:
year = ArchiveYear(a.date_published.year)
month = year.months[a.date_published.month-1]
month.inc()
year.months[a.date_published.month-1] = month
d[a.date_published.year] = year
d = d.values()
result = { "boxes": {"right": ['mesoblog/boxes/dates-list.html',]}, "context": {"dates": d}}
return result
| mit | Python |
a5753a6665bcddf71a892f47bc393da322e21cce | Remove ugettext import | otto-torino/django-baton,otto-torino/django-baton,otto-torino/django-baton | baton/config.py | baton/config.py | # -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.html import mark_safe
from django.utils.translation import gettext as _
default_config = {
'SITE_TITLE': 'Baton',
'SITE_HEADER': '<img src="%sbaton/img/logo.png" />' % settings.STATIC_URL,
'INDEX_TITLE': _('Site administration'),
'MENU_TITLE': _('Menu'),
'SUPPORT_HREF': 'https://github.com/otto-torino/django-baton/issues',
'COPYRIGHT': 'copyright © 2020 <a href="https://www.otto.to.it">Otto srl</a>', # noqa
'POWERED_BY': '<a href="https://www.otto.to.it">Otto srl</a>',
'CONFIRM_UNSAVED_CHANGES': True,
'SHOW_MULTIPART_UPLOADING': True,
'ENABLE_IMAGES_PREVIEW': True,
'COLLAPSABLE_USER_AREA': False,
'CHANGELIST_FILTERS_IN_MODAL': False,
'CHANGELIST_FILTERS_ALWAYS_OPEN': False,
'CHANGELIST_FILTERS_FORM': False,
'MENU_ALWAYS_COLLAPSED': False,
'MESSAGES_TOASTS': False,
'GRAVATAR_DEFAULT_IMG': 'retro',
'LOGIN_SPLASH': None,
'SEARCH_FIELD': None,
}
def get_config(key):
safe = ['SITE_HEADER', 'COPYRIGHT', 'POWERED_BY', ]
user_settings = getattr(settings, 'BATON', None)
if user_settings is None:
value = default_config.get(key, None)
else:
value = user_settings.get(key, default_config.get(key, None))
if key in safe:
return mark_safe(value)
return value
| # -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.html import mark_safe
from django.utils.translation import ugettext as _
default_config = {
'SITE_TITLE': 'Baton',
'SITE_HEADER': '<img src="%sbaton/img/logo.png" />' % settings.STATIC_URL,
'INDEX_TITLE': _('Site administration'),
'MENU_TITLE': _('Menu'),
'SUPPORT_HREF': 'https://github.com/otto-torino/django-baton/issues',
'COPYRIGHT': 'copyright © 2020 <a href="https://www.otto.to.it">Otto srl</a>', # noqa
'POWERED_BY': '<a href="https://www.otto.to.it">Otto srl</a>',
'CONFIRM_UNSAVED_CHANGES': True,
'SHOW_MULTIPART_UPLOADING': True,
'ENABLE_IMAGES_PREVIEW': True,
'COLLAPSABLE_USER_AREA': False,
'CHANGELIST_FILTERS_IN_MODAL': False,
'CHANGELIST_FILTERS_ALWAYS_OPEN': False,
'CHANGELIST_FILTERS_FORM': False,
'MENU_ALWAYS_COLLAPSED': False,
'MESSAGES_TOASTS': False,
'GRAVATAR_DEFAULT_IMG': 'retro',
'LOGIN_SPLASH': None,
'SEARCH_FIELD': None,
}
def get_config(key):
safe = ['SITE_HEADER', 'COPYRIGHT', 'POWERED_BY', ]
user_settings = getattr(settings, 'BATON', None)
if user_settings is None:
value = default_config.get(key, None)
else:
value = user_settings.get(key, default_config.get(key, None))
if key in safe:
return mark_safe(value)
return value
| mit | Python |
6ccdada25f8b6c0e38955e955efb235aea9945ee | Refactor testing console exporter (#2877) | open-telemetry/opentelemetry-python,open-telemetry/opentelemetry-python | opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py | opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
from json import loads
from unittest import TestCase
from opentelemetry.metrics import get_meter, set_meter_provider
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
ConsoleMetricExporter,
PeriodicExportingMetricReader,
)
from opentelemetry.test.globals_test import reset_metrics_globals
class TestConsoleExporter(TestCase):
def setUp(self):
reset_metrics_globals()
def tearDown(self):
reset_metrics_globals()
def test_console_exporter(self):
output = StringIO()
exporter = ConsoleMetricExporter(out=output)
reader = PeriodicExportingMetricReader(
exporter, export_interval_millis=100
)
provider = MeterProvider(metric_readers=[reader])
set_meter_provider(provider)
meter = get_meter(__name__)
counter = meter.create_counter(
"name", description="description", unit="unit"
)
counter.add(1, attributes={"a": "b"})
provider.shutdown()
output.seek(0)
result_0 = loads(output.readlines()[0])
self.assertGreater(len(result_0), 0)
metrics = result_0["resource_metrics"][0]["scope_metrics"][0]
self.assertEqual(metrics["scope"]["name"], "test_console_exporter")
metrics = metrics["metrics"][0]
self.assertEqual(metrics["name"], "name")
self.assertEqual(metrics["description"], "description")
self.assertEqual(metrics["unit"], "unit")
metrics = metrics["data"]
self.assertEqual(metrics["aggregation_temporality"], 2)
self.assertTrue(metrics["is_monotonic"])
metrics = metrics["data_points"][0]
self.assertEqual(metrics["attributes"], {"a": "b"})
self.assertEqual(metrics["value"], 1)
| # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
ConsoleMetricExporter,
PeriodicExportingMetricReader,
)
class TestConsoleExporter(TestCase):
def test_console_exporter(self):
try:
exporter = ConsoleMetricExporter()
reader = PeriodicExportingMetricReader(exporter)
provider = MeterProvider(metric_readers=[reader])
metrics.set_meter_provider(provider)
meter = metrics.get_meter(__name__)
counter = meter.create_counter("test")
counter.add(1)
except Exception as error:
self.fail(f"Unexpected exception {error} raised")
| apache-2.0 | Python |
f39006397687ba071e89c3ab6dd16cce33235ca4 | fix the css rules conversion for if-config='' | huubbouma/diazo,huubbouma/diazo,ebrehault/diazo,ebrehault/diazo,huubbouma/diazo,ebrehault/diazo | lib/xdv/cssrules.py | lib/xdv/cssrules.py | #!/usr/bin/env python
"""\
Usage: %prog RULES
RULES is a file defining a set of xdv rules in css syntax, e.g:
<rules xmlns="http://namespaces.plone.org/xdv"
xmlns:css="http://namespaces.plone.org/xdv+css">
<copy css:content="#content-wrapper" css:theme="#page-content"/>
</rules>\
"""
usage = __doc__
from optparse import OptionParser
from lxml import etree
from lxml.cssselect import css_to_xpath
import utils
import logging
logger = logging.getLogger('xdv')
def convert_css_selectors(rules, prefix='//'):
"""Convert css rules to xpath rules element tree in place
"""
#XXX: There is a :root pseudo-class - http://www.w3.org/TR/css3-selectors/#root-pseudo
# We may wish to add support to lxml.cssselect for it some day.
for element in rules.xpath("//@*[namespace-uri()='%s']/.." % utils.namespaces['css']):
for name, value in element.attrib.items():
if name.startswith('{%s}' % utils.namespaces['css']):
if value:
element.attrib[utils.localname(name)] = css_to_xpath(value, prefix=prefix)
else:
element.attrib[utils.fullname(element.nsmap[element.prefix], utils.localname(name))] = ""
def main():
"""Called from console script
"""
parser = OptionParser(usage=usage)
parser.add_option("-o", "--output", metavar="output.html",
help="Output filename (instead of stdout)",
dest="output", default=sys.stdout)
parser.add_option("-p", "--pretty-print", action="store_true",
help="Pretty print output",
dest="pretty_print", default=False)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Invalid number of arguments")
rules = etree.parse(args[0])
convert_css_selectors(rules)
rules.write(options.output, pretty_print=options.pretty_print)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""\
Usage: %prog RULES
RULES is a file defining a set of xdv rules in css syntax, e.g:
<rules xmlns="http://namespaces.plone.org/xdv"
xmlns:css="http://namespaces.plone.org/xdv+css">
<copy css:content="#content-wrapper" css:theme="#page-content"/>
</rules>\
"""
usage = __doc__
from optparse import OptionParser
from lxml import etree
from lxml.cssselect import css_to_xpath
import utils
import logging
logger = logging.getLogger('xdv')
def convert_css_selectors(rules, prefix='//'):
"""Convert css rules to xpath rules element tree in place
"""
#XXX: There is a :root pseudo-class - http://www.w3.org/TR/css3-selectors/#root-pseudo
# We may wish to add support to lxml.cssselect for it some day.
for element in rules.xpath("//@*[namespace-uri()='%s']/.." % utils.namespaces['css']):
for name, value in element.attrib.items():
if value:
if name.startswith('{%s}' % utils.namespaces['css']):
element.attrib[utils.localname(name)] = css_to_xpath(value, prefix=prefix)
else:
element.attrib[utils.fullname(element.nsmap[element.prefix], utils.localname(name))] = ""
def main():
"""Called from console script
"""
parser = OptionParser(usage=usage)
parser.add_option("-o", "--output", metavar="output.html",
help="Output filename (instead of stdout)",
dest="output", default=sys.stdout)
parser.add_option("-p", "--pretty-print", action="store_true",
help="Pretty print output",
dest="pretty_print", default=False)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Invalid number of arguments")
rules = etree.parse(args[0])
convert_css_selectors(rules)
rules.write(options.output, pretty_print=options.pretty_print)
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
e03b3d35858fc70ffbc1441321acd1166470c542 | Format code using black | tensorflow/cloud,tensorflow/cloud | src/python/tensorflow_cloud/core/tests/examples/multi_file_example/scale_model.py | src/python/tensorflow_cloud/core/tests/examples/multi_file_example/scale_model.py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow_cloud as tfc
tfc.run(
entry_point="train_model.py", requirements_txt="requirements.txt", stream_logs=True,
)
| # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow_cloud as tfc
tfc.run(
entry_point="train_model.py",
requirements_txt="requirements.txt",
stream_logs=True,
)
| apache-2.0 | Python |
7ebee43c0461c79d97d3023c0e404298c641c896 | update notebooks | sdpython/pyrsslocal,sdpython/pyrsslocal,sdpython/pyrsslocal | _unittests/ut_module/test_convert_notebooks.py | _unittests/ut_module/test_convert_notebooks.py | """
@brief test log(time=0s)
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.filehelper import explore_folder_iterfile
from pyquickhelper.ipythonhelper import upgrade_notebook, remove_execution_number
class TestConvertNotebooks(unittest.TestCase):
def test_convert_notebooks(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fold = os.path.abspath(os.path.dirname(__file__))
fold2 = os.path.normpath(
os.path.join(fold, "..", "..", "_doc", "notebooks"))
for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
# remove numbers
remove_execution_number(nbf, nbf)
fold2 = os.path.normpath(os.path.join(fold, "..", "..", "_unittests"))
for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
if __name__ == "__main__":
unittest.main()
| """
@brief test log(time=0s)
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.filehelper import explore_folder_iterfile
from pyquickhelper.ipythonhelper import upgrade_notebook
class TestConvertNotebooks(unittest.TestCase):
def test_convert_notebooks(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fold = os.path.abspath(os.path.dirname(__file__))
fold2 = os.path.normpath(
os.path.join(fold, "..", "..", "_doc", "notebooks"))
for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
fold2 = os.path.normpath(os.path.join(fold, "..", "..", "_unittests"))
for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
if __name__ == "__main__":
unittest.main()
| mit | Python |
4f8cf6c7349f7596a19d35e30af3c21491b00d03 | Add docstring | NUinfolab/context,NUinfolab/context,Pantsworth/political-pundits,NUinfolab/context,Pantsworth/political-pundits,Pantsworth/political-pundits,Pantsworth/political-pundits,NUinfolab/context | web/session.py | web/session.py | """
Flask session utilities
"""
import flask
def session_get(key):
return flask.session.get(key)
def session_set(key, value):
flask.session.permanent = True # Safari seems to need this
flask.session[key] = value
def session_pop(key):
if key in flask.session:
flask.session.pop(key)
def session_pop_list(key_list):
for k in key_list:
if k in flask.session:
flask.session.pop(k)
def remove_session_credentials():
session_pop_list(['auth_token', 'auth_token_secret',
'auth_redirect', 'access_token', 'access_token_secret'])
| #
# per-app session management
#
import flask
def session_get(key):
return flask.session.get(key)
def session_set(key, value):
flask.session.permanent = True # Safari seems to need this
flask.session[key] = value
def session_pop(key):
if key in flask.session:
flask.session.pop(key)
def session_pop_list(key_list):
for k in key_list:
if k in flask.session:
flask.session.pop(k)
def remove_session_credentials():
session_pop_list(['auth_token', 'auth_token_secret',
'auth_redirect', 'access_token', 'access_token_secret'])
| mit | Python |
5a21917049397e3414d55a739aa295a9d15322a7 | add comments | pranavj1001/LearnLanguages,pranavj1001/LearnLanguages,pranavj1001/LearnLanguages,pranavj1001/LearnLanguages,pranavj1001/LearnLanguages,pranavj1001/LearnLanguages,pranavj1001/LearnLanguages | python/SearchAlgorithms/SequentialSearch/Sequential.py | python/SearchAlgorithms/SequentialSearch/Sequential.py | # method to search for an element in an unordered list
def sequential_search_unordered_list(array, element):
pos = 0
found = False
while pos < len(array) and not found:
if array[pos] == element:
found = True
else:
pos += 1
return found
unordered_list = [1, 65, 37, 49, 52]
print(sequential_search_unordered_list(unordered_list, 6))
# method to search for an element in an ordered list
# the only difference between the two is that
# we stop when we find an element greater than our search target
def sequential_search_ordered_list(array, element):
pos = 0
found = False
stopped = False
while pos < len(array) and not found and not stopped:
if array[pos] == element:
found = True
else:
if array[pos] > element:
stopped = True
else:
pos += 1
return found
ordered_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(sequential_search_ordered_list(ordered_list, 8))
| def sequential_search_unordered_list(array, element):
pos = 0
found = False
while pos < len(array) and not found:
if array[pos] == element:
found = True
else:
pos += 1
return found
unordered_list = [1, 65, 37, 49, 52]
print(sequential_search_unordered_list(unordered_list, 6))
def sequential_search_ordered_list(array, element):
pos = 0
found = False
stopped = False
while pos < len(array) and not found and not stopped:
if array[pos] == element:
found = True
else:
if array[pos] > element:
stopped = True
else:
pos += 1
return found
ordered_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(sequential_search_ordered_list(ordered_list, 8))
| mit | Python |
eb9b540d97d1538668ccc198717c73fd1f806719 | Fix registration command to comply with virtualenvs. | dgnorth/drift,dgnorth/drift,dgnorth/drift | drift/management/commands/register.py | drift/management/commands/register.py | """
Register or update a deploable.
"""
import sys
import subprocess
from driftconfig.config import TSTransaction
from drift.utils import pretty
from driftconfig.util import register_this_deployable
def get_options(parser):
parser.add_argument(
"--preview", help="Only preview the changes, do not commit to origin.", action="store_true"
)
def run_command(args):
info = get_package_info()
name = info['name']
print "Registering/updating deployable {}:".format(name)
print "Package info:"
print pretty(info)
print ""
# TODO: This is perhaps not ideal, or what?
from drift.flaskfactory import load_flask_config
app_config = load_flask_config()
# Make current dir importable.
sys.path.insert(0, '.')
with TSTransaction(commit_to_origin=not args.preview) as ts:
ret = register_this_deployable(
ts=ts,
package_info=info,
resources=app_config.get("resources", []),
resource_attributes=app_config.get("resource_attributes", {}),
)
orig_row = ret['old_registration']
row = ret['new_registration']
if orig_row is None:
print "New registration entry added:"
print pretty(row)
elif orig_row == row:
print "Current registration unchanged:"
print pretty(row)
else:
print "Updating current registration info:"
print pretty(row)
print "\nPrevious registration info:"
print pretty(orig_row)
if args.preview:
print "Preview changes only, not committing to origin."
_package_classifiers = [
'name',
'version',
'description',
'long-description',
'author',
'author-email',
'license'
]
def get_package_info():
"""
Returns info from current package.
"""
# HACK: Get app root:
from drift.flaskfactory import _find_app_root
app_root = _find_app_root()
p = subprocess.Popen(
['python', 'setup.py'] + ['--' + classifier for classifier in _package_classifiers],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=app_root
)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError(
"Can't get '{}' of this deployable. Error: {} - {}".format(classifier, p.returncode, err)
)
info = dict(zip(_package_classifiers, out.split('\n')))
return info
| """
Register or update a deploable.
"""
import subprocess
from driftconfig.config import TSTransaction
from drift.utils import pretty
from driftconfig.util import register_this_deployable
def get_options(parser):
parser.add_argument(
"--preview", help="Only preview the changes, do not commit to origin.", action="store_true"
)
def run_command(args):
info = get_package_info()
name = info['name']
print "Registering/updating deployable {}:".format(name)
print "Package info:"
print pretty(info)
print ""
# TODO: This is perhaps not ideal, or what?
from drift.flaskfactory import load_flask_config
app_config = load_flask_config()
with TSTransaction(commit_to_origin=not args.preview) as ts:
ret = register_this_deployable(
ts=ts,
package_info=info,
resources=app_config.get("resources", []),
resource_attributes=app_config.get("resource_attributes", {}),
)
orig_row = ret['old_registration']
row = ret['new_registration']
if orig_row is None:
print "New registration entry added:"
print pretty(row)
elif orig_row == row:
print "Current registration unchanged:"
print pretty(row)
else:
print "Updating current registration info:"
print pretty(row)
print "\nPrevious registration info:"
print pretty(orig_row)
if args.preview:
print "Preview changes only, not committing to origin."
_package_classifiers = [
'name',
'version',
'description',
'long-description',
'author',
'author-email',
'license'
]
def get_package_info():
"""
Returns info from current package.
"""
# HACK: Get app root:
from drift.flaskfactory import _find_app_root
app_root = _find_app_root()
p = subprocess.Popen(
['python', 'setup.py'] + ['--' + classifier for classifier in _package_classifiers],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=app_root
)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError(
"Can't get '{}' of this deployable. Error: {} - {}".format(classifier, p.returncode, err)
)
info = dict(zip(_package_classifiers, out.split('\n')))
return info
| mit | Python |
b0a4a6e4fdd46c7c11e1ad7358fb211e68563d7c | update docstring. (#33) | rm-hull/pcd8544,rm-hull/luma.lcd | luma/lcd/__init__.py | luma/lcd/__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) 2013-17 Richard Hull and contributors
# See LICENSE.rst for details.
"""
LCD display drivers.
"""
| # -*- coding: utf-8 -*-
# Copyright (c) 2013-17 Richard Hull and contributors
# See LICENSE.rst for details.
"""
LCD display driver for PCD8544 devices.
"""
| mit | Python |
051422346bc0d55bb7f2261b9bd23dce5afc48ca | test for dm | ooici/coi-services,ooici/coi-services,ooici/coi-services,ooici/coi-services,ooici/coi-services | ion/services/dm/utility/test/test_coverage_craft.py | ion/services/dm/utility/test/test_coverage_craft.py | #!/usr/bin/env python
'''
@author Luke Campbell <LCampbell@ASAScience.com>
@file ion/services/dm/utility/test/test_coverage_craft.py
@date Thu Jul 19 16:44:05 EDT 2012
@description Utilities for crafting granules into a coverage
'''
from pyon.util.unit_test import PyonTestCase
from ion.services.dm.utility.granule_utils import CoverageCraft, RecordDictionaryTool, build_granule, TaxyTool
from nose.plugins.attrib import attr
import numpy as np
@attr('UNIT')
class CoverageCraftUnitTest(PyonTestCase):
def sample_granule(self):
rdt = RecordDictionaryTool(CoverageCraft.tx)
rdt['time'] = np.arange(20)
rdt['temp'] = np.array([5] * 20)
rdt['cond'] = np.array([10] * 20)
rdt['lat'] = np.array([0] * 20)
rdt['lon'] = np.array([0] * 20)
rdt['depth'] = np.array([0] * 20)
rdt['data'] = np.array([0x01] * 20)
return build_granule('sample', CoverageCraft.tx, rdt)
def test_to_coverage(self):
granule = self.sample_granule()
crafter = CoverageCraft(granule=granule)
crafter.add_granule()
coverage = crafter.coverage
time_vals = coverage.get_time_values()
comp = time_vals == np.arange(20)
self.assertTrue(comp.all())
| #!/usr/bin/env python
'''
@author Luke Campbell <LCampbell@ASAScience.com>
@file ion/services/dm/utility/test/test_coverage_craft.py
@date Thu Jul 19 16:44:05 EDT 2012
@description Utilities for crafting granules into a coverage
'''
from pyon.util.unit_test import PyonTestCase
class CoverageCraftUnitTest(PyonTestCase):
pass
| bsd-2-clause | Python |
c43e46a71f99e2fa7d5836260ac05519ac0c0329 | Remove dotenv from wsgi | kyleconroy/speakers,kyleconroy/speakers,kyleconroy/speakers | calltospeakers/wsgi.py | calltospeakers/wsgi.py | """
WSGI config for calltospeakers project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
ENVIRONMENT = os.getenv('ENVIRONMENT')
if ENVIRONMENT == 'PRODUCTION':
settings = 'production'
else:
settings = 'development'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calltospeakers.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', settings.title())
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
| """
WSGI config for calltospeakers project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import dotenv
try:
dotenv.read_dotenv(
os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env'))
except Exception as e:
print(e)
ENVIRONMENT = os.getenv('ENVIRONMENT')
if ENVIRONMENT == 'PRODUCTION':
settings = 'production'
else:
settings = 'development'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calltospeakers.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', settings.title())
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
| mit | Python |
3f72b358d3c8cbf0d063f2dfc0dfd94fa4e4393c | Add support for WTF fields in r16 headers | njvack/pfile-tools | pfile_tools/anonymizer.py | pfile_tools/anonymizer.py | # Part of the pfile-tools package
# Copyright (c) 2012, Board of Regents of the University of Wisconsin
# Written by Nathan Vack <njvack@wisc.edu>
# A library for performing anonymization of GE p-file headers.
from collections import namedtuple
import struct_utils
import logging
logger = logging.getLogger(__name__)
# option_name and description are for the command-line tool
AnonEntry = namedtuple("AnonEntry",
["key", "value", "option_name", "description"])
# Note: key and option_name should be unique in the list.
DEFAULT_LIST = [
AnonEntry("patient_name", "ANONYMIZED", "name", "patient name"),
AnonEntry("patient_id", "ANONYMIZED", "id", "patient ID"),
AnonEntry("patient_name_2", "ANONYMIZED", "name_2", "patient name 2"),
AnonEntry("patient_id_2", "ANONYMIZED", "id_2", "patient ID 2"),
AnonEntry("date_of_birth", "", "dateofbirth", "date of birth"),
AnonEntry("patient_age", 0, "age", "age"),
AnonEntry("patient_weight_g", 0, "weight", "weight"),
AnonEntry("patient_sex", 0, "sex", "sex"),
]
class Anonymizer(object):
def __init__(self, anonymization_list=DEFAULT_LIST):
"""Creates a new Anonymizer.
Arguments:
anonymization_list -- a list of objects specifying what attributes
are to anonymized, and how. Objects must contain "key" and "value"
properties. Keys are dotted string struct value identifiers.
See DEFAULT_LIST for examples. Defaults to DEFAULT_LIST.
"""
self.anonymization_list = anonymization_list
def anonymize(self, header):
"""Runs through self.anonymization_list and anonymizes the header
in place.
Arguments:
header -- a pfile header. NOTE: This structure will be modified
in place!
"""
logger.debug("Working with a %s" % (type(header)))
for entry in self.anonymization_list:
if struct_utils.has_struct_value(header, entry.key):
logger.debug("Setting %s to %s" % (entry.key, entry.value))
struct_utils.set_struct_value(header, entry.key, entry.value)
else:
logger.debug("%s not found in header" % entry.key)
| # Part of the pfile-tools package
# Copyright (c) 2012, Board of Regents of the University of Wisconsin
# Written by Nathan Vack <njvack@wisc.edu>
# A library for performing anonymization of GE p-file headers.
from collections import namedtuple
import struct_utils
import logging
logger = logging.getLogger(__name__)
# option_name and description are for the command-line tool
AnonEntry = namedtuple("AnonEntry",
["key", "value", "option_name", "description"])
# Note: key and option_name should be unique in the list.
DEFAULT_LIST = [
AnonEntry("patient_name", "ANONYMIZED", "name", "patient name"),
AnonEntry("patient_id", "ANONYMIZED", "id", "patient ID"),
AnonEntry("date_of_birth", "", "dateofbirth", "date of birth"),
AnonEntry("patient_age", 0, "age", "age"),
AnonEntry("patient_weight_g", 0, "weight", "weight"),
AnonEntry("patient_sex", 0, "sex", "sex"),
]
class Anonymizer(object):
def __init__(self, anonymization_list=DEFAULT_LIST):
"""Creates a new Anonymizer.
Arguments:
anonymization_list -- a list of objects specifying what attributes
are to anonymized, and how. Objects must contain "key" and "value"
properties. Keys are dotted string struct value identifiers.
See DEFAULT_LIST for examples. Defaults to DEFAULT_LIST.
"""
self.anonymization_list = anonymization_list
def anonymize(self, header):
"""Runs through self.anonymization_list and anonymizes the header
in place.
Arguments:
header -- a pfile header. NOTE: This structure will be modified
in place!
"""
for entry in self.anonymization_list:
logger.debug("Setting %s to %s" % (entry.key, entry.value))
struct_utils.set_struct_value(header, entry.key, entry.value)
| bsd-3-clause | Python |
87172c2f23bed0b6a3566052624416a8c4a6e29b | Fix double typedef | lazka/pgi,lazka/pgi | pgi/cffilib/glib/_cdef.py | pgi/cffilib/glib/_cdef.py | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
GLIB_CDEF = """
typedef char gchar;
typedef const void * gconstpointer;
typedef double gdouble;
typedef float gfloat;
typedef int gboolean;
typedef int16_t gint16;
typedef int32_t gint32;
typedef int64_t gint64;
typedef int8_t gint8;
typedef int gint;
typedef long glong;
typedef short gshort;
typedef size_t gsize;
typedef uint16_t guint16;
typedef uint32_t guint32;
typedef uint64_t guint64;
typedef uint8_t guint8;
typedef unsigned int guint;
typedef unsigned long gulong;
typedef unsigned short gushort;
typedef intptr_t gpointer;
typedef signed long gssize;
// utility functions
gpointer g_malloc0(gsize);
void g_free(gpointer);
gpointer g_try_malloc0(gsize);
gchar* g_strdup(gchar*);
typedef void (*GFunc)(gpointer data, gpointer user_data);
// GQuark
typedef guint32 GQuark;
GQuark g_quark_from_string(gchar*);
gchar* g_quark_to_string(GQuark);
GQuark g_quark_try_string(gchar*);
// GError
typedef struct {
GQuark domain;
gint code;
gchar *message;
} GError;
void g_error_free (GError *error);
GError* g_error_copy(const GError *error);
GError* g_error_new(GQuark domain, gint code, const gchar *format, ...);
// GMappedFile
typedef struct _GMappedFile GMappedFile;
GMappedFile* g_mapped_file_new(const gchar *filename,
gboolean writable,
GError **error);
GMappedFile* g_mapped_file_ref(GMappedFile *file);
void g_mapped_file_unref(GMappedFile *file);
gsize g_mapped_file_get_length(GMappedFile *file);
gchar* g_mapped_file_get_contents(GMappedFile *file);
// GOptionGroup
typedef struct _GOptionGroup GOptionGroup;
void g_option_group_free(GOptionGroup *group);
// GSList
typedef struct _GSList {
gpointer data;
struct _GSList *next;
} GSList;
GSList* g_slist_alloc(void);
GSList* g_slist_append(GSList *list, gpointer data);
void g_slist_free(GSList *list);
guint g_slist_length(GSList *list);
void g_slist_foreach (GSList *list, GFunc func, gpointer user_data);
// GList
typedef struct _GList {
gpointer data;
struct _GList *next;
struct _GList *prev;
} GList;
void g_list_free(GList *list);
"""
| # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
GLIB_CDEF = """
typedef char gchar;
typedef const void * gconstpointer;
typedef double gdouble;
typedef float gfloat;
typedef int gboolean;
typedef int16_t gint16;
typedef int32_t gint32;
typedef int64_t gint64;
typedef int8_t gint8;
typedef int gint;
typedef long glong;
typedef short gshort;
typedef size_t gsize;
typedef uint16_t guint16;
typedef uint32_t guint32;
typedef uint64_t guint64;
typedef uint8_t guint8;
typedef unsigned int guint;
typedef unsigned long gulong;
typedef unsigned short gushort;
typedef intptr_t gpointer;
typedef gulong GType;
typedef signed long gssize;
// utility functions
gpointer g_malloc0(gsize);
void g_free(gpointer);
gpointer g_try_malloc0(gsize);
gchar* g_strdup(gchar*);
typedef void (*GFunc)(gpointer data, gpointer user_data);
// GQuark
typedef guint32 GQuark;
GQuark g_quark_from_string(gchar*);
gchar* g_quark_to_string(GQuark);
GQuark g_quark_try_string(gchar*);
// GError
typedef struct {
GQuark domain;
gint code;
gchar *message;
} GError;
void g_error_free (GError *error);
GError* g_error_copy(const GError *error);
GError* g_error_new(GQuark domain, gint code, const gchar *format, ...);
// GMappedFile
typedef struct _GMappedFile GMappedFile;
GMappedFile* g_mapped_file_new(const gchar *filename,
gboolean writable,
GError **error);
GMappedFile* g_mapped_file_ref(GMappedFile *file);
void g_mapped_file_unref(GMappedFile *file);
gsize g_mapped_file_get_length(GMappedFile *file);
gchar* g_mapped_file_get_contents(GMappedFile *file);
// GOptionGroup
typedef struct _GOptionGroup GOptionGroup;
void g_option_group_free(GOptionGroup *group);
// GSList
typedef struct _GSList {
gpointer data;
struct _GSList *next;
} GSList;
GSList* g_slist_alloc(void);
GSList* g_slist_append(GSList *list, gpointer data);
void g_slist_free(GSList *list);
guint g_slist_length(GSList *list);
void g_slist_foreach (GSList *list, GFunc func, gpointer user_data);
// GList
typedef struct _GList {
gpointer data;
struct _GList *next;
struct _GList *prev;
} GList;
void g_list_free(GList *list);
"""
| lgpl-2.1 | Python |
cbf47d00da391f8799485b1b8c5bb449496b4604 | Use stable MADAM API. | eseifert/madam-rest | madam_rest/api_v1.py | madam_rest/api_v1.py | from datetime import datetime
from fractions import Fraction
from flask import Blueprint, jsonify, send_file, url_for
from frozendict import frozendict
from madam_rest import asset_storage
api = Blueprint('v1', __name__, url_prefix='/v1')
def _serializable(value):
"""
Utility function to convert data structures with immutable types to
mutable, serializable data structures.
:param value: data structure with immutable types
:return: mutable, serializable data structure
"""
if isinstance(value, (tuple, set, frozenset)):
return [_serializable(v) for v in value]
elif isinstance(value, frozendict):
return {k: _serializable(v) for k, v in value.items()}
elif isinstance(value, datetime):
return value.isoformat()
elif isinstance(value, Fraction):
return float(value)
return value
@api.route('/assets/')
def assets_retrieve():
asset_keys = [asset_key for asset_key in asset_storage]
return jsonify({
"data": asset_keys,
"meta": {
"count": len(asset_keys)
}
})
@api.route('/assets/<asset_key>/')
def asset_retrieve(asset_key):
asset, tags = asset_storage.get(asset_key)
return jsonify({
"links": {
"self": url_for('.asset_retrieve', asset_key=asset_key),
"essence": url_for('.asset_essence_retrieve', asset_key=asset_key)
},
"meta": _serializable(asset.metadata)
})
@api.route('/assets/<asset_key>/essence')
def asset_essence_retrieve(asset_key):
asset, tags = asset_storage.get(asset_key)
return send_file(asset.essence, mimetype=asset.mime_type)
| from datetime import datetime
from fractions import Fraction
from flask import Blueprint, jsonify, send_file, url_for
from frozendict import frozendict
from madam_rest import asset_storage
api = Blueprint('v1', __name__, url_prefix='/v1')
def _serializable(value):
"""
Utility function to convert data structures with immutable types to
mutable, serializable data structures.
:param value: data structure with immutable types
:return: mutable, serializable data structure
"""
if isinstance(value, (tuple, set, frozenset)):
return [_serializable(v) for v in value]
elif isinstance(value, frozendict):
return {k: _serializable(v) for k, v in value.items()}
elif isinstance(value, datetime):
return value.isoformat()
elif isinstance(value, Fraction):
return float(value)
return value
@api.route('/assets/')
def assets_retrieve():
asset_keys = [asset_key for asset_key in asset_storage]
return jsonify({
"data": asset_keys,
"meta": {
"count": len(asset_keys)
}
})
@api.route('/assets/<asset_key>/')
def asset_retrieve(asset_key):
asset = asset_storage[asset_key]
return jsonify({
"links": {
"self": url_for(asset_retrieve, asset_key=asset_key),
"essence": url_for(asset_essence_retrieve, asset_key=asset_key)
},
"meta": _serializable(asset.metadata)
})
@api.route('/assets/<asset_key>/essence')
def asset_essence_retrieve(asset_key):
asset = asset_storage[asset_key]
return send_file(asset.essence, mimetype=asset.mime_type)
| agpl-3.0 | Python |
898028dea2e04d52c32854752bda34d331c7696f | Move on if email exists | DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative | ynr/apps/candidatebot/management/commands/candidatebot_import_email_from_csv.py | ynr/apps/candidatebot/management/commands/candidatebot_import_email_from_csv.py | from __future__ import unicode_literals
import csv
from django.core.management.base import BaseCommand
from candidatebot.helpers import CandidateBot
from popolo.models import Person
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'filename',
help='Path to the file with the email addresses'
)
parser.add_argument(
'--source',
help='Source of the data. The source CSV column takes precedence'
)
def handle(self, **options):
with open(options['filename'], 'r') as fh:
reader = csv.DictReader(fh)
for row in reader:
source = row.get('source', options.get('source'))
if not row['democlub_id']:
continue
if not source:
raise ValueError("A source is required")
try:
bot = CandidateBot(row['democlub_id'])
try:
bot.add_email(row['email'])
bot.save(source)
except ValueError:
#Email exists, move on
pass
except Person.DoesNotExist:
print("Person ID {} not found".format(
row['democlub_id']))
# print(row)
| from __future__ import unicode_literals
import csv
from django.core.management.base import BaseCommand
from candidatebot.helpers import CandidateBot
from popolo.models import Person
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'filename',
help='Path to the file with the email addresses'
)
parser.add_argument(
'--source',
help='Source of the data. The source CSV column takes precedence'
)
def handle(self, **options):
with open(options['filename'], 'r') as fh:
reader = csv.DictReader(fh)
for row in reader:
source = row.get('source', options.get('source'))
if not row['democlub_id']:
continue
if not source:
raise ValueError("A source is required")
try:
bot = CandidateBot(row['democlub_id'])
bot.add_email(row['email'])
bot.save(source)
# print(person)
except Person.DoesNotExist:
print("Person ID {} not found".format(
row['democlub_id']))
# print(row)
| agpl-3.0 | Python |
00a87b50596e4b81ea103c895c3b764e39b5c8ea | Stop to use the __future__ module. | openstack/oslo.serialization | oslo_serialization/base64.py | oslo_serialization/base64.py | # Copyright 2015 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities to encode and decode Base64.
.. versionadded:: 1.10
"""
import base64
import binascii
def encode_as_bytes(s, encoding='utf-8'):
"""Encode a string using Base64.
If *s* is a text string, first encode it to *encoding* (UTF-8 by default).
:param s: bytes or text string to be encoded
:param encoding: encoding used to encode *s* if it's a text string
:returns: Base64 encoded byte string (bytes)
Use encode_as_text() to get the Base64 encoded string as text.
"""
if isinstance(s, str):
s = s.encode(encoding)
return base64.b64encode(s)
def encode_as_text(s, encoding='utf-8'):
"""Encode a string using Base64.
If *s* is a text string, first encode it to *encoding* (UTF-8 by default).
:param s: bytes or text string to be encoded
:param encoding: encoding used to encode *s* if it's a text string
:returns: Base64 encoded text string (Unicode)
Use encode_as_bytes() to get the Base64 encoded string as bytes.
"""
encoded = encode_as_bytes(s, encoding=encoding)
return encoded.decode('ascii')
def decode_as_bytes(encoded):
"""Decode a Base64 encoded string.
:param encoded: bytes or text Base64 encoded string to be decoded
:returns: decoded bytes string (bytes)
Use decode_as_text() to get the decoded string as text.
A TypeError is raised if the input is invalid (or incorrectly padded).
"""
if isinstance(encoded, bytes):
encoded = encoded.decode('ascii')
try:
return base64.b64decode(encoded)
except binascii.Error as e:
# Transform this exception for consistency.
raise TypeError(str(e))
def decode_as_text(encoded, encoding='utf-8'):
"""Decode a Base64 encoded string.
Decode the Base64 string and then decode the result from *encoding*
(UTF-8 by default).
:param encoded: bytes or text Base64 encoded string to be decoded
:returns: decoded text string (bytes)
Use decode_as_bytes() to get the decoded string as bytes.
"""
decoded = decode_as_bytes(encoded)
return decoded.decode(encoding)
| # Copyright 2015 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities to encode and decode Base64.
.. versionadded:: 1.10
"""
from __future__ import absolute_import
import base64
import binascii
def encode_as_bytes(s, encoding='utf-8'):
"""Encode a string using Base64.
If *s* is a text string, first encode it to *encoding* (UTF-8 by default).
:param s: bytes or text string to be encoded
:param encoding: encoding used to encode *s* if it's a text string
:returns: Base64 encoded byte string (bytes)
Use encode_as_text() to get the Base64 encoded string as text.
"""
if isinstance(s, str):
s = s.encode(encoding)
return base64.b64encode(s)
def encode_as_text(s, encoding='utf-8'):
"""Encode a string using Base64.
If *s* is a text string, first encode it to *encoding* (UTF-8 by default).
:param s: bytes or text string to be encoded
:param encoding: encoding used to encode *s* if it's a text string
:returns: Base64 encoded text string (Unicode)
Use encode_as_bytes() to get the Base64 encoded string as bytes.
"""
encoded = encode_as_bytes(s, encoding=encoding)
return encoded.decode('ascii')
def decode_as_bytes(encoded):
"""Decode a Base64 encoded string.
:param encoded: bytes or text Base64 encoded string to be decoded
:returns: decoded bytes string (bytes)
Use decode_as_text() to get the decoded string as text.
A TypeError is raised if the input is invalid (or incorrectly padded).
"""
if isinstance(encoded, bytes):
encoded = encoded.decode('ascii')
try:
return base64.b64decode(encoded)
except binascii.Error as e:
# Transform this exception for consistency.
raise TypeError(str(e))
def decode_as_text(encoded, encoding='utf-8'):
"""Decode a Base64 encoded string.
Decode the Base64 string and then decode the result from *encoding*
(UTF-8 by default).
:param encoded: bytes or text Base64 encoded string to be decoded
:returns: decoded text string (bytes)
Use decode_as_bytes() to get the decoded string as bytes.
"""
decoded = decode_as_bytes(encoded)
return decoded.decode(encoding)
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.