commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
ac0523cbc7b0b545720f9bca157165a8c2675954 | support not json parsable data | tariqdaouda/pyArango,tariqdaouda/pyArango | pyArango/index.py | pyArango/index.py | import json
from .theExceptions import (CreationError, DeletionError, UpdateError)
class Index(object) :
"""An index on a collection's fields. Indexes are meant to de created by ensureXXX functions of Collections.
Indexes have a .infos dictionary that stores all the infos about the index"""
def __init__(self, collection, infos = None, creationData = None) :
self.collection = collection
self.connection = self.collection.database.connection
self.indexesURL = "%s/index" % self.collection.database.URL
self.infos = None
if infos :
self.infos = infos
elif creationData :
self._create(creationData)
if self.infos :
self.URL = "%s/%s" % (self.indexesURL, self.infos["id"])
def _create(self, postData) :
"""Creates an index of any type according to postData"""
if self.infos is None :
r = self.connection.session.post(self.indexesURL, params = {"collection" : self.collection.name}, data = json.dumps(postData, default=str))
data = r.json()
if (r.status_code >= 400) or data['error'] :
raise CreationError(data['errorMessage'], data)
self.infos = data
def delete(self) :
"""Delete the index"""
r = self.connection.session.delete(self.URL)
data = r.json()
if (r.status_code != 200 and r.status_code != 202) or data['error'] :
raise DeletionError(data['errorMessage'], data)
| import json
from .theExceptions import (CreationError, DeletionError, UpdateError)
class Index(object) :
"""An index on a collection's fields. Indexes are meant to de created by ensureXXX functions of Collections.
Indexes have a .infos dictionary that stores all the infos about the index"""
def __init__(self, collection, infos = None, creationData = None) :
self.collection = collection
self.connection = self.collection.database.connection
self.indexesURL = "%s/index" % self.collection.database.URL
self.infos = None
if infos :
self.infos = infos
elif creationData :
self._create(creationData)
if self.infos :
self.URL = "%s/%s" % (self.indexesURL, self.infos["id"])
def _create(self, postData) :
"""Creates an index of any type according to postData"""
if self.infos is None :
r = self.connection.session.post(self.indexesURL, params = {"collection" : self.collection.name}, data = json.dumps(postData))
data = r.json()
if (r.status_code >= 400) or data['error'] :
raise CreationError(data['errorMessage'], data)
self.infos = data
def delete(self) :
"""Delete the index"""
r = self.connection.session.delete(self.URL)
data = r.json()
if (r.status_code != 200 and r.status_code != 202) or data['error'] :
raise DeletionError(data['errorMessage'], data)
| apache-2.0 | Python |
095d8d0136ff3942a9fcc76564a61e17dae56b71 | Fix breakage. The website is looking for user-agent header | sharuzzaman/sharuzzaman-code-repo.maybank-gia-rate | goldprice.py | goldprice.py | #!/usr/bin/python
# Maybank Gold Investment Account price scraper
# Using BeautifulSoup package
# Developed and tested on Debian Testing (Jessie)
# Initial development 25 July 2012
# Copyright (C) 2012,2013 Sharuzzaman Ahmat Raslan (sharuzzaman@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib2
from BeautifulSoup import BeautifulSoup
import datetime
#maybank website looking for user-agent header
req = urllib2.Request('http://www.maybank2u.com.my/mbbfrx/gold_rate.htm')
req.add_header('User-Agent', 'Mozilla')
website=urllib2.urlopen(req)
data=website.read()
soup = BeautifulSoup(data)
date=soup('td')[31].string
selling=soup('td')[32].string
buying=soup('td')[33].string
print "%s,%s,%s" % (date,selling,buying)
| #!/usr/bin/python
# Maybank Gold Investment Account price scraper
# Using BeautifulSoup package
# Developed and tested on Debian Testing (Jessie)
# Initial development 25 July 2012
# Copyright (C) 2012,2013 Sharuzzaman Ahmat Raslan (sharuzzaman@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib2
from BeautifulSoup import BeautifulSoup
import datetime
website=urllib2.urlopen('http://www.maybank2u.com.my/mbbfrx/gold_rate.htm')
data=website.read()
soup = BeautifulSoup(data)
date=soup('td')[31].string
selling=soup('td')[32].string
buying=soup('td')[33].string
print "%s,%s,%s" % (date,selling,buying)
| agpl-3.0 | Python |
a2582b3352582034af1b8dff99d4ac39a15d9b54 | Fix script to pass pep8 | aebm/shuffler | shuffler.py | shuffler.py | #!/usr/bin/env python3
import argparse
import random
import sys
DESCRIPTION = '''Shuffle the arguments received, if called without arguments
the lines read from stdin will be shuffled and printed to
stdout'''
def get_list():
return sys.stdin.readlines()
def print_list(list_):
for elem in list_:
print(elem.rstrip())
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
(args, list_) = parser.parse_known_args()
r = random.SystemRandom()
if not list_:
list_ = get_list()
r.shuffle(list_)
print_list(list_)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse
import random
import sys
DESCRIPTION = '''Shuffle the arguments received, if called without arguments
the lines read from stdin will be shuffled and printed to
stdout'''
def get_list():
return sys.stdin.readlines()
def print_list(list_):
for elem in list_:
print(elem.rstrip())
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
(args, list_) = parser.parse_known_args()
r = random.SystemRandom()
if not list_:
list_ = get_list()
r.shuffle(list_)
print_list(list_)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
7f356e3191344ad300f8b59f35e861833d09f693 | Fix typo | nlesc-ave/ave-rest-service | avedata/avedata.py | avedata/avedata.py | import os
import connexion
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
from flask_cors import CORS
connexion_app = connexion.App(__name__, specification_dir='../')
app = connexion_app.app
CORS(app)
app.config.update(dict(
DATABASE='ave.db'
))
app.config.from_pyfile(os.path.join(os.getcwd(),
'settings.cfg'), silent=True)
| import os
import connexion
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
from flask_corse import CORS
connexion_app = connexion.App(__name__, specification_dir='../')
app = connexion_app.app
CORS(app)
app.config.update(dict(
DATABASE='ave.db'
))
app.config.from_pyfile(os.path.join(os.getcwd(),
'settings.cfg'), silent=True)
| apache-2.0 | Python |
a556674523972bf4cb1f9cced73e6783bcf2f492 | update load and transform function to simplify data | epfahl/inaworld | inaworld/data.py | inaworld/data.py | """Load the corpus of data.
"""
import re
import pandas as pd
import toolz as tz
from . import utils
DEFAULT_DATA_PATH = 'movie_data.csv'
def strlst_to_lststr(genres):
"""Convert a string of list entries to a list of lowercase strings.
Examples
--------
>>> strlst_to_lststr('["Banking Hijinks", "Actuarial adventure"]')
['banking hijinks', "actuarial adventure"]
"""
return list(map(
lambda g: g.strip().lower()[1:-1],
re.sub('[\[\]]', '', genres).split(',')))
def to_datetime(date):
"""Given a string date, return a Python datetime.date. If only the year is
given, the date is (<year>, 1, 1). If the result is a null type or if an
exception is raised, None is returned.
"""
try:
ret = pd.to_datetime(date).to_pydatetime().date()
except:
ret = None
if pd.isnull(ret):
ret = None
return ret
def load(path=None):
"""Load the CSV data, transform into an appropriate form for exploitation,
and return a list of dicts
Notes
-----
* Only the genres and summaries are retained. It is straightforward to
retain additional columns and include other transformations.
"""
drop_cols = [
'id', 'title', 'release_date',
'runtime', 'box_office_revenue']
if path is None:
path = utils.local_filepath(DEFAULT_DATA_PATH)
def tx(d):
return tz.merge(d, {
'genres': strlst_to_lststr(d['genres'])})
return list(map(
tx,
(
pd.read_csv(path)
.drop(drop_cols, axis=1)
.to_dict('records'))))
| """Load the corpus of data.
"""
import re
import pandas as pd
import toolz as tz
def strlst_to_lststr(genres):
"""Convert a string of list entries to a list of lowercase strings.
Examples
--------
>>> strlst_to_lststr('["Banking Hijinks", "Actuarial adventure"]')
['banking hijinks', "actuarial adventure"]
"""
return list(map(
lambda g: g.strip().lower()[1:-1],
re.sub('[\[\]]', '', genres).split(',')))
def to_datetime(date):
"""Given a string date, return a Python datetime.date. If only the year is
given, the date is (<year>, 1, 1). If the result is a null type or if an
exception is raised, None is returned.
"""
try:
ret = pd.to_datetime(date).to_pydatetime().date()
except:
ret = None
if pd.isnull(ret):
ret = None
return ret
def load(path):
"""Load the CSV data, transform into an appropriate form for exploitation,
and return a list of dicts
"""
def tx(d):
return tz.merge(d, {
'genres': strlst_to_lststr(d['genres']),
'release_date': to_datetime(d['release_date'])})
return list(map(tx, pd.read_csv(path).to_dict('records')))
| mit | Python |
6a83c4808d7f1104aba832f53bcd25fb98be1686 | Bump to 1.0 dev version | karimbahgat/PyCRS | pycrs/__init__.py | pycrs/__init__.py | """
# PyCRS
PyCRS is a pure Python GIS package for reading, writing, and converting between various
common coordinate reference system (CRS) string and data source formats.
- [Home Page](http://github.com/karimbahgat/PyCRS)
- [API Documentation](http://pythonhosted.org/PyCRS)
"""
__version__ = "1.0.0-dev"
from . import loader
from . import parser
from . import utils
from .elements.containers import CRS
| """
# PyCRS
PyCRS is a pure Python GIS package for reading, writing, and converting between various
common coordinate reference system (CRS) string and data source formats.
- [Home Page](http://github.com/karimbahgat/PyCRS)
- [API Documentation](http://pythonhosted.org/PyCRS)
"""
__version__ = "0.1.4"
from . import loader
from . import parser
from . import utils
from .elements.containers import CRS
| mit | Python |
f17971d339c943277afb5d7b2731cd87a23c0a83 | Update documentation of AxesMiddleware | jazzband/django-axes | axes/middleware.py | axes/middleware.py | from typing import Callable
from django.conf import settings
from axes.helpers import (
get_lockout_response,
get_failure_limit,
get_client_username,
get_credentials,
)
from axes.handlers.proxy import AxesProxyHandler
class AxesMiddleware:
"""
Middleware that calculates necessary HTTP request attributes for attempt monitoring
and maps lockout signals into readable HTTP 403 Forbidden responses.
If a project uses `django rest framework`` then the middleware updates the
request and checks whether the limit has been exceeded. It's needed only
for integration with DRF because it uses its own request object.
This middleware recognizes a logout monitoring flag in the request and
and uses the ``axes.helpers.get_lockout_response`` handler for returning
customizable and context aware lockout message to the end user if necessary.
To customize the lockout handling behaviour further, you can subclass this middleware
and change the ``__call__`` method to your own liking.
Please see the following configuration flags before customizing this handler:
- ``AXES_LOCKOUT_TEMPLATE``,
- ``AXES_LOCKOUT_URL``,
- ``AXES_COOLOFF_MESSAGE``, and
- ``AXES_PERMALOCK_MESSAGE``.
"""
def __init__(self, get_response: Callable):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if "rest_framework" in settings.INSTALLED_APPS:
AxesProxyHandler.update_request(request)
username = get_client_username(request)
credentials = get_credentials(username)
failures_since_start = AxesProxyHandler.get_failures(
request, credentials
)
if (
settings.AXES_LOCK_OUT_AT_FAILURE
and failures_since_start
>= get_failure_limit(request, credentials)
):
request.axes_locked_out = True
if getattr(request, "axes_locked_out", None):
response = get_lockout_response(request) # type: ignore
return response
| from typing import Callable
from django.conf import settings
from axes.helpers import (
get_lockout_response,
get_failure_limit,
get_client_username,
get_credentials,
)
from axes.handlers.proxy import AxesProxyHandler
class AxesMiddleware:
"""
Middleware that calculates necessary HTTP request attributes for attempt monitoring
and maps lockout signals into readable HTTP 403 Forbidden responses.
This middleware recognizes a logout monitoring flag in the request and
and uses the ``axes.helpers.get_lockout_response`` handler for returning
customizable and context aware lockout message to the end user if necessary.
To customize the lockout handling behaviour further, you can subclass this middleware
and change the ``__call__`` method to your own liking.
Please see the following configuration flags before customizing this handler:
- ``AXES_LOCKOUT_TEMPLATE``,
- ``AXES_LOCKOUT_URL``,
- ``AXES_COOLOFF_MESSAGE``, and
- ``AXES_PERMALOCK_MESSAGE``.
"""
def __init__(self, get_response: Callable):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if "rest_framework" in settings.INSTALLED_APPS:
AxesProxyHandler.update_request(request)
username = get_client_username(request)
credentials = get_credentials(username)
failures_since_start = AxesProxyHandler.get_failures(
request, credentials
)
if (
settings.AXES_LOCK_OUT_AT_FAILURE
and failures_since_start
>= get_failure_limit(request, credentials)
):
request.axes_locked_out = True
if getattr(request, "axes_locked_out", None):
response = get_lockout_response(request) # type: ignore
return response
| mit | Python |
c1a9882a91d8914e52d67ccab59c4e4121a93198 | bump version | dswah/pyGAM | pygam/__init__.py | pygam/__init__.py | """
GAM toolkit
"""
from __future__ import absolute_import
from pygam.pygam import GAM
from pygam.pygam import LinearGAM
from pygam.pygam import LogisticGAM
from pygam.pygam import GammaGAM
from pygam.pygam import PoissonGAM
from pygam.pygam import InvGaussGAM
__all__ = ['GAM', 'LinearGAM', 'LogisticGAM', 'GammaGAM', 'PoissonGAM',
'InvGaussGAM']
__version__ = '0.3.0'
| """
GAM toolkit
"""
from __future__ import absolute_import
from pygam.pygam import GAM
from pygam.pygam import LinearGAM
from pygam.pygam import LogisticGAM
from pygam.pygam import GammaGAM
from pygam.pygam import PoissonGAM
from pygam.pygam import InvGaussGAM
__all__ = ['GAM', 'LinearGAM', 'LogisticGAM', 'GammaGAM', 'PoissonGAM',
'InvGaussGAM']
__version__ = '0.2.17'
| apache-2.0 | Python |
698f07a93dea9b57010b9c3ac33608165123bef5 | Extend timeout. | freevo/kaa-epg | src/__init__.py | src/__init__.py | import os
import logging
from socket import gethostbyname, gethostname
from kaa import ipc
from client import *
from server import *
__all__ = [ 'connect', 'DEFAULT_EPG_PORT', 'GuideClient', 'GuideServer' ]
# connected client object
_client = None
def connect(epgdb, address='127.0.0.1', logfile='/tmp/kaa-epg.log', loglevel=logging.INFO):
"""
"""
global _client
if _client:
return _client
if address.split(':')[0] not in ['127.0.0.1', '0.0.0.0'] and \
address.split(':')[0] != gethostbyname(gethostname()):
# epg is remote: host:port
if address.find(':') >= 0:
host, port = address.split(':', 1)
else:
host = address
port = DEFAULT_EPG_PORT
# create socket, pass it to client
_client = GuideClient((host, int(port)))
else:
# EPG is local, only use unix socket
# get server filename
server = os.path.join(os.path.dirname(__file__), 'server.py')
_client = ipc.launch([server, logfile, str(loglevel), epgdb, address],
5, GuideClient, "epg")
return _client
| import os
import logging
from socket import gethostbyname, gethostname
from kaa import ipc
from client import *
from server import *
__all__ = [ 'connect', 'DEFAULT_EPG_PORT', 'GuideClient', 'GuideServer' ]
# connected client object
_client = None
def connect(epgdb, address='127.0.0.1', logfile='/tmp/kaa-epg.log', loglevel=logging.INFO):
"""
"""
global _client
if _client:
return _client
if address.split(':')[0] not in ['127.0.0.1', '0.0.0.0'] and \
address.split(':')[0] != gethostbyname(gethostname()):
# epg is remote: host:port
if address.find(':') >= 0:
host, port = address.split(':', 1)
else:
host = address
port = DEFAULT_EPG_PORT
# create socket, pass it to client
_client = GuideClient((host, int(port)))
else:
# EPG is local, only use unix socket
# get server filename
server = os.path.join(os.path.dirname(__file__), 'server.py')
_client = ipc.launch([server, logfile, str(loglevel), epgdb, address],
2, GuideClient, "epg")
return _client
| lgpl-2.1 | Python |
1d1ac3fa0538bba2135627b972d268c90b8d3051 | Add exception | jsikel/jsikel | blckur/exceptions.py | blckur/exceptions.py | class TestException(Exception):
pass
class TestCheckFailed(TestException):
pass
class TestStatusFailed(TestException):
pass
class TestExpectFailed(TestException):
pass
| class TestException(Exception):
pass
class TestStatusFailed(TestException):
pass
class TestExpectFailed(TestException):
pass
| agpl-3.0 | Python |
a510d20cebe2aff86a6bf842d063b5df8937a7ec | Update site and project names for pylons integration. Fix behavior of empty lists. Add DSN. | tarkatronic/opbeat_python,tarkatronic/opbeat_python,inspirehep/raven-python,ticosax/opbeat_python,jbarbuto/raven-python,jmagnusson/raven-python,ronaldevers/raven-python,akalipetis/raven-python,collective/mr.poe,patrys/opbeat_python,arthurlogilab/raven-python,percipient/raven-python,inspirehep/raven-python,ronaldevers/raven-python,icereval/raven-python,jmp0xf/raven-python,Photonomie/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,nikolas/raven-python,patrys/opbeat_python,patrys/opbeat_python,dbravender/raven-python,akheron/raven-python,lopter/raven-python-old,akheron/raven-python,inspirehep/raven-python,akheron/raven-python,smarkets/raven-python,johansteffner/raven-python,arthurlogilab/raven-python,lepture/raven-python,danriti/raven-python,getsentry/raven-python,arthurlogilab/raven-python,johansteffner/raven-python,arthurlogilab/raven-python,openlabs/raven,recht/raven-python,beniwohli/apm-agent-python,ewdurbin/raven-python,smarkets/raven-python,alex/raven,jmagnusson/raven-python,jbarbuto/raven-python,beniwohli/apm-agent-python,someonehan/raven-python,hzy/raven-python,icereval/raven-python,danriti/raven-python,someonehan/raven-python,jmp0xf/raven-python,akalipetis/raven-python,icereval/raven-python,ewdurbin/raven-python,ticosax/opbeat_python,nikolas/raven-python,ewdurbin/raven-python,jmp0xf/raven-python,nikolas/raven-python,patrys/opbeat_python,daikeren/opbeat_python,Photonomie/raven-python,dbravender/raven-python,getsentry/raven-python,beniwohli/apm-agent-python,ronaldevers/raven-python,icereval/raven-python,jbarbuto/raven-python,johansteffner/raven-python,dirtycoder/opbeat_python,recht/raven-python,danriti/raven-python,hzy/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,akalipetis/raven-python,smarkets/raven-python,hzy/raven-python,lepture/raven-python,inspirehep/raven-python,daikeren/opbeat_python,percipient/ra
ven-python,someonehan/raven-python,dbravender/raven-python,recht/raven-python,percipient/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,jbarbuto/raven-python,jmagnusson/raven-python,daikeren/opbeat_python,Goldmund-Wyldebeast-Wunderliebe/raven-python,smarkets/raven-python,dirtycoder/opbeat_python,tarkatronic/opbeat_python,nikolas/raven-python,Photonomie/raven-python,getsentry/raven-python,dirtycoder/opbeat_python,beniwohli/apm-agent-python,ticosax/opbeat_python,lepture/raven-python | raven/contrib/pylons/__init__.py | raven/contrib/pylons/__init__.py | """
raven.contrib.pylons
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from raven.middleware import Sentry as Middleware
from raven.base import Client
def list_from_setting(config, setting):
value = config.get(setting)
if not value:
return None
return value.split()
class Sentry(Middleware):
def __init__(self, app, config):
if not config.get('sentry.servers'):
raise TypeError('The sentry.servers config variable is required')
servers = config.get('sentry_servers')
if servers:
servers = servers.split()
client = Client(
dsn=config.get('sentry.dsn'),
servers=list_from_setting(config, 'sentry.servers'),
name=config.get('sentry.name'),
key=config.get('sentry.key'),
public_key=config.get('sentry.public_key'),
secret_key=config.get('sentry.secret_key'),
project=config.get('sentry.project'),
site=config.get('sentry.site'),
include_paths=list_from_setting(config, 'sentry.include_paths'),
exclude_paths=list_from_setting(config, 'sentry.exclude_paths'),
)
super(Sentry, self).__init__(app, client)
| """
raven.contrib.pylons
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from raven.middleware import Sentry as Middleware
from raven.base import Client
class Sentry(Middleware):
def __init__(self, app, config):
if not config.get('sentry.servers'):
raise TypeError('The sentry.servers config variable is required')
client = Client(
servers=config['sentry.servers'].split(),
name=config.get('sentry.name'),
key=config.get('sentry.key'),
public_key=config.get('sentry.public_key'),
secret_key=config.get('sentry.secret_key'),
project=config.get('sentry.site_project'),
site=config.get('sentry.site_name'),
include_paths=config.get(
'sentry.include_paths', '').split() or None,
exclude_paths=config.get(
'sentry.exclude_paths', '').split() or None,
)
super(Sentry, self).__init__(app, client)
| bsd-3-clause | Python |
826429100092f449f73827bff3098f74ed4b0eff | Fix trakt_list test | qvazzler/Flexget,malkavi/Flexget,gazpachoking/Flexget,jacobmetrick/Flexget,ianstalk/Flexget,ianstalk/Flexget,Danfocus/Flexget,jawilson/Flexget,jacobmetrick/Flexget,jawilson/Flexget,qk4l/Flexget,jacobmetrick/Flexget,poulpito/Flexget,drwyrm/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,crawln45/Flexget,qk4l/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,poulpito/Flexget,tarzasai/Flexget,qk4l/Flexget,drwyrm/Flexget,OmgOhnoes/Flexget,dsemi/Flexget,malkavi/Flexget,jawilson/Flexget,poulpito/Flexget,LynxyssCZ/Flexget,oxc/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,crawln45/Flexget,OmgOhnoes/Flexget,LynxyssCZ/Flexget,crawln45/Flexget,qvazzler/Flexget,qvazzler/Flexget,malkavi/Flexget,tarzasai/Flexget,tobinjt/Flexget,tobinjt/Flexget,ianstalk/Flexget,tobinjt/Flexget,jawilson/Flexget,Flexget/Flexget,Danfocus/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,oxc/Flexget,sean797/Flexget,Danfocus/Flexget,sean797/Flexget,crawln45/Flexget,Flexget/Flexget,sean797/Flexget,tarzasai/Flexget,dsemi/Flexget,drwyrm/Flexget,oxc/Flexget,Danfocus/Flexget,dsemi/Flexget,malkavi/Flexget,LynxyssCZ/Flexget | tests/test_trakt_list_interface.py | tests/test_trakt_list_interface.py | from __future__ import unicode_literals, division, absolute_import
import pytest
from flexget.entry import Entry
from flexget.manager import Session
from flexget.plugins.api_trakt import TraktUserAuth
from flexget.plugins.list.trakt_list import TraktSet
@pytest.mark.online
class TestTraktList(object):
"""
Credentials for test account are:
username: flexget_list_test
password: flexget
"""
config = """
'tasks': {}
"""
trakt_config = {'account': 'flexget_list_test',
'list': 'watchlist',
'type': 'shows'}
def get_auth(self):
kwargs = {
'account': 'flexget_list_test',
'access_token': '895b5417b640ed0d40b5fbda56a3ad2158fc36f313c825f638e61b65586df32d',
'refresh_token': '3708a9f1057c10c0a9b59f544e6c758000d02b7d1cc66b9404e8d70210d15cba',
'created': 1458488839.44,
'expires': 7776000
}
# Creates the trakt token in db
with Session() as session:
auth = TraktUserAuth(**kwargs)
session.add(auth)
session.commit()
def test_trakt_add(self):
self.get_auth()
trakt_set = TraktSet(self.trakt_config)
# Initialize trakt set
trakt_set.clear()
entry = Entry(title='White collar', series_name='White Collar (2009)')
assert entry not in trakt_set
trakt_set.add(entry)
assert entry in trakt_set
| from __future__ import unicode_literals, division, absolute_import
import pytest
from flexget.entry import Entry
from flexget.manager import Session
from flexget.plugins.api_trakt import TraktUserAuth
from flexget.plugins.list.trakt_list import TraktSet
@pytest.mark.online
class TestTraktList(object):
"""
Credentials for test account are:
username: flexget_list_test
password: flexget
"""
config = """
'tasks': {}
"""
trakt_config = {'account': 'flexget_list_test',
'list': 'watchlist',
'type': 'shows'}
def get_auth(self):
kwargs = {
'account': 'flexget_list_test',
'access_token': '895b5417b640ed0d40b5fbda56a3ad2158fc36f313c825f638e61b65586df32d',
'refresh_token': '3708a9f1057c10c0a9b59f544e6c758000d02b7d1cc66b9404e8d70210d15cba',
'created': 1458488839.44,
'expires': 7776000
}
# Creates the trakt token in db
with Session() as session:
auth = TraktUserAuth(**kwargs)
session.add(auth)
session.commit()
def test_trakt_add(self):
self.get_auth()
trakt_set = TraktSet(self.trakt_config)
# Initialize trakt set
trakt_set.clear()
entry = Entry(title='White collar', series_name='White collar')
assert entry not in trakt_set
trakt_set.add(entry)
assert entry in trakt_set
| mit | Python |
cc894fea8e357f4bfdb3b34cf249f9f0ca817aff | make sample number a required argument in pair-test.py | perlinm/qcdg-nv-simulation,perlinm/qcdg-nv-simulation,perlinm/qcdg-nv-simulation | pair-test.py | pair-test.py | #!/usr/bin/python3
import sys, os
import subprocess as sp
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv) not in [3,4]:
print("usage: {} cutoff_start cutoff_end samples".format(sys.argv[0]))
exit(1)
start = int(sys.argv[1])
end = int(sys.argv[2])
samples = int(sys.argv[3])
if not start < end:
print("cutoff start must be less than end")
exit(2)
fname = "./data/pair-test-{}-{}-{}.txt".format(start,end,samples)
if not os.path.exists(fname):
cutoffs = range(start,end+1)
predicted = np.zeros(len(cutoffs))
actual = np.zeros(len(cutoffs))
for i in range(len(cutoffs)):
print("starting cutoff: {} kHz".format(cutoffs[i]))
predicted[i] = sp.check_output(["./pair-probability.py",str(cutoffs[i])])
actual[i] = sp.check_output(["./pair-search.py",str(cutoffs[i]),str(samples)])
with open(fname,'w') as f:
f.write("# samples: {}\n".format(samples))
f.write("# hyperfine_cutoff predicted actual\n")
for i in range(len(cutoffs)):
f.write("{} {} {}\n".format(cutoffs[i],predicted[i],actual[i]))
else:
cutoffs, predicted, actual = np.loadtxt(fname, unpack=True)
plt.title("Larmor pair probability test with {} samples".format(samples))
plt.plot(cutoffs,predicted,"k-",label="predicted")
plt.plot(cutoffs,actual,"k.",label="found")
plt.xlabel("Hyperfine cutoff [kHz]")
plt.ylabel("Proportion")
plt.ylim(0,1)
plt.legend(loc="best")
plt.savefig(fname.replace(".txt",".pdf"))
| #!/usr/bin/python3
import sys, os
import subprocess as sp
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv) not in [3,4]:
print("usage: {} cutoff_start cutoff_end [samples]".format(sys.argv[0]))
exit(1)
start = int(sys.argv[1])
end = int(sys.argv[2])
try:
samples = sys.argv[3]
except:
samples = "100"
if not start < end:
print("cutoff start must be less than end")
exit(2)
fname = "./data/pair-test-{}-{}-{}.txt".format(start,end,samples)
if not os.path.exists(fname):
cutoffs = range(start,end+1)
predicted = np.zeros(len(cutoffs))
actual = np.zeros(len(cutoffs))
for i in range(len(cutoffs)):
print("starting cutoff: {} kHz".format(cutoffs[i]))
predicted[i] = sp.check_output(["./pair-probability.py",str(cutoffs[i])])
actual[i] = sp.check_output(["./pair-search.py",str(cutoffs[i]),samples])
with open(fname,'w') as f:
f.write("# samples: {}\n".format(samples))
f.write("# hyperfine_cutoff predicted actual\n")
for i in range(len(cutoffs)):
f.write("{} {} {}\n".format(cutoffs[i],predicted[i],actual[i]))
else:
cutoffs, predicted, actual = np.loadtxt(fname, unpack=True)
plt.title("Larmor pair probability test with {} samples".format(samples))
plt.plot(cutoffs,predicted,"k-",label="predicted")
plt.plot(cutoffs,actual,"k.",label="found")
plt.xlabel("Hyperfine cutoff [kHz]")
plt.ylabel("Proportion")
plt.ylim(0,1)
plt.legend(loc="best")
plt.savefig(fname.replace(".txt",".pdf"))
| mit | Python |
595c6a01601422fd8cd25a8818be6c02a564acfd | Fix out-of-date names in oauth_utils | Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService | d4s2_auth/oauth_utils.py | d4s2_auth/oauth_utils.py | import requests
from requests_oauthlib import OAuth2Session
from models import OAuthService
def make_oauth(oauth_service):
return OAuth2Session(oauth_service.client_id,
redirect_uri=oauth_service.redirect_uri,
scope=oauth_service.scope.split())
def authorization_url(oauth_service):
oauth = make_oauth(oauth_service)
return oauth.authorization_url(oauth_service.authorization_uri) # url, state
def get_token_dict(oauth_service, authorization_response):
"""
:param oauth_service: An OAuthService model object
:param authorization_response: the auth response redirect URI
:return: A token dictionary, containing access_token and refresh_token
"""
oauth = make_oauth(oauth_service)
# Use code or authorization_response
token = oauth.fetch_token(oauth_service.token_uri,
authorization_response=authorization_response,
client_secret=oauth_service.client_secret)
return token
def get_resource(oauth_service, token_dict):
"""
:param oauth_service: An OAuthService model object
:param token_dict: a dict containing the access_token
:return:
"""
# Only post the access token_dict
post_data = dict((k, token_dict[k]) for k in ('access_token',))
response = requests.post(oauth_service.resource_uri, post_data)
response.raise_for_status()
return response.json()
def main():
duke_service = OAuthService.objects.first()
auth_url, state = authorization_url(duke_service)
print 'Please go to {} and authorize access'.format(auth_url)
authorization_response = raw_input('Enter the full callback URL: ')
# Probably need the state?
token = get_token_dict(duke_service, authorization_response)
print 'Token: {}'.format(token)
resource = get_resource(duke_service, token)
print resource
if __name__ == '__main__':
main()
| import requests
from requests_oauthlib import OAuth2Session
from models import OAuthService
def make_oauth(oauth_service):
return OAuth2Session(oauth_service.client_id,
redirect_uri=oauth_service.redirect_uri,
scope=oauth_service.scope.split())
def authorization_url(oauth_service):
oauth = make_oauth(oauth_service)
return oauth.authorization_url(oauth_service.authorization_uri) # url, state
def get_token_dict(oauth_service, authorization_response):
"""
:param oauth_service: An OAuthService model object
:param authorization_response: the auth response redirect URI
:return: A token dictionary, containing access_token and refresh_token
"""
oauth = make_oauth(oauth_service)
# Use code or authorization_response
token = oauth.fetch_token(oauth_service.token_uri,
authorization_response=authorization_response,
client_secret=oauth_service.client_secret)
return token
def get_resource(oauth_service, token_dict):
"""
:param oauth_service: An OAuthService model object
:param token_dict: a dict containing the access_token
:return:
"""
# Only post the access token_dict
post_data = dict((k, token_dict[k]) for k in ('access_token',))
response = requests.post(oauth_service.resource_uri, post_data)
response.raise_for_status()
return response.json()
def main():
duke_service = OAuthService.objects.first()
state = OAuthState.generate()
auth_url, state = authorization_url(duke_service)
print 'Please go to {} and authorize access'.format(auth_url)
authorization_response = raw_input('Enter the full callback URL: ')
# Probably need the state?
token = get_token(duke_service, authorization_response)
print 'Token: {}'.format(token)
resource = get_resource(duke_service, token)
print resource
if __name__ == '__main__':
main()
| mit | Python |
e91a8ebe5858d6ce039f64fb28cb964ad72faa5c | add a manange command to serve with prod config | rsalmond/seabus,rsalmond/seabus,rsalmond/seabus,rsalmond/seabus,rsalmond/seabus | manage.py | manage.py | #!/usr/bin/env python
import flask_migrate
import flask_script
from seabus.web.socketio import socketio
from seabus.common.database import db
from seabus.web.web import create_app
from seabus.nmea_listen.listener import listen
app = create_app('Prod')
manager = flask_script.Manager(app)
flask_migrate.Migrate(app, db)
manager.add_command('db', flask_migrate.MigrateCommand)
@manager.command
def rundev(debug=True, use_reloader=True):
socketio.run(
app,
host='0.0.0.0',
debug=debug,
use_reloader=use_reloader,
)
@manager.command
def serveprod():
socketio.run(app)
@manager.command
def listener():
listen(app.config)
if __name__ == '__main__':
manager.run()
| #!/usr/bin/env python
import flask_migrate
import flask_script
from seabus.web.socketio import socketio
from seabus.common.database import db
from seabus.web.web import create_app
from seabus.nmea_listen.listener import listen
app = create_app('Dev')
manager = flask_script.Manager(app)
flask_migrate.Migrate(app, db)
manager.add_command('db', flask_migrate.MigrateCommand)
@manager.command
def rundev(debug=True, use_reloader=True):
socketio.run(
app,
host='0.0.0.0',
debug=debug,
use_reloader=use_reloader,
)
@manager.command
def listener():
listen(app.config)
if __name__ == '__main__':
manager.run()
| mit | Python |
3955aa821415f1e7630b7f1511bcd3609d26334d | fix division name | opencivicdata/scrapers-ca,opencivicdata/scrapers-ca | ca_mb_winnipeg/people.py | ca_mb_winnipeg/people.py | from __future__ import unicode_literals
from utils import CanadianScraper, CanadianPerson as Person
import re
from six.moves.urllib.parse import urljoin
COUNCIL_PAGE = 'http://winnipeg.ca/council/'
class WinnipegPersonScraper(CanadianScraper):
def scrape(self):
page = self.lxmlize(COUNCIL_PAGE, 'utf-8')
nodes = page.xpath('//td[@width="105"]')
for node in nodes:
url = urljoin(COUNCIL_PAGE, node.xpath('.//a/@href')[0])
ward = re.search('([A-Z].+) Ward', node.xpath('.//a//text()')[0]).group(1)
ward = ward.replace(' – ', '—').replace(' - ', '—') # n-dash, m-dash, hyphen, m-dash
ward = ward.replace('St. Norbert', 'St Norbert') # to match ocd-division-ids
name = ' '.join(node.xpath('.//span[@class="k80B"][1]/text()'))
yield self.councillor_data(url, name, ward)
mayor_node = page.xpath('//td[@width="315"]')[0]
mayor_name = mayor_node.xpath('./a//text()')[0][len('Mayor '):]
mayor_photo_url = mayor_node.xpath('./img/@src')[0]
m = Person(primary_org='legislature', name=mayor_name, district='Winnipeg', role='Mayor')
m.add_source(COUNCIL_PAGE)
# @see http://www.winnipeg.ca/interhom/mayor/MayorForm.asp?Recipient=CLK-MayorWebMail
m.add_contact('email', 'CLK-MayorWebMail@winnipeg.ca') # hardcoded
m.image = mayor_photo_url
yield m
def councillor_data(self, url, name, ward):
page = self.lxmlize(url)
# email is, sadly, a form
photo_url = urljoin(url, page.xpath('//img[@class="bio_pic"]/@src')[0])
phone = page.xpath('//td[contains(., "Phone")]/following-sibling::td//text()')[0]
email = re.search('=([^&]+)', page.xpath('//tr[contains(., "Email")]//a/@href')[0]).group(1) + '@winnipeg.ca'
p = Person(primary_org='legislature', name=name, district=ward, role='Councillor')
p.add_source(COUNCIL_PAGE)
p.add_source(url)
p.add_contact('email', email)
p.add_contact('voice', phone, 'legislature')
p.image = photo_url
return p
| from __future__ import unicode_literals
from utils import CanadianScraper, CanadianPerson as Person
import re
from six.moves.urllib.parse import urljoin
COUNCIL_PAGE = 'http://winnipeg.ca/council/'
class WinnipegPersonScraper(CanadianScraper):
def scrape(self):
page = self.lxmlize(COUNCIL_PAGE, 'utf-8')
nodes = page.xpath('//td[@width="105"]')
for node in nodes:
url = urljoin(COUNCIL_PAGE, node.xpath('.//a/@href')[0])
ward = re.search('([A-Z].+) Ward', node.xpath('.//a//text()')[0]).group(1)
# South Winnipeg – St. Norbert
ward = ward.replace('South Winnipeg – ', '').replace(' - ', '—') # m-dash
name = ' '.join(node.xpath('.//span[@class="k80B"][1]/text()'))
yield self.councillor_data(url, name, ward)
mayor_node = page.xpath('//td[@width="315"]')[0]
mayor_name = mayor_node.xpath('./a//text()')[0][len('Mayor '):]
mayor_photo_url = mayor_node.xpath('./img/@src')[0]
m = Person(primary_org='legislature', name=mayor_name, district='Winnipeg', role='Mayor')
m.add_source(COUNCIL_PAGE)
# @see http://www.winnipeg.ca/interhom/mayor/MayorForm.asp?Recipient=CLK-MayorWebMail
m.add_contact('email', 'CLK-MayorWebMail@winnipeg.ca') # hardcoded
m.image = mayor_photo_url
yield m
def councillor_data(self, url, name, ward):
page = self.lxmlize(url)
# email is, sadly, a form
photo_url = urljoin(url, page.xpath('//img[@class="bio_pic"]/@src')[0])
phone = page.xpath('//td[contains(., "Phone")]/following-sibling::td//text()')[0]
email = re.search('=([^&]+)', page.xpath('//tr[contains(., "Email")]//a/@href')[0]).group(1) + '@winnipeg.ca'
p = Person(primary_org='legislature', name=name, district=ward, role='Councillor')
p.add_source(COUNCIL_PAGE)
p.add_source(url)
p.add_contact('email', email)
p.add_contact('voice', phone, 'legislature')
p.image = photo_url
return p
| mit | Python |
3017a23893a21a7783480c69cecfcabf70c1f446 | Fix tests | orchardmile/mongo-connector,orchardmile/mongo-connector | tests/test_algolia.py | tests/test_algolia.py | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for mongo-connector + Algolia."""
"""Integration tests for mongo-connector + Elasticsearch."""
import base64
import os
import sys
import time
from algoliasearch import algoliasearch
from gridfs import GridFS
sys.path[0:0] = [""]
from tests import elastic_pair
from tests.setup_cluster import ReplicaSet
from mongo_connector.doc_managers.algolia_doc_manager import DocManager
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from tests.util import assert_soon
from tests import unittest
class AlgoliaTestCase(unittest.TestCase):
"""Base class for all Algolia TestCases."""
@classmethod
def setUpClass(cls):
cls.algolia_client = algoliasearch.Client(os.environ['ALGOLIA_APPLICATION_ID'], os.environ['ALGOLIA_API_KEY'])
cls.algolia_doc = DocManager('%s:%s:%s' % (os.environ['ALGOLIA_APPLICATION_ID'], os.environ['ALGOLIA_API_KEY'], 'test_mongo_connector'), algolia_auto_commit_interval=0, algolia_commit_sync=True)
def setUp(self):
self.algolia_index = self.algolia_client.initIndex('test_mongo_connector')
self.algolia_index.clearIndex()
res = self.algolia_index.setSettings({ 'hitsPerPage': 20 }) # work-around empty settings
self.algolia_index.waitTask(res['taskID'])
def tearDown(self):
self.algolia_client.deleteIndex('test_mongo_connector')
if __name__ == '__main__':
unittest.main()
| # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for mongo-connector + Algolia."""
"""Integration tests for mongo-connector + Elasticsearch."""
import base64
import os
import sys
import time
from algoliasearch import algoliasearch
from gridfs import GridFS
sys.path[0:0] = [""]
from tests import elastic_pair
from tests.setup_cluster import ReplicaSet
from mongo_connector.doc_managers.algolia_doc_manager import DocManager
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from tests.util import assert_soon
from tests import unittest
class AlgoliaTestCase(unittest.TestCase):
"""Base class for all Algolia TestCases."""
@classmethod
def setUpClass(cls):
cls.algolia_client = algoliasearch.Client(os.environ['ALGOLIA_APPLICATION_ID'], os.environ['ALGOLIA_API_KEY'])
cls.algolia_doc = DocManager('%s:%s:%s' % (os.environ['ALGOLIA_APPLICATION_ID'], os.environ['ALGOLIA_API_KEY'], 'test_mongo_connector'), auto_commit_interval=0, commit_sync=True)
def setUp(self):
self.algolia_index = self.algolia_client.initIndex('test_mongo_connector')
self.algolia_index.clearIndex()
res = self.algolia_index.setSettings({ 'hitsPerPage': 20 }) # work-around empty settings
self.algolia_index.waitTask(res['taskID'])
def tearDown(self):
self.algolia_client.deleteIndex('test_mongo_connector')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
4ebf63772505d562f6b95c6a2a72b8b34fa9686c | Update aligner tests | MontrealCorpusTools/Montreal-Forced-Aligner | tests/test_aligner.py | tests/test_aligner.py | import os
import pytest
from aligner.aligner import TrainableAligner
def test_sick_mono(sick_dict, sick_corpus, generated_dir):
a = TrainableAligner(sick_corpus, sick_dict, os.path.join(generated_dir, 'sick_output'),
temp_directory=os.path.join(generated_dir, 'sickcorpus'), skip_input=True)
a.train_mono()
def test_sick_tri(sick_dict, sick_corpus, generated_dir):
a = TrainableAligner(sick_corpus, sick_dict, os.path.join(generated_dir, 'sick_output'),
temp_directory=os.path.join(generated_dir, 'sickcorpus'), skip_input=True)
a.train_tri()
def test_sick_tri_fmllr(sick_dict, sick_corpus, generated_dir):
a = TrainableAligner(sick_corpus, sick_dict, os.path.join(generated_dir, 'sick_output'),
temp_directory=os.path.join(generated_dir, 'sickcorpus'), skip_input=True)
a.train_tri_fmllr()
a.export_textgrids()
| import os
import pytest
from aligner.aligner import TrainableAligner
def test_sick_mono(sick_dict, sick_corpus, generated_dir):
a = TrainableAligner(sick_corpus, sick_dict, os.path.join(generated_dir, 'sick_output'),
temp_directory=os.path.join(generated_dir, 'sickcorpus'))
a.train_mono()
def test_sick_tri(sick_dict, sick_corpus, generated_dir):
a = TrainableAligner(sick_corpus, sick_dict, os.path.join(generated_dir, 'sick_output'),
temp_directory=os.path.join(generated_dir, 'sickcorpus'))
a.train_tri()
def test_sick_tri_fmllr(sick_dict, sick_corpus, generated_dir):
a = TrainableAligner(sick_corpus, sick_dict, os.path.join(generated_dir, 'sick_output'),
temp_directory=os.path.join(generated_dir, 'sickcorpus'))
a.train_tri_fmllr()
a.export_textgrids()
| mit | Python |
e38ae10a684fd0e1161d28d458d2eabdbf597c1d | select query, get language from tablename | Vesihiisi/COH-tools | wlmhelpers.py | wlmhelpers.py | import pymysql
def selectQuery(query, connection):
cursor = connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
return result
def getNumberOfRows(connection, tablename):
cursor = connection.cursor()
query = "SELECT COUNT(*) FROM `" + tablename + "`"
cursor.execute(query)
result = cursor.fetchone()
return result[0]
def showTables(connection):
cursor = connection.cursor()
cursor.execute("show tables")
return cursor.fetchall()
def tableIsEmpty(connection, tablename):
numberOfRows = getNumberOfRows(connection, tablename)
return numberOfRows == 0
def getNonEmptyCountryTables(connection):
countryTables = []
allTables = showTables(connection)
for table in allTables:
tablename = table[0]
if tablename.startswith("monuments_") and tablename != "monuments_all":
if tableIsEmpty(connection, tablename) == False:
countryTables.append(
(tablename, getNumberOfRows(connection, tablename)))
return countryTables
def getRowStats():
tables = getNonEmptyCountryTables(connection)
for tablename in tables:
rows = getNumberOfRows(connection, tablename)
print(tablename, rows)
def shortenTablename(tablename):
tablenameArr = tablename.split("_")[1:]
return "_".join(tablenameArr)
def saveToFile(filename, content):
with open(filename, "w") as out:
out.write(content)
print("Saved file: {}".format(filename))
def getLanguage(tablename):
return tablename.split('(', 1)[1].split(')')[0]
| import pymysql
def getNumberOfRows(connection, tablename):
cursor = connection.cursor()
query = "SELECT COUNT(*) FROM `" + tablename + "`"
cursor.execute(query)
result = cursor.fetchone()
return result[0]
def showTables(connection):
cursor = connection.cursor()
cursor.execute("show tables")
return cursor.fetchall()
def tableIsEmpty(connection, tablename):
numberOfRows = getNumberOfRows(connection, tablename)
return numberOfRows == 0
def getNonEmptyCountryTables(connection):
countryTables = []
allTables = showTables(connection)
for table in allTables:
tablename = table[0]
if tablename.startswith("monuments_") and tablename != "monuments_all":
if tableIsEmpty(connection, tablename) == False:
countryTables.append(
(tablename, getNumberOfRows(connection, tablename)))
return countryTables
def getRowStats():
tables = getNonEmptyCountryTables(connection)
for tablename in tables:
rows = getNumberOfRows(connection, tablename)
print(tablename, rows)
def shortenTablename(tablename):
tablenameArr = tablename.split("_")[1:]
return "_".join(tablenameArr)
def saveToFile(filename, content):
with open(filename, "w") as out:
out.write(content)
print("Saved file: {}".format(filename))
| mit | Python |
a3570205c90dd8757a833aed4f4069fbd33028e0 | Remove fixed owner and make loged in user instead | PNNutkung/Coursing-Field,PNNutkung/Coursing-Field,PNNutkung/Coursing-Field | course/views.py | course/views.py | from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from mainmodels.models import Category, Course, CourseInCategory
# Create your views here.
def createCourse(req):
if req.method == 'POST':
try:
courseName = req.POST['courseName']
courseCategory = req.POST['courseCategory']
courseDesc = req.POST['courseDesc']
courseThumbnail = req.FILES['courseThumbnail']
coursePrice = req.POST['coursePrice']
owner = req.user
newCourse = Course(courseName=courseName, courseDesc=courseDesc,courseThumbnail=courseThumbnail, owner=owner, coursePrice=coursePrice, isDelete=False)
newCourse.save()
category = Category.objects.get(categoryID=courseCategory)
newCourseCategory = CourseInCategory(category=category, course=newCourse)
newCourseCategory.save()
return render(req, 'course/createCourse.html', {'courseCategory':courseCategory, 'success': True, 'message': 'Create course successfully.'})
except:
return render(req, 'course/createCourse.html', {'courseCategory':courseCategory, 'success': False, 'message': 'Create course failed.'})
else:
courseCategory = Category.objects.all()
return render(req, 'course/createCourse.html', {'courseCategory':courseCategory})
| from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from mainmodels.models import Category, Course, CourseInCategory
from django.contrib.auth.models import User
# Create your views here.
def createCourse(req):
if req.method == 'POST':
try:
courseName = req.POST['courseName']
courseCategory = req.POST['courseCategory']
courseDesc = req.POST['courseDesc']
courseThumbnail = req.FILES['courseThumbnail']
coursePrice = req.POST['coursePrice']
owner = User.objects.get(username='nut')
newCourse = Course(courseName=courseName, courseDesc=courseDesc,courseThumbnail=courseThumbnail, owner=owner, coursePrice=coursePrice, isDelete=False)
newCourse.save()
category = Category.objects.get(categoryID=courseCategory)
newCourseCategory = CourseInCategory(category=category, course=newCourse)
newCourseCategory.save()
return render(req, 'course/createCourse.html', {'courseCategory':courseCategory, 'success': True, 'message': 'Create course successfully.'})
except:
return render(req, 'course/createCourse.html', {'courseCategory':courseCategory, 'success': False, 'message': 'Create course failed.'})
else:
courseCategory = Category.objects.all()
return render(req, 'course/createCourse.html', {'courseCategory':courseCategory})
| apache-2.0 | Python |
06f9598601a4701bf56b213764d645135ac5815e | Create Asciinema | coala/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,coala/coala-bears | bears/python/PythonPackageInitBear.py | bears/python/PythonPackageInitBear.py | import os
from coalib.results.Result import Result
from coalib.bears.GlobalBear import GlobalBear
class PythonPackageInitBear(GlobalBear):
LANGUAGES = {'Python', 'Python 3', 'Python 2'}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
ASCIINEMA_URL = 'https://asciinema.org/a/151310'
def run(self):
"""
Looks for missing __init__.py files in directories
containing python files.
"""
dirs = {os.path.split(filename)[0]
for filename in self.file_dict.keys()
if filename.endswith('.py')}
missing_inits = {directory for directory in dirs
if not os.path.join(directory, '__init__.py')
in self.file_dict}
for missing_init_dir in missing_inits:
yield Result(self,
'Directory "{}" does not contain __init__.py file'
.format(os.path.relpath(missing_init_dir,
self.get_config_dir())))
| import os
from coalib.results.Result import Result
from coalib.bears.GlobalBear import GlobalBear
class PythonPackageInitBear(GlobalBear):
LANGUAGES = {'Python', 'Python 3', 'Python 2'}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
def run(self):
"""
Looks for missing __init__.py files in directories
containing python files.
"""
dirs = {os.path.split(filename)[0]
for filename in self.file_dict.keys()
if filename.endswith('.py')}
missing_inits = {directory for directory in dirs
if not os.path.join(directory, '__init__.py')
in self.file_dict}
for missing_init_dir in missing_inits:
yield Result(self,
'Directory "{}" does not contain __init__.py file'
.format(os.path.relpath(missing_init_dir,
self.get_config_dir())))
| agpl-3.0 | Python |
ee5ef2ae9b146a6fb06fd9891502a8f66af06cce | remove hard coded path | ctSkennerton/BioSQL-Extensions | tests/test_biosqlx.py | tests/test_biosqlx.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `biosqlx` package."""
import os
import unittest
from io import StringIO
from click.testing import CliRunner
from biosqlx import biosqlx
from biosqlx import cli
class TestExportSequence(unittest.TestCase):
"""Tests for `biosqlx` package."""
def setUp(self):
"""Set up test fixtures, if any."""
sqlite3_db_file = os.path.join(os.path.dirname(__file__), 'test.db')
self.database_connection_params = ['-d', sqlite3_db_file, '-r', 'sqlite3']
self.common_params = ['export', 'sequence']
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_taxonomy_feat_prot(self):
"""Export from taxonomy as protein features."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'feat-prot'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('>', result.output[0])
def test_taxonomy_fasta(self):
"""Export from taxonomy as fasta."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('>', result.output[0])
def test_export_sequence_feat_nucl(self):
"""Export from taxonomy as nucleotide features."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'feat-nucl'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('>', result.output[0])
def test_export_sequence_genbank(self):
"""Export from taxonomy as genbank."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'gb'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('L', result.output[0])
def test_export_sequence_csv(self):
"""Export from taxonomy as csv."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'csv'])
self.assertEqual(result.exit_code, 0)
f = StringIO(result.output)
line = next(f)
f.close()
self.assertEqual(True, ',' in line)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `biosqlx` package."""
import unittest
from io import StringIO
from click.testing import CliRunner
from biosqlx import biosqlx
from biosqlx import cli
class TestExportSequence(unittest.TestCase):
"""Tests for `biosqlx` package."""
def setUp(self):
"""Set up test fixtures, if any."""
self.database_connection_params = ['-d', '/home/cts/local/BioSQL-Extensions/tests/test.db', '-r', 'sqlite3']
self.common_params = ['export', 'sequence']
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_taxonomy_feat_prot(self):
"""Export from taxonomy as protein features."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'feat-prot'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('>', result.output[0])
def test_taxonomy_fasta(self):
"""Export from taxonomy as fasta."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('>', result.output[0])
def test_export_sequence_feat_nucl(self):
"""Export from taxonomy as nucleotide features."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'feat-nucl'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('>', result.output[0])
def test_export_sequence_genbank(self):
"""Export from taxonomy as genbank."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'gb'])
self.assertEqual(result.exit_code, 0)
self.assertEqual('L', result.output[0])
def test_export_sequence_csv(self):
"""Export from taxonomy as csv."""
runner = CliRunner()
result = runner.invoke(cli.main, self.database_connection_params + self.common_params + ['--taxonomy', 'Geobacter', '-o', 'csv'])
self.assertEqual(result.exit_code, 0)
f = StringIO(result.output)
line = next(f)
f.close()
self.assertEqual(True, ',' in line)
| mit | Python |
00e42da665ac25e9d793f331adf4ce58c5bd67b9 | Remove not existing import | yaph/logya,yaph/logya,elaOnMars/logya,elaOnMars/logya,elaOnMars/logya | tests/test_content.py | tests/test_content.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from logya.content import read
def test_read_markdown():
doc = read('tests/fixtures/site/content/markdown.md')
assert isinstance(doc, dict)
assert '/test/markdown/' == doc['url']
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from logya.content import read, write
def test_read_markdown():
doc = read('tests/fixtures/site/content/markdown.md')
assert isinstance(doc, dict)
assert '/test/markdown/' == doc['url']
| mit | Python |
dd635d5ae86f39b8746de01a1320fe7b970df554 | mark test as "run last" using pytest-ordering | arne-cl/discoursegraphs | tests/test_corpora.py | tests/test_corpora.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <discoursegraphs.programming@arne.cl>
from copy import deepcopy
import pkgutil
from tempfile import NamedTemporaryFile, mkdtemp
import networkx as nx
import pytest
import discoursegraphs as dg
from discoursegraphs.corpora import pcc
@pytest.mark.last # this should be the last test to run
@pytest.mark.slowtest
def test_pcc():
"""
create document graphs for all PCC documents containing all annotation
layers and test them for cyclicity.
"""
assert len(pcc.document_ids) == 176
for doc_id in pcc.document_ids:
docgraph = pcc[doc_id]
assert isinstance(docgraph, dg.DiscourseDocumentGraph)
# We can't guarantee that all graphs are acyclic, because of secedges
# in TigerSentenceGraphs, but there must be no self loops.
if nx.is_directed_acyclic_graph(docgraph):
for src, target in docgraph.edges_iter():
assert src != target
# cyclic graphs must become acyclic once we remove the secedges
bad_graph = deepcopy(docgraph)
secedges = dg.select_edges_by(bad_graph, 'tiger:secedge')
bad_graph.remove_edges_from(secedges)
assert nx.is_directed_acyclic_graph(docgraph)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <discoursegraphs.programming@arne.cl>
from copy import deepcopy
import pkgutil
from tempfile import NamedTemporaryFile, mkdtemp
import networkx as nx
import pytest
import discoursegraphs as dg
from discoursegraphs.corpora import pcc
@pytest.mark.slowtest
def test_pcc():
"""
create document graphs for all PCC documents containing all annotation
layers and test them for cyclicity.
"""
assert len(pcc.document_ids) == 176
for doc_id in pcc.document_ids:
docgraph = pcc[doc_id]
assert isinstance(docgraph, dg.DiscourseDocumentGraph)
# We can't guarantee that all graphs are acyclic, because of secedges
# in TigerSentenceGraphs, but there must be no self loops.
if nx.is_directed_acyclic_graph(docgraph):
for src, target in docgraph.edges_iter():
assert src != target
# cyclic graphs must become acyclic once we remove the secedges
bad_graph = deepcopy(docgraph)
secedges = dg.select_edges_by(bad_graph, 'tiger:secedge')
bad_graph.remove_edges_from(secedges)
assert nx.is_directed_acyclic_graph(docgraph)
| bsd-3-clause | Python |
86847172f732f2b27a82336f5da1f29592bf6cd6 | Put some parentheses there | wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api | monitoring/post_to_slack/src/snapshot_reports.py | monitoring/post_to_slack/src/snapshot_reports.py | # -*- encoding: utf-8
import datetime as dt
import boto3
def pprint_timedelta(seconds):
"""
Returns a pretty-printed summary of a duration as seconds.
e.g. "1h", "2d 3h", "1m 4s".
"""
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
if hours == 0:
return '%dd' % days
else:
return '%dd %dh' % (days, hours)
elif hours > 0:
if minutes == 0:
return '%dh' % hours
else:
return '%dh %dm' % (hours, minutes)
elif minutes > 0:
if seconds == 0:
return '%dm' % minutes
else:
return '%dm %ds' % (minutes, seconds)
else:
return '%ds' % seconds
def get_snapshot_report():
"""
Try to return a string that describes the latest snapshots.
"""
lines = []
now = dt.datetime.now()
s3 = boto3.client('s3')
for version in ['v1', 'v2']:
try:
# Yes, this makes a bunch of hard-coded assumptions about the
# way the bucket is laid out. It's a quick-win helper for
# bug diagnosis, not a prod API.
s3_object = s3.head_object(
Bucket='wellcomecollection-data-public',
Key=f'catalogue/{version}/works.json.gz'
)
last_modified_date = s3_object['LastModified'].replace(tzinfo=None)
seconds = (now - last_modified_date).seconds
lines.append(
f'{version}: {pprint_timedelta(seconds)} ago ({last_modified_date.isoformat()})'
)
except Exception:
pass
return '\n'.join(lines)
| # -*- encoding: utf-8
import datetime as dt
import boto3
def pprint_timedelta(seconds):
"""
Returns a pretty-printed summary of a duration as seconds.
e.g. "1h", "2d 3h", "1m 4s".
"""
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
if hours == 0:
return '%dd' % days
else:
return '%dd %dh' % (days, hours)
elif hours > 0:
if minutes == 0:
return '%dh' % hours
else:
return '%dh %dm' % (hours, minutes)
elif minutes > 0:
if seconds == 0:
return '%dm' % minutes
else:
return '%dm %ds' % (minutes, seconds)
else:
return '%ds' % seconds
def get_snapshot_report():
"""
Try to return a string that describes the latest snapshots.
"""
lines = []
now = dt.datetime.now()
s3 = boto3.client('s3')
for version in ['v1', 'v2']:
try:
# Yes, this makes a bunch of hard-coded assumptions about the
# way the bucket is laid out. It's a quick-win helper for
# bug diagnosis, not a prod API.
s3_object = s3.head_object(
Bucket='wellcomecollection-data-public',
Key=f'catalogue/{version}/works.json.gz'
)
last_modified_date = s3_object['LastModified'].replace(tzinfo=None)
seconds = (now - last_modified_date).seconds
lines.append(
f'{version}: {pprint_timedelta(seconds)} ago {last_modified_date.isoformat()}'
)
except Exception:
pass
return '\n'.join(lines)
| mit | Python |
6f7890c8b29670f613b6a551ebac2b383f3a7a64 | Test units mismatch in recipe | chrisgilmerproj/brewday,chrisgilmerproj/brewday | tests/test_recipes.py | tests/test_recipes.py | import unittest
from brew.constants import IMPERIAL_UNITS
from brew.constants import SI_UNITS
from brew.recipes import Recipe
from fixtures import grain_additions
from fixtures import hop_additions
from fixtures import recipe
from fixtures import yeast
class TestRecipe(unittest.TestCase):
def setUp(self):
# Define Grains
self.grain_additions = grain_additions
# Define Hops
self.hop_additions = hop_additions
# Define Yeast
self.yeast = yeast
# Define Recipes
self.recipe = recipe
def test_str(self):
out = str(self.recipe)
self.assertEquals(out, 'pale ale')
def test_set_units(self):
self.assertEquals(self.recipe.units, IMPERIAL_UNITS)
self.recipe.set_units(SI_UNITS)
self.assertEquals(self.recipe.units, SI_UNITS)
def test_set_raises(self):
with self.assertRaises(Exception):
self.recipe.set_units('bad')
def test_grains_units_mismatch_raises(self):
grain_additions = [g.change_units() for g in self.grain_additions]
with self.assertRaises(Exception):
Recipe(name='pale ale',
grain_additions=grain_additions,
hop_additions=self.hop_additions,
yeast=self.yeast)
def test_hops_units_mismatch_raises(self):
hop_additions = [h.change_units() for h in self.hop_additions]
with self.assertRaises(Exception):
Recipe(name='pale ale',
grain_additions=self.grain_additions,
hop_additions=hop_additions,
yeast=self.yeast)
def test_validate(self):
data = self.recipe.to_dict()
Recipe.validate(data)
| import unittest
from brew.constants import IMPERIAL_UNITS
from brew.constants import SI_UNITS
from brew.recipes import Recipe
from fixtures import grain_additions
from fixtures import hop_additions
from fixtures import recipe
class TestRecipe(unittest.TestCase):
def setUp(self):
# Define Grains
self.grain_additions = grain_additions
# Define Hops
self.hop_additions = hop_additions
# Define Recipes
self.recipe = recipe
def test_str(self):
out = str(self.recipe)
self.assertEquals(out, 'pale ale')
def test_set_units(self):
self.assertEquals(self.recipe.units, IMPERIAL_UNITS)
self.recipe.set_units(SI_UNITS)
self.assertEquals(self.recipe.units, SI_UNITS)
def test_set_raises(self):
with self.assertRaises(Exception):
self.recipe.set_units('bad')
def test_validate(self):
data = self.recipe.to_dict()
Recipe.validate(data)
| mit | Python |
0fc99faf79a3e12c6dc23586af9f5d7734d3c3c0 | Rewrite routing tests | nickfrostatx/malt | tests/test_routing.py | tests/test_routing.py | # -*- coding: utf-8 -*-
"""Test that the package exists and has specified metadata."""
from malt.routing import Router
import pytest
def test_get_and_post():
router = Router()
def get_root():
pass
def post_root():
pass
def get_home():
pass
router.add_rule('GET', '/', get_root)
router.add_rule('POST', '/', post_root)
router.add_rule('GET', '/home', get_home)
router.add_rule('GET', '/homie', get_home)
assert router.path_map == {
'/': {
'GET': get_root,
'POST': post_root,
},
'/home': {
'GET': get_home,
},
'/homie': {
'GET': get_home,
}
}
assert router.view_map == {
get_root: ('GET', '/'),
post_root: ('POST', '/'),
get_home: ('GET', '/home'),
}
assert router.get_view('GET', '/') == get_root
assert router.get_view('POST', '/') == post_root
assert router.get_view('GET', '/home') == get_home
assert router.get_view('GET', '/homie') == get_home
assert router.path_for(get_root) == '/'
assert router.path_for(post_root) == '/'
assert router.path_for(get_home) == '/home'
def test_duplicate_route():
router = Router()
def get_root():
pass
def get_root_again():
pass
router.add_rule('GET', '/', get_root)
for view in (get_root, get_root_again):
with pytest.raises(Exception) as exc_info:
router.add_rule('GET', '/', view)
assert 'Duplicate route: GET /' in str(exc_info)
def test_bad_rules():
router = Router()
def get_root():
pass
router.add_rule('GET', '/', get_root)
assert router.get_view('GET', '/') == get_root
for method in ('GET', 'POST', 'PUT', 'DELETE', 'fake method'):
with pytest.raises(LookupError) as exc_info:
router.get_view(method, '/something')
assert str(exc_info.value) == 'No such path'
for method in ('POST', 'PUT', 'DELETE', 'fake method'):
with pytest.raises(LookupError) as exc_info:
router.get_view(method, '/')
assert str(exc_info.value) == 'No such method'
def test_missing_view():
router = Router()
def get_root():
pass
def unregistered():
pass
router.add_rule('GET', '/', get_root)
assert router.path_for(get_root) == '/'
with pytest.raises(KeyError):
router.path_for(unregistered)
| # -*- coding: utf-8 -*-
"""Test that the package exists and has specified metadata."""
from malt import Malt
import pytest
@pytest.fixture
def app():
app = Malt()
@app.get('/')
def root(request):
return Response('Hello World!\n')
return app
def test_base_routing():
app = Malt()
@app.get('/')
def root(request):
return Response('Hello World!\n')
@app.post('/')
def post_home(request):
return Response()
assert app.router.path_map == {'/': {'GET': root, 'POST': post_home}}
def test_duplicate_route(app):
def view(request):
return Response('')
with pytest.raises(Exception) as exc_info:
app.get('/')(view)
assert 'Duplicate route: GET /' in str(exc_info)
decorator = app.post('/')
decorator(view)
with pytest.raises(Exception) as exc_info:
decorator(view)
assert 'Duplicate route: POST /' in str(exc_info)
def test_url_for(app):
@app.get('/hello')
def hello(request):
return Response('Hi there\n')
def unregistered(request):
return Response('')
assert app.url_for(hello) == '/hello'
assert app.url_for(hello, msg='a = b') == '/hello?msg=a+%3D+b'
with pytest.raises(Exception) as exc_info:
app.url_for(unregistered)
assert 'is not registered to a url' in str(exc_info)
| mit | Python |
59c8c407df2aec220677324b77f5910f25a5d062 | Make binary_accuracy's type-checking stricter | delta2323/chainer,anaruse/chainer,kashif/chainer,jnishi/chainer,keisuke-umezawa/chainer,kikusu/chainer,wkentaro/chainer,chainer/chainer,benob/chainer,chainer/chainer,niboshi/chainer,ktnyt/chainer,okuta/chainer,rezoo/chainer,aonotas/chainer,wkentaro/chainer,hvy/chainer,jnishi/chainer,tkerola/chainer,hvy/chainer,cupy/cupy,wkentaro/chainer,cupy/cupy,ysekky/chainer,chainer/chainer,AlpacaDB/chainer,ktnyt/chainer,benob/chainer,cemoody/chainer,hvy/chainer,okuta/chainer,niboshi/chainer,wkentaro/chainer,hvy/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,AlpacaDB/chainer,kikusu/chainer,ktnyt/chainer,jnishi/chainer,cupy/cupy,kiyukuta/chainer,okuta/chainer,chainer/chainer,jnishi/chainer,pfnet/chainer,sinhrks/chainer,sinhrks/chainer,cupy/cupy,niboshi/chainer,ronekko/chainer,ktnyt/chainer,okuta/chainer,niboshi/chainer | chainer/functions/evaluation/binary_accuracy.py | chainer/functions/evaluation/binary_accuracy.py | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class BinaryAccuracy(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
t_type.dtype == numpy.int32,
t_type.shape == x_type.shape,
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
y, t = inputs
# flatten
y = y.reshape(-1)
t = t.reshape(-1)
c = (y >= 0)
return xp.asarray((c == t).mean(dtype='f')),
def binary_accuracy(y, t):
"""Computes binary classification accuracy of the minibatch.
Args:
y (Variable): Variable holding a matrix whose i-th element
indicates the score of positive at the i-th example.
t (Variable): Variable holding an int32 vector of groundtruth
labels (0 or 1).
Returns:
Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
"""
return BinaryAccuracy()(y, t)
| import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class BinaryAccuracy(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
t_type.dtype == numpy.int32,
t_type.shape[0] == x_type.shape[0],
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
y, t = inputs
# flatten
y = y.reshape(len(y), -1)
t = t.reshape(len(t), -1)
c = (y >= 0)
return xp.asarray((c == t).mean(dtype='f')),
def binary_accuracy(y, t):
"""Computes binary classification accuracy of the minibatch.
Args:
y (Variable): Variable holding a matrix whose i-th element
indicates the score of positive at the i-th example.
t (Variable): Variable holding an int32 vector of groundtruth
labels (0 or 1).
Returns:
Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
"""
return BinaryAccuracy()(y, t)
| mit | Python |
28ae68d405514364855fbdece5b22f636e802d12 | Update versions | conan-io/conan-package-tools | cpt/__init__.py | cpt/__init__.py |
__version__ = '0.39.0-dev'
def get_client_version():
from conans.model.version import Version
from conans import __version__ as client_version
from os import getenv
# It is a mess comparing dev versions, lets assume that the -dev is the further release
return Version(client_version.replace("-dev", ""))
|
__version__ = '0.37.0'
def get_client_version():
from conans.model.version import Version
from conans import __version__ as client_version
from os import getenv
# It is a mess comparing dev versions, lets assume that the -dev is the further release
return Version(client_version.replace("-dev", ""))
| mit | Python |
d5aef03683f77400cd56160852c650de09d0a8bb | Fix deprecated unittest API usage. | scoder/cython,cython/cython,da-woods/cython,scoder/cython,da-woods/cython,cython/cython,da-woods/cython,cython/cython,scoder/cython,scoder/cython,cython/cython,da-woods/cython | Cython/Build/Tests/TestStripLiterals.py | Cython/Build/Tests/TestStripLiterals.py | from Cython.Build.Dependencies import strip_string_literals
from Cython.TestUtils import CythonTest
class TestStripLiterals(CythonTest):
def t(self, before, expected):
actual, literals = strip_string_literals(before, prefix="_L")
self.assertEqual(expected, actual)
for key, value in literals.items():
actual = actual.replace(key, value)
self.assertEqual(before, actual)
def test_empty(self):
self.t("", "")
def test_single_quote(self):
self.t("'x'", "'_L1_'")
def test_double_quote(self):
self.t('"x"', '"_L1_"')
def test_nested_quotes(self):
self.t(""" '"' "'" """, """ '_L1_' "_L2_" """)
def test_triple_quote(self):
self.t(" '''a\n''' ", " '''_L1_''' ")
def test_backslash(self):
self.t(r"'a\'b'", "'_L1_'")
self.t(r"'a\\'", "'_L1_'")
self.t(r"'a\\\'b'", "'_L1_'")
def test_unicode(self):
self.t("u'abc'", "u'_L1_'")
def test_raw(self):
self.t(r"r'abc\\'", "r'_L1_'")
def test_raw_unicode(self):
self.t(r"ru'abc\\'", "ru'_L1_'")
def test_comment(self):
self.t("abc # foo", "abc #_L1_")
def test_comment_and_quote(self):
self.t("abc # 'x'", "abc #_L1_")
self.t("'abc#'", "'_L1_'")
def test_include(self):
self.t("include 'a.pxi' # something here",
"include '_L1_' #_L2_")
def test_extern(self):
self.t("cdef extern from 'a.h': # comment",
"cdef extern from '_L1_': #_L2_")
| from Cython.Build.Dependencies import strip_string_literals
from Cython.TestUtils import CythonTest
class TestStripLiterals(CythonTest):
def t(self, before, expected):
actual, literals = strip_string_literals(before, prefix="_L")
self.assertEquals(expected, actual)
for key, value in literals.items():
actual = actual.replace(key, value)
self.assertEquals(before, actual)
def test_empty(self):
self.t("", "")
def test_single_quote(self):
self.t("'x'", "'_L1_'")
def test_double_quote(self):
self.t('"x"', '"_L1_"')
def test_nested_quotes(self):
self.t(""" '"' "'" """, """ '_L1_' "_L2_" """)
def test_triple_quote(self):
self.t(" '''a\n''' ", " '''_L1_''' ")
def test_backslash(self):
self.t(r"'a\'b'", "'_L1_'")
self.t(r"'a\\'", "'_L1_'")
self.t(r"'a\\\'b'", "'_L1_'")
def test_unicode(self):
self.t("u'abc'", "u'_L1_'")
def test_raw(self):
self.t(r"r'abc\\'", "r'_L1_'")
def test_raw_unicode(self):
self.t(r"ru'abc\\'", "ru'_L1_'")
def test_comment(self):
self.t("abc # foo", "abc #_L1_")
def test_comment_and_quote(self):
self.t("abc # 'x'", "abc #_L1_")
self.t("'abc#'", "'_L1_'")
def test_include(self):
self.t("include 'a.pxi' # something here",
"include '_L1_' #_L2_")
def test_extern(self):
self.t("cdef extern from 'a.h': # comment",
"cdef extern from '_L1_': #_L2_")
| apache-2.0 | Python |
7e1ed594dadca06c256424f7a950a323138171f3 | Support Conan 1.23 | conan-io/conan-package-tools | cpt/__init__.py | cpt/__init__.py |
__version__ = '0.32.0-dev'
NEWEST_CONAN_SUPPORTED = "1.23.000"
def get_client_version():
from conans.model.version import Version
from conans import __version__ as client_version
# It is a mess comparing dev versions, lets assume that the -dev is the further release
return Version(client_version.replace("-dev", ""))
|
__version__ = '0.31.1'
NEWEST_CONAN_SUPPORTED = "1.22.200"
def get_client_version():
from conans.model.version import Version
from conans import __version__ as client_version
# It is a mess comparing dev versions, lets assume that the -dev is the further release
return Version(client_version.replace("-dev", ""))
| mit | Python |
90b602a1965418efef706c92bc4da8fc14e013d9 | Change cache policy to only include analytics api endpoints | uw-it-aca/canvas-analytics,uw-it-aca/canvas-analytics,uw-it-aca/canvas-analytics,uw-it-aca/canvas-analytics | data_aggregator/cache.py | data_aggregator/cache.py | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import re
from gcs_clients import RestclientGCSClient
FOREVER = 0
class DataAggregatorGCSCache(RestclientGCSClient):
def get_cache_expiration_time(self, service, url, status=None):
if "canvas" == service:
if re.match(r'^.*/api/v1/.*/analytics/.*', url):
return FOREVER
| # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from gcs_clients import RestclientGCSClient
FOREVER = 0
class DataAggregatorGCSCache(RestclientGCSClient):
def get_cache_expiration_time(self, service, url, status=None):
if "canvas" == service:
return FOREVER
| apache-2.0 | Python |
05ab8c591f487aec84b68832393cd2fdca68ef38 | Implement secure RBAC for auto_allocated_topology API | mahak/neutron,openstack/neutron,mahak/neutron,mahak/neutron,openstack/neutron,openstack/neutron | neutron/conf/policies/auto_allocated_topology.py | neutron/conf/policies/auto_allocated_topology.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from neutron.conf.policies import base
RESOURCE_PATH = '/auto-allocated-topology/{project_id}'
DEPRECATION_REASON = (
"The Auto allocated topology API now supports system scope "
"and default roles.")
rules = [
policy.DocumentedRuleDefault(
name='get_auto_allocated_topology',
check_str=base.SYSTEM_OR_PROJECT_READER,
description="Get a project's auto-allocated topology",
operations=[
{
'method': 'GET',
'path': RESOURCE_PATH,
},
],
scope_types=['system', 'project'],
deprecated_rule=policy.DeprecatedRule(
name='get_auto_allocated_topology',
check_str=base.RULE_ADMIN_OR_OWNER),
deprecated_reason=DEPRECATION_REASON,
deprecated_since='Wallaby'
),
policy.DocumentedRuleDefault(
name='delete_auto_allocated_topology',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
description="Delete a project's auto-allocated topology",
operations=[
{
'method': 'DELETE',
'path': RESOURCE_PATH,
},
],
scope_types=['system', 'project'],
deprecated_rule=policy.DeprecatedRule(
name='delete_auto_allocated_topology',
check_str=base.RULE_ADMIN_OR_OWNER),
deprecated_reason=DEPRECATION_REASON,
deprecated_since='Wallaby'
),
]
def list_rules():
return rules
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from neutron.conf.policies import base
RESOURCE_PATH = '/auto-allocated-topology/{project_id}'
rules = [
policy.DocumentedRuleDefault(
'get_auto_allocated_topology',
base.RULE_ADMIN_OR_OWNER,
"Get a project's auto-allocated topology",
[
{
'method': 'GET',
'path': RESOURCE_PATH,
},
]
),
policy.DocumentedRuleDefault(
'delete_auto_allocated_topology',
base.RULE_ADMIN_OR_OWNER,
"Delete a project's auto-allocated topology",
[
{
'method': 'DELETE',
'path': RESOURCE_PATH,
},
]
),
]
def list_rules():
return rules
| apache-2.0 | Python |
b5e4d0c42947260bcc6e2e033f6a6dd294edfa39 | Use snake_case for final PDF and folder name | Lodifice/mfnf-pdf-export,Lodifice/mfnf-pdf-export,Lodifice/mfnf-pdf-export | create_books.py | create_books.py | """Creates a PDF from an article of the project „Mathe für Nicht-Freaks“."""
import os
import shelve
import requests
from mfnf.api import HTTPMediaWikiAPI
from mfnf.parser import ArticleParser
from mfnf.utils import CachedFunction
from mfnf.sitemap import parse_sitemap
from mfnf.latex import LatexExporter, MediaWiki2Latex
from mfnf.utils import to_snake_case
# title of article which shall be converted to PDF
SITEMAP_ARTICLE_NAME = "Mathe für Nicht-Freaks: Projekte/LMU Buchprojekte"
def create_book(book, api):
"""Creates the LaTeX file of a book."""
book_name = to_snake_case(book["name"])
target = os.path.join("out", book_name, book_name + ".tex")
try:
os.makedirs(os.path.dirname(target))
except FileExistsError:
pass
book = MediaWiki2Latex()(book)
with open(target, "w") as latex_file:
LatexExporter(api, os.path.dirname(target))(book, latex_file)
def run_script():
"""Runs this script."""
with shelve.open(".cache.db", "c", writeback=True) as database:
cached_function = CachedFunction(database)
class CachedMediaWikiAPI(HTTPMediaWikiAPI):
"""A version of the API where the main function calls are
cached."""
@cached_function
def convert_text_to_html(self, title, text):
return super().convert_text_to_html(title, text)
@cached_function
def normalize_formula(self, formula, mode):
return super().normalize_formula(formula, mode)
@cached_function
def get_revisions(self, title):
# TODO: The list of revisions can change, thus this caching
# need to be implemented in a better way in the future.
return super().get_revisions(title)
api = CachedMediaWikiAPI(requests.Session())
parser = ArticleParser(api=api)
sitemap = parse_sitemap(api.get_content(SITEMAP_ARTICLE_NAME))
sitemap = parser(sitemap)
for book in sitemap["children"]:
create_book(book, api)
if __name__ == "__main__":
run_script()
| """Creates a PDF from an article of the project „Mathe für Nicht-Freaks“."""
import os
import shelve
import requests
from mfnf.api import HTTPMediaWikiAPI
from mfnf.parser import ArticleParser
from mfnf.utils import CachedFunction
from mfnf.sitemap import parse_sitemap
from mfnf.latex import LatexExporter, MediaWiki2Latex
# title of article which shall be converted to PDF
SITEMAP_ARTICLE_NAME = "Mathe für Nicht-Freaks: Projekte/LMU Buchprojekte"
def create_book(book, api):
"""Creates the LaTeX file of a book."""
target = os.path.join("out", book["name"], book["name"] + ".tex")
try:
os.makedirs(os.path.dirname(target))
except FileExistsError:
pass
book = MediaWiki2Latex()(book)
with open(target, "w") as latex_file:
LatexExporter(api, os.path.dirname(target))(book, latex_file)
def run_script():
"""Runs this script."""
with shelve.open(".cache.db", "c", writeback=True) as database:
cached_function = CachedFunction(database)
class CachedMediaWikiAPI(HTTPMediaWikiAPI):
"""A version of the API where the main function calls are
cached."""
@cached_function
def convert_text_to_html(self, title, text):
return super().convert_text_to_html(title, text)
@cached_function
def normalize_formula(self, formula, mode):
return super().normalize_formula(formula, mode)
@cached_function
def get_revisions(self, title):
# TODO: The list of revisions can change, thus this caching
# need to be implemented in a better way in the future.
return super().get_revisions(title)
api = CachedMediaWikiAPI(requests.Session())
parser = ArticleParser(api=api)
sitemap = parse_sitemap(api.get_content(SITEMAP_ARTICLE_NAME))
sitemap = parser(sitemap)
for book in sitemap["children"]:
create_book(book, api)
if __name__ == "__main__":
run_script()
| apache-2.0 | Python |
4b3e1a1fa2dadcccd65d974a27c72169a68c6a61 | Add subtraction and scalar multiplication tests for Terrain | jackromo/RandTerrainPy | tests/test_terrain.py | tests/test_terrain.py | import unittest
from randterrainpy import *
class TerrainTesterPy(unittest.TestCase):
def setUp(self):
self.ter1 = Terrain(1, 1)
self.ter2 = Terrain(2, 4)
self.ter3 = Terrain(1, 1)
def test_getitem(self):
self.assertEqual(self.ter1[0, 0], 0)
self.assertEqual(self.ter2[1, 2], 0)
def test_eq(self):
self.assertEqual(self.ter1, self.ter3)
self.assertNotEqual(self.ter1, self.ter2)
def test_setitem(self):
self.ter1[0, 0] = 0.9
self.assertEqual(self.ter1[0, 0], 0.9)
self.ter2[1, 2] = 0.5
self.assertEqual(self.ter2[1, 2], 0.5)
def test_add(self):
self.assertRaises(InvalidDimensionsError, self.ter1.__add__, self.ter2)
self.assertEqual(self.ter1+self.ter3, self.ter1)
test_ter = Terrain(1, 1)
test_ter[0, 0] = 1
self.assertEqual((self.ter1+test_ter)[0, 0], 1) # capped at 1
def test_sub(self):
self.assertRaises(InvalidDimensionsError, self.ter1.__sub__, self.ter2)
self.assertEqual(self.ter1-self.ter3, self.ter1)
test_ter = Terrain(1, 1)
test_ter[0, 0] = 1
self.assertEqual((self.ter1-test_ter)[0, 0], 0) # capped at 0
def test_mul(self):
self.assertEqual(self.ter1*0, Terrain(self.ter1.width, self.ter1.length))
self.assertEqual(self.ter2*1, self.ter2)
self.assertNotEqual(self.ter2*0.5, self.ter2)
if __name__ == "__main__":
unittest.main()
| import unittest
from randterrainpy import *
class TerrainTesterPy(unittest.TestCase):
def setUp(self):
self.ter1 = Terrain(1, 1)
self.ter2 = Terrain(2, 4)
self.ter3 = Terrain(1, 1)
def test_getitem(self):
self.assertEqual(self.ter1[0, 0], 0)
self.assertEqual(self.ter2[1, 2], 0)
def test_eq(self):
self.assertEqual(self.ter1, self.ter3)
self.assertNotEqual(self.ter1, self.ter2)
def test_setitem(self):
self.ter1[0, 0] = 1
self.assertEqual(self.ter1[0, 0], 1)
self.ter2[1, 2] = 0.5
self.assertEqual(self.ter2[1, 2], 0.5)
def test_add(self):
self.assertRaises(InvalidDimensionsError, self.ter1.__add__, self.ter2)
self.assertEqual(self.ter1+self.ter3, self.ter1)
if __name__ == "__main__":
unittest.main()
| mit | Python |
5d082a9178ae70a3f1adca07e1ef9263a15ee1e3 | Fix default alarm_state_history_driver value | openstack/monasca-persister,openstack/monasca-persister,stackforge/monasca-persister,stackforge/monasca-persister,openstack/monasca-persister,stackforge/monasca-persister | monasca_persister/conf/repositories.py | monasca_persister/conf/repositories.py | # (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
repositories_opts = [
cfg.StrOpt(
name='metrics_driver',
help='The repository driver to use for metrics',
default=('monasca_persister.repositories.influxdb.metrics_repository:'
'MetricInfluxdbRepository')),
cfg.StrOpt(
name='alarm_state_history_driver',
help='The repository driver to use for alarm state history',
default=('monasca_persister.repositories.influxdb.'
'alarm_state_history_repository:'
'AlarmStateHistInfluxdbRepository')),
cfg.StrOpt(
name='events_driver',
help='The repository driver to use for events',
default=('monasca_persister.repositories.elasticsearch.events_repository:'
'ElasticSearchEventsRepository'))]
repositories_group = cfg.OptGroup(name='repositories',
title='repositories')
def register_opts(conf):
conf.register_group(repositories_group)
conf.register_opts(repositories_opts, repositories_group)
def list_opts():
return repositories_group, repositories_opts
| # (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
repositories_opts = [
cfg.StrOpt(
name='metrics_driver',
help='The repository driver to use for metrics',
default=('monasca_persister.repositories.influxdb.metrics_repository:'
'MetricInfluxdbRepository')),
cfg.StrOpt(
name='alarm_state_history_driver',
help='The repository driver to use for alarm state history',
default=('monasca_persister.repositories.influxdb.metrics_repository:'
'MetricInfluxdbRepository')),
cfg.StrOpt(
name='events_driver',
help='The repository driver to use for events',
default=('monasca_persister.repositories.elasticsearch.events_repository:'
'ElasticSearchEventsRepository'))]
repositories_group = cfg.OptGroup(name='repositories',
title='repositories')
def register_opts(conf):
conf.register_group(repositories_group)
conf.register_opts(repositories_opts, repositories_group)
def list_opts():
return repositories_group, repositories_opts
| apache-2.0 | Python |
9c02cfa250b0aa8f867d136cc57d405a7d4f3ec0 | check if website is not None | gdesmott/manger-veggie,gdesmott/manger-veggie,gdesmott/manger-veggie,gdesmott/manger-veggie | restaurant/management/commands/populate.py | restaurant/management/commands/populate.py | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
#from optparse import make_option
from restaurant.models import Restaurant
from geopy.geocoders import Nominatim
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
)
def create_restaurant(self, name, address, website=None, phone=None, mail=None, contact=None, status=None, vg_contact=None):
if website is not None and not website.startswith('http'):
website = 'http://%s' % website
restaurant = Restaurant.objects.create(name=name, address=address, website=website,
phone=phone, mail=mail, contact=contact, status=status, vg_contact=vg_contact)
geolocator = Nominatim()
location = geolocator.geocode(address)
if location is not None:
restaurant.lat = location.latitude
restaurant.lon = location.longitude
restaurant.save()
else:
print "Unknown address", address
def handle(self, *args, **options):
Restaurant.objects.all().delete()
self.create_restaurant("Exki", "12, Chaussée D'Ixelles, 1050 Ixelles", 'www.exki.be', '02/502.72.77', status='2ème vague')
| # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
#from optparse import make_option
from restaurant.models import Restaurant
from geopy.geocoders import Nominatim
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
)
def create_restaurant(self, name, address, website=None, phone=None, mail=None, contact=None, status=None, vg_contact=None):
if not website.startswith('http'):
website = 'http://%s' % website
restaurant = Restaurant.objects.create(name=name, address=address, website=website,
phone=phone, mail=mail, contact=contact, status=status, vg_contact=vg_contact)
geolocator = Nominatim()
location = geolocator.geocode(address)
if location is not None:
restaurant.lat = location.latitude
restaurant.lon = location.longitude
restaurant.save()
else:
print "Unknown address", address
def handle(self, *args, **options):
Restaurant.objects.all().delete()
self.create_restaurant("Exki", "12, Chaussée D'Ixelles, 1050 Ixelles", 'www.exki.be', '02/502.72.77', status='2ème vague')
| agpl-3.0 | Python |
2ab3a73124685565877d4755056dced391305216 | Fix multichan | Flat/JiyuuBot,Zaexu/JiyuuBot | modules/multichan.py | modules/multichan.py | @self.command(help="Displays channels being served")
def chans(self, msginfo):
tosend = ", ".join(self.conman.instances["irc"].joined_chans)
tosend = "%s channels currently served by %s: %s" % (len(self.conman.instances["irc"].joined_chans), self.glob_confman.get("IRC", "NICK"), tosend)
self.conman.gen_send(tosend, msginfo)
@self.command(help="Leaves specified chan. If no chan is specified, leaves the current chan. Syntax: .leave [#chan]", perm=900)
def leave(self, msginfo):
cmd = msginfo["msg"].split(" ")[1:]
if len(cmd) == 0:
self.conman.instances["irc"].leave_irc(msginfo["chan"], msginfo["nick"])
else:
for chan in cmd:
if chan in self.conman.instances["irc"].joined_chans:
self.conman.instances["irc"].leave_irc(chan, msginfo["nick"])
else:
self.conman.gen_send("Can't PART from a channel that hasn't been joined", msginfo)
@self.command(help="Joins a specified chan", perm=900)
def join(self, msginfo):
cmd = msginfo["msg"].split(" ")[1:]
for chan in cmd:
if not chan in self.conman.instances["irc"].joined_chans:
self.conman.instances["irc"].join_irc(chan, msginfo["nick"])
else:
self.conman.gen_send("Already serving {}".format(chan), msginfo)
@self.irc("INVITE")
def invite(self, msginfo):
hostname = msginfo["strg"][msginfo["strg"].index("@")+1:msginfo["strg"].index(" ")]
if self.permsman.get_msg_perms(hostname):
nick = msginfo["strg"][:msginfo["strg"].index("!")]
chan = msginfo["strg"][msginfo["strg"].index(" :")+2:]
self.conman.instances["irc"].join_irc(chan, nick)
@self.irc("KICK")
def kick(self, msginfo):
beingkicked = msginfo["strg"][msginfo["strg"].rindex(":")+1:]
NICK = self.glob_confman.get("IRC", "NICK")
if beingkicked == NICK:
chan = re.findall("(#[^\s,]+)", msginfo["strg"])[0]
nick = msginfo["strg"][:msginfo["strg"].index("!")]
self.conman.instances["irc"].leave_irc(chan, nick, True)
| @self.command(help="Displays channels being served")
def chans(self, msginfo):
tosend = ", ".join(self.conman.instances["irc"].joined_chans)
tosend = "%s channels currently served by %s: %s" % (len(self.conman.instances["irc"].joined_chans), self.glob_confman.get("IRC", "NICK"), tosend)
self.conman.gen_send(tosend, msginfo)
@self.command(help="Leaves specified chan. If no chan is specified, leaves the current chan. Syntax: .leave [#chan]", perm=900)
def leave(self, msginfo):
cmd = msginfo["msg"].split(" ")[1:]
if len(cmd) == 0:
self.conman.instances["irc"].leave_irc(msginfo["chan"], msginfo["nick"])
else:
for chan in cmd:
if chan in self.conman.joined_chans:
self.conman.instances["irc"].leave_irc(chan, msginfo["nick"])
else:
self.conman.gen_send("Can't PART from a channel that hasn't been joined", msginfo)
@self.command(help="Joins a specified chan", perm=900)
def join(self, msginfo):
cmd = msginfo["msg"].split(" ")[1:]
for chan in cmd:
if not chan in self.conman.joined_chans:
self.conman.instances["irc"].join_irc(chan, msginfo["nick"])
else:
self.conman.gen_send("Already serving {}".format(chan), msginfo)
@self.irc("INVITE")
def invite(self, msginfo):
hostname = msginfo["strg"][msginfo["strg"].index("@")+1:msginfo["strg"].index(" ")]
if self.permsman.get_msg_perms(hostname):
nick = msginfo["strg"][:msginfo["strg"].index("!")]
chan = msginfo["strg"][msginfo["strg"].index(" :")+2:]
self.conman.instances["irc"].join_irc(chan, nick)
@self.irc("KICK")
def kick(self, msginfo):
beingkicked = msginfo["strg"][msginfo["strg"].rindex(":")+1:]
NICK = self.glob_confman.get("IRC", "NICK")
if beingkicked == NICK:
chan = re.findall("(#[^\s,]+)", msginfo["strg"])[0]
nick = msginfo["strg"][:msginfo["strg"].index("!")]
self.conman.instances["irc"].leave_irc(chan, nick, True)
| agpl-3.0 | Python |
3f6713a7367e2f14fc900e5c100d62e049dfeace | Update easynetwork_example.py | Idrias/easynet | easynet/easynetwork_example.py | easynet/easynetwork_example.py | #!/usr/bin/python3
import easynet.easyclient as easyclient
import easynet.easyserver as easyserver
import time
myserver = easyserver.Server("localhost", 1337)
myserver.start()
myclient = easyclient.Client("localhost", 1336, "localhost", 1337)
myclient.start()
time.sleep(0.1)
myclient.send(b'Hello Server!')
myserver.sendall(b"Hello Client!")
myserver.ping(5, 0.2)
time.sleep(0.1)
myserver.showpings()
time.sleep(0.1)
print("The client has a new message:", myclient.datalist[0])
print("The server has a new message:", myserver.datalist[0].text, "from", myserver.datalist[0].sender)
| #!/usr/bin/python3
import easyclient as easyclient # when easyclient is in same folder, otherwise import easynet.easyclient as easyclient
import easyserver as easyserver # same as above
import time
myserver = easyserver.Server("localhost", 1337)
myserver.start()
myclient = easyclient.Client("localhost", 1336, "localhost", 1337)
myclient.start()
time.sleep(0.1)
myclient.send(b'Hello Server!')
myserver.sendall(b"Hello Client!")
myserver.ping(5, 0.2)
time.sleep(0.1)
myserver.showpings()
time.sleep(0.1)
print("The client has a new message:", myclient.datalist[0])
print("The server has a new message:", myserver.datalist[0].text, "from", myserver.datalist[0].sender)
| mit | Python |
39d0c69745c1d2811992712ad4174409d296cb52 | Use --no-patch with "git show" | google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot | slave/skia_slave_scripts/run_bench.py | slave/skia_slave_scripts/run_bench.py | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia benchmarking executable. """
from build_step import BuildStep
from utils import shell_utils
import os
import re
import sys
GIT = 'git.bat' if os.name == 'nt' else 'git'
GIT_SVN_ID_MATCH_STR = 'git-svn-id: http://skia.googlecode.com/svn/trunk@(\d+)'
def BenchArgs(data_file):
""" Builds a list containing arguments to pass to bench.
data_file: filepath to store the log output
"""
return ['--timers', 'wcg', '--logFile', data_file]
def GetSvnRevision(commit_hash):
output = shell_utils.Bash([GIT, 'show', '--no-patch', commit_hash],
echo=False, log_in_real_time=False)
results = re.findall(GIT_SVN_ID_MATCH_STR, output)
if results:
return results[0]
else:
raise Exception('No git-svn-id found for %s\nOutput:\n%s' % (commit_hash,
output))
class RunBench(BuildStep):
def __init__(self, timeout=9600, no_output_timeout=9600, **kwargs):
super(RunBench, self).__init__(timeout=timeout,
no_output_timeout=no_output_timeout,
**kwargs)
def _BuildDataFile(self):
return os.path.join(self._device_dirs.PerfDir(),
'bench_r%s_data' % GetSvnRevision(self._got_revision))
def _Run(self):
args = []
if self._perf_data_dir:
args.extend(BenchArgs(self._BuildDataFile()))
if 'Nexus4' in self._builder_name:
args.extend(['--config', 'defaults', 'MSAA4'])
self._flavor_utils.RunFlavoredCmd('bench', args + self._bench_args)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(RunBench))
| #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia benchmarking executable. """
from build_step import BuildStep
from utils import shell_utils
import os
import re
import sys
GIT = 'git.bat' if os.name == 'nt' else 'git'
GIT_SVN_ID_MATCH_STR = 'git-svn-id: http://skia.googlecode.com/svn/trunk@(\d+)'
def BenchArgs(data_file):
""" Builds a list containing arguments to pass to bench.
data_file: filepath to store the log output
"""
return ['--timers', 'wcg', '--logFile', data_file]
def GetSvnRevision(commit_hash):
output = shell_utils.Bash([GIT, 'show', commit_hash], echo=False,
log_in_real_time=False)
results = re.findall(GIT_SVN_ID_MATCH_STR, output)
if results:
return results[0]
else:
raise Exception('No git-svn-id found for %s\nOutput:\n%s' % (commit_hash,
output))
class RunBench(BuildStep):
def __init__(self, timeout=9600, no_output_timeout=9600, **kwargs):
super(RunBench, self).__init__(timeout=timeout,
no_output_timeout=no_output_timeout,
**kwargs)
def _BuildDataFile(self):
return os.path.join(self._device_dirs.PerfDir(),
'bench_r%s_data' % GetSvnRevision(self._got_revision))
def _Run(self):
args = []
if self._perf_data_dir:
args.extend(BenchArgs(self._BuildDataFile()))
if 'Nexus4' in self._builder_name:
args.extend(['--config', 'defaults', 'MSAA4'])
self._flavor_utils.RunFlavoredCmd('bench', args + self._bench_args)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(RunBench))
| bsd-3-clause | Python |
c983a2de4c223d9f6ac78d4a0a3e14a6b6a90756 | fix in DirectorDistance example | lisitsyn/shogun,Saurabh7/shogun,geektoni/shogun,Saurabh7/shogun,shogun-toolbox/shogun,sorig/shogun,karlnapf/shogun,lisitsyn/shogun,lisitsyn/shogun,karlnapf/shogun,Saurabh7/shogun,besser82/shogun,besser82/shogun,shogun-toolbox/shogun,shogun-toolbox/shogun,geektoni/shogun,shogun-toolbox/shogun,lambday/shogun,lambday/shogun,lambday/shogun,shogun-toolbox/shogun,lisitsyn/shogun,sorig/shogun,sorig/shogun,Saurabh7/shogun,lambday/shogun,Saurabh7/shogun,lambday/shogun,besser82/shogun,karlnapf/shogun,karlnapf/shogun,sorig/shogun,geektoni/shogun,karlnapf/shogun,karlnapf/shogun,Saurabh7/shogun,Saurabh7/shogun,besser82/shogun,lambday/shogun,geektoni/shogun,Saurabh7/shogun,besser82/shogun,lisitsyn/shogun,shogun-toolbox/shogun,sorig/shogun,sorig/shogun,lisitsyn/shogun,besser82/shogun,geektoni/shogun,Saurabh7/shogun,geektoni/shogun | examples/undocumented/python_modular/distance_director_euclidian_modular.py | examples/undocumented/python_modular/distance_director_euclidian_modular.py | import numpy
from shogun.Features import RealFeatures
try:
from shogun.Distance import DirectorDistance
except ImportError:
print "recompile shogun with --enable-swig-directors"
import sys
sys.exit(0)
class DirectorEuclidianDistance(DirectorDistance):
def __init__(self):
DirectorDistance.__init__(self, True)
def distance_function(self, idx_a, idx_b):
seq1 = self.get_lhs().get_feature_vector(idx_a)
seq2 = self.get_rhs().get_feature_vector(idx_b)
return numpy.linalg.norm(seq1-seq2)
traindat = numpy.random.random_sample((10,10))
testdat = numpy.random.random_sample((10,10))
parameter_list=[[traindat,testdat,1.2],[traindat,testdat,1.4]]
def distance_director_euclidian_modular (fm_train_real=traindat,fm_test_real=testdat,scale=1.2):
from shogun.Distance import EuclidianDistance
from modshogun import Time
feats_train=RealFeatures(fm_train_real)
feats_train.io.set_loglevel(0)
feats_train.parallel.set_num_threads(1)
feats_test=RealFeatures(fm_test_real)
distance=EuclidianDistance()
distance.init(feats_train, feats_test)
ddistance=DirectorEuclidianDistance()
ddistance.init(feats_train, feats_test)
print "dm_train"
t=Time()
dm_train=distance.get_distance_matrix()
t1=t.cur_time_diff(True)
print "ddm_train"
t=Time()
ddm_train=ddistance.get_distance_matrix()
t2=t.cur_time_diff(True)
print "dm_train", dm_train
print "ddm_train", ddm_train
return dm_train, ddm_train
if __name__=='__main__':
print('DirectorEuclidianDistance')
distance_director_euclidian_modular(*parameter_list[0])
| import numpy
try:
from shogun.Distance import DirectorDistance
except ImportError:
print "recompile shogun with --enable-swig-directors"
import sys
sys.exit(0)
class DirectorEuclidianDistance(DirectorDistance):
def __init__(self):
DirectorDistance.__init__(self, True)
def distance_function(self, idx_a, idx_b):
return numpy.linalg.norm(traindat[:,idx_a]-testdat[:,idx_b])
traindat = numpy.random.random_sample((1000,1000))
testdat = numpy.random.random_sample((1000,1000))
parameter_list=[[traindat,testdat,1.2],[traindat,testdat,1.4]]
def distance_director_euclidian_modular (fm_train_real=traindat,fm_test_real=testdat,scale=1.2):
from shogun.Features import RealFeatures
from shogun.Distance import EuclidianDistance
from modshogun import Time
feats_train=RealFeatures(fm_train_real)
feats_train.io.set_loglevel(0)
feats_train.parallel.set_num_threads(1)
feats_test=RealFeatures(fm_test_real)
distance=EuclidianDistance(feats_train, feats_test)
ddistance=DirectorEuclidianDistance()
ddistance.set_num_vec_lhs(traindat.shape[0])
ddistance.set_num_vec_rhs(testdat.shape[1])
print "dm_train"
t=Time()
dm_train=distance.get_distance_matrix()
t1=t.cur_time_diff(True)
print "ddm_train"
t=Time()
ddm_train=ddistance.get_distance_matrix()
t2=t.cur_time_diff(True)
print "dm_train", dm_train
print "ddm_train", ddm_train
return dm_train, ddm_train
if __name__=='__main__':
print('DirectorEuclidianDistance')
distance_director_euclidian_modular(*parameter_list[0])
| bsd-3-clause | Python |
f0c374eba55cdeb56bf3526ea0da041556f6ffe2 | Fix tests on python 3 | sciyoshi/yamlmod | tests/test_yamlmod.py | tests/test_yamlmod.py | import os
import sys
from nose.tools import *
try:
from importlib import reload
except ImportError:
pass
def setup_yamlmod():
import yamlmod
reload(yamlmod)
def teardown_yamlmod():
import yamlmod
for hook in sys.meta_path:
if isinstance(hook, yamlmod.YamlImportHook):
sys.meta_path.remove(hook)
break
@with_setup(setup_yamlmod, teardown_yamlmod)
def test_import_installs_hook():
import yamlmod
hooks = []
for hook in sys.meta_path:
if isinstance(hook, yamlmod.YamlImportHook):
hooks.append(hook)
eq_(len(hooks), 1, 'did not find exactly one hook')
@with_setup(setup_yamlmod, teardown_yamlmod)
def test_import_fixture():
import fixture
eq_(fixture.debug, True)
eq_(fixture.domain, 'example.com')
eq_(fixture.users, ['alice', 'bob', 'cathy'])
@with_setup(setup_yamlmod, teardown_yamlmod)
def test_hidden_attributes():
import fixture
eq_(fixture.__name__, 'fixture')
eq_(fixture.__file__, os.path.join(os.path.dirname(__file__), 'fixture.yml'))
| import os
import sys
from nose.tools import *
def setup_yamlmod():
import yamlmod
reload(yamlmod)
def teardown_yamlmod():
import yamlmod
for hook in sys.meta_path:
if isinstance(hook, yamlmod.YamlImportHook):
sys.meta_path.remove(hook)
break
@with_setup(setup_yamlmod, teardown_yamlmod)
def test_import_installs_hook():
import yamlmod
hooks = []
for hook in sys.meta_path:
if isinstance(hook, yamlmod.YamlImportHook):
hooks.append(hook)
eq_(len(hooks), 1, 'did not find exactly one hook')
@with_setup(setup_yamlmod, teardown_yamlmod)
def test_import_fixture():
import fixture
eq_(fixture.debug, True)
eq_(fixture.domain, 'example.com')
eq_(fixture.users, ['alice', 'bob', 'cathy'])
@with_setup(setup_yamlmod, teardown_yamlmod)
def test_hidden_attributes():
import fixture
eq_(fixture.__name__, 'fixture')
eq_(fixture.__file__, os.path.join(os.path.dirname(__file__), 'fixture.yml'))
| mit | Python |
87791c10f9063c48dc238ba8561bcc83d865cd0d | Use optional arg for vimtips | JokerQyou/bot | botcommands/vimtips.py | botcommands/vimtips.py | # coding: utf-8
import requests
def vimtips(msg=None):
try:
tip = requests.get('http://vim-tips.com/random_tips/json').json()
except Exception as e:
return None
return u'%s\n%s' % (tip['Content'], tip['Comment'], )
| # coding: utf-8
import requests
def vimtips(msg):
try:
tip = requests.get('http://vim-tips.com/random_tips/json').json()
except Exception as e:
return None
return u'%s\n%s' % (tip['Content'], tip['Comment'], )
| bsd-2-clause | Python |
cfdf3ed724b40d69e6bfce47a66f40249240c129 | make tmpdir if it doesn't exist | vpsfreecz/brutus,vpsfreecz/brutus,vpsfreecz/brutus | tests/testgenerate.py | tests/testgenerate.py | #!/usr/bin/python
import os, shutil, subprocess
import yaml
from brutus.db import Database
from brutus.generate import generate_all
tmpdir = "tmp"
filename = os.path.join(tmpdir, "test.pickle")
rootdir = os.path.join(tmpdir, "output")
def cleanup():
try:
os.remove(filename)
except FileNotFoundError:
pass
os.makedirs(tmpdir, exist_ok=True)
shutil.rmtree(rootdir,ignore_errors=True)
os.makedirs(rootdir)
def test_empty():
cleanup()
with Database(filename) as db:
generate_all(db, rootdir)
def test_services():
cleanup()
with Database(filename) as db:
with open("examples/domain.yaml") as stream:
db.add(yaml.load(stream))
with open("examples/mailaccount.yaml") as stream:
db.add(yaml.load(stream))
with open("examples/website.yaml") as stream:
db.add(yaml.load(stream))
with open("examples/website-minimal.yaml") as stream:
db.add(yaml.load(stream))
generate_all(db, rootdir)
subprocess.check_call(["diff", "-ru", rootdir, "tests/output"])
| #!/usr/bin/python
import os, shutil, subprocess
import yaml
from brutus.db import Database
from brutus.generate import generate_all
tmpdir = "tmp"
filename = os.path.join(tmpdir, "test.pickle")
rootdir = os.path.join(tmpdir, "output")
def cleanup():
try:
os.remove(filename)
except FileNotFoundError:
pass
shutil.rmtree(rootdir)
os.makedirs(rootdir)
def test_empty():
cleanup()
with Database(filename) as db:
generate_all(db, rootdir)
def test_services():
cleanup()
with Database(filename) as db:
with open("examples/domain.yaml") as stream:
db.add(yaml.load(stream))
with open("examples/mailaccount.yaml") as stream:
db.add(yaml.load(stream))
with open("examples/website.yaml") as stream:
db.add(yaml.load(stream))
with open("examples/website-minimal.yaml") as stream:
db.add(yaml.load(stream))
generate_all(db, rootdir)
subprocess.check_call(["diff", "-ru", rootdir, "tests/output"])
| bsd-2-clause | Python |
3c4e407430297ab5c011d67db2e2d6ad660d7c0c | Fix a typo in the instructions. | newrelic/newrelic-python-kata,newrelic/newrelic-python-kata,tebriel/newrelic-python-kata,newrelic/newrelic-python-kata,tebriel/newrelic-python-kata,tebriel/newrelic-python-kata | initialize_db.py | initialize_db.py | from random import randint
from os import environ
import requests
def setup_postgression():
url = 'http://api.postgression.com'
db_url = requests.get(url).text
with open('db_url.txt', 'w') as f:
f.write(db_url)
def run_django_commands(*args):
environ.setdefault("DJANGO_SETTINGS_MODULE", "newrelic_python_kata.settings")
from django.core.management import call_command
for command in args:
call_command(command, interactive=False)
def populate_db():
environ.setdefault("DJANGO_SETTINGS_MODULE", "newrelic_python_kata.settings")
from employees.models import Employee, BioData, Payroll
with open('names.txt') as f:
es = []
bs = []
ps = []
for idx, line in enumerate(f):
name, sex, salary = line.rstrip('\r\n').split(',')
e = Employee(name=name, employee_id=idx)
b = BioData(employee=e, age=randint(18, 40), sex=sex)
p = Payroll(employee=e, salary=salary)
es.append(e)
bs.append(b)
ps.append(p)
Employee.objects.bulk_create(es)
BioData.objects.bulk_create(bs)
Payroll.objects.bulk_create(ps)
if __name__ == '__main__':
print 'INFO: Setting up Postgression'
setup_postgression()
print 'INFO: Setting up Django DB'
run_django_commands('syncdb')
print 'INFO: Populating the database.'
populate_db()
print 'INFO: All done!'
print 'INFO: Start your server.'
print 'newrelic-admin run-python manage.py run_gunicorn'
| from random import randint
from os import environ
import requests
def setup_postgression():
url = 'http://api.postgression.com'
db_url = requests.get(url).text
with open('db_url.txt', 'w') as f:
f.write(db_url)
def run_django_commands(*args):
environ.setdefault("DJANGO_SETTINGS_MODULE", "newrelic_python_kata.settings")
from django.core.management import call_command
for command in args:
call_command(command, interactive=False)
def populate_db():
environ.setdefault("DJANGO_SETTINGS_MODULE", "newrelic_python_kata.settings")
from employees.models import Employee, BioData, Payroll
with open('names.txt') as f:
es = []
bs = []
ps = []
for idx, line in enumerate(f):
name, sex, salary = line.rstrip('\r\n').split(',')
e = Employee(name=name, employee_id=idx)
b = BioData(employee=e, age=randint(18, 40), sex=sex)
p = Payroll(employee=e, salary=salary)
es.append(e)
bs.append(b)
ps.append(p)
Employee.objects.bulk_create(es)
BioData.objects.bulk_create(bs)
Payroll.objects.bulk_create(ps)
if __name__ == '__main__':
print 'INFO: Setting up Postgression'
setup_postgression()
print 'INFO: Setting up Django DB'
run_django_commands('syncdb')
print 'INFO: Populating the database.'
populate_db()
print 'INFO: All done!'
print 'INFO: Start your server.'
print 'newrelic-adming run-python manage.py run_gunicorn'
| mit | Python |
8880aff128c1e430ee62691de3492fc5f65d6877 | fix bug | FederatedAI/FATE,FederatedAI/FATE,FederatedAI/FATE | python/federatedml/framework/homo/blocks/base.py | python/federatedml/framework/homo/blocks/base.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch import session
from federatedml.transfer_variable.base_transfer_variable import Variable, BaseTransferVariables
from federatedml.util import consts
class HomoTransferBase(BaseTransferVariables):
def __init__(self, server=(consts.ARBITER,), clients=(consts.GUEST, consts.HOST), prefix=None):
super().__init__()
if prefix is None:
self.prefix = f"{self.__class__.__module__}.{self.__class__.__name__}."
else:
self.prefix = f"{prefix}{self.__class__.__name__}."
self.server = server
self.clients = clients
def create_client_to_server_variable(self, name):
name = f"{self.prefix}{name}"
return Variable.get_or_create(name, lambda: Variable(name, self.clients, self.server))
def create_server_to_client_variable(self, name):
name = f"{self.prefix}{name}"
return Variable.get_or_create(name, lambda: Variable(name, self.server, self.clients))
@staticmethod
def get_parties(roles):
return session.get_latest_opened().parties.roles_to_parties(roles=roles)
@property
def client_parties(self):
return self.get_parties(roles=self.clients)
@property
def server_parties(self):
return self.get_parties(roles=self.server)
| #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch import session
from federatedml.transfer_variable.base_transfer_variable import Variable, BaseTransferVariables
from federatedml.util import consts
class HomoTransferBase(BaseTransferVariables):
def __init__(self, server=(consts.ARBITER,), clients=(consts.GUEST, consts.HOST), prefix=None):
super().__init__()
if prefix is None:
self.prefix = f"{self.__class__.__module__}${self.__class__.__name__}$"
else:
self.prefix = f"{prefix}{self.__class__.__name__}."
self.server = server
self.clients = clients
def create_client_to_server_variable(self, name):
name = f"{self.prefix}{name}"
return Variable.get_or_create(name, lambda: Variable(name, self.clients, self.server))
def create_server_to_client_variable(self, name):
name = f"{self.prefix}{name}"
return Variable.get_or_create(name, lambda: Variable(name, self.server, self.clients))
@staticmethod
def get_parties(roles):
return session.get_latest_opened().parties.roles_to_parties(roles=roles)
@property
def client_parties(self):
return self.get_parties(roles=self.clients)
@property
def server_parties(self):
return self.get_parties(roles=self.server)
| apache-2.0 | Python |
9d5bf8b31b3de277422af5a294a6aca50f1eb2cc | Fix examples/print_request.py | clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage | python/g1/http/servers/examples/print_request.py | python/g1/http/servers/examples/print_request.py | """Simple web app that prints out requests."""
import json
from startup import startup
import g1.asyncs.agents.parts
import g1.http.servers.parts
from g1.apps import asyncs
from g1.asyncs import kernels
LABELS = g1.http.servers.parts.define_server(
host='127.0.0.1',
port=8000,
reuse_address=True,
reuse_port=True,
)
async def application(environ, start_response):
response = environ.copy()
response.pop('wsgi.input')
response.pop('wsgi.errors')
response.pop('wsgi.file_wrapper')
response = {
'environ': response,
'request_body_size': len(await environ['wsgi.input'].read()),
}
response = json.dumps(response, indent=4).encode('utf-8')
start_response(
'200 OK',
[
('Content-Type', 'application/json'),
('Content-Length', str(len(response))),
],
)
return [response]
startup.set(LABELS.application, application)
def main(supervise_agents: g1.asyncs.agents.parts.LABELS.supervise_agents):
kernels.run(supervise_agents)
return 0
if __name__ == '__main__':
asyncs.run(main)
| """Simple web app that prints out requests."""
import json
from startup import startup
import g1.asyncs.agents.parts
import g1.http.servers.parts
from g1.apps import asyncs
from g1.asyncs import kernels
LABELS = g1.http.servers.parts.define_server(
host='127.0.0.1',
port=8000,
reuse_address=True,
reuse_port=True,
)
async def application(environ, start_response):
response = environ.copy()
response.pop('wsgi.input')
response.pop('wsgi.errors')
response = {
'environ': response,
'request_body_size': len(await environ['wsgi.input'].read()),
}
response = json.dumps(response, indent=4).encode('utf-8')
start_response(
'200 OK',
[
('Content-Type', 'application/json'),
('Content-Length', str(len(response))),
],
)
return [response]
startup.set(LABELS.application, application)
def main(supervise_agents: g1.asyncs.agents.parts.LABELS.supervise_agents):
kernels.run(supervise_agents)
return 0
if __name__ == '__main__':
asyncs.run(main)
| mit | Python |
3b7d1424da0e3ea7d9df928ab73d1b76f370acb0 | remove benchmarks | aaronkl/RoBO,automl/RoBO,aaronkl/RoBO,aaronkl/RoBO,numairmansur/RoBO,numairmansur/RoBO,automl/RoBO | robo/test/test_task_branin.py | robo/test/test_task_branin.py | '''
Created on 14.07.2015
@author: aaron
'''
import unittest
import numpy as np
from robo.task.branin import Branin
class TestTaskBranin(unittest.TestCase):
def test_branin(self):
branin = Branin()
# Check batch computation
n_points = 10
X = np.random.rand(n_points, branin.n_dims)
X[:, 0] = X[:, 0].dot(branin.X_upper[0] - branin.X_lower[0]) + branin.X_lower[0]
X[:, 1] = X[:, 1].dot(branin.X_upper[1] - branin.X_lower[1]) + branin.X_lower[1]
y = branin.evaluate(X)
assert y.shape[0] == n_points
assert y.shape[1] == 1
# Check single computation
X = np.array([np.random.rand(branin.n_dims)])
X[:, 0] = X[:, 0].dot(branin.X_upper[0] - branin.X_lower[0]) + branin.X_lower[0]
X[:, 1] = X[:, 1].dot(branin.X_upper[1] - branin.X_lower[1]) + branin.X_lower[1]
y = branin.evaluate(X)
assert y.shape[0] == 1
# Check optimas
X = branin.opt
y = branin.evaluate(X)
assert np.all(np.round(y, 6) == np.array([branin.fopt]))
if __name__ == "__main__":
unittest.main()
| '''
Created on 14.07.2015
@author: aaron
'''
import unittest
import numpy as np
from robo.task.branin import Branin
class TestTaskBranin(unittest.TestCase):
def test_branin(self):
branin = Branin()
# Check batch computation
n_points = 10
X = np.random.rand(n_points, branin.n_dims)
X[:, 0] = X[:, 0].dot(branin.X_upper[0] - branin.X_lower[0]) + branin.X_lower[0]
X[:, 1] = X[:, 1].dot(branin.X_upper[1] - branin.X_lower[1]) + branin.X_lower[1]
y = branin.evaluate(X)
assert y.shape[0] == n_points
assert y.shape[1] == 1
# Check single computation
X = np.array([np.random.rand(branin.n_dims)])
X[:, 0] = X[:, 0].dot(branin.X_upper[0] - branin.X_lower[0]) + branin.X_lower[0]
X[:, 1] = X[:, 1].dot(branin.X_upper[1] - branin.X_lower[1]) + branin.X_lower[1]
y = branin.evaluate(X)
assert y.shape[0] == 1
# Check optimas
X = branin.opt
y = branin.evaluate(X)
assert np.all(np.round(y, 6) == branin.fopt) is True
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | Python |
f365d0804b516ced454ad4cd7d0119d83adea98d | Add CLI help and command to print README. | mjrusso/mountain | mountain/mountain.py | mountain/mountain.py | # -*- coding: utf-8 -*-
__version__ = "0.1.0"
import sys
from .join import expand_manifest
from .split import split_combined_document
from .utils import write_files
def join(manifest_path, combined_document_path):
print("Reading manifest from `%s`." % manifest_path)
write_files(expand_manifest(manifest_path, combined_document_path))
print("Finished expanding manifest.")
def split(manifest_path, combined_document_path):
print("Reading combined document from `%s`." % combined_document_path)
write_files(split_combined_document(manifest_path, combined_document_path))
print("Updated manifest and all referenced files.")
def help():
version()
print("""
Usage:
$ mountain join <manifest-path> <combined-document-path>
Combine all files referenced from <manifest-path> into the document at
<combined-document-path>. The file at <combined-document-path> will be
overwritten if it already exists.
The files referenced from <manifest-path> must be referenced with the
[[#include file-path]] directive.
$ mountain split <manifest-path> <combined-document-path>
Write all included files from <combined-document-path> into standalone files.
Produce an updated manifest file and write to <manifest-path>. In all cases,
files will be overwritten if they already exist.
All included files in <combined-document-path> must be indicated with the
[[#reference file-path]] ... [[#reference-end]] directive.
For more details, see the README:
$ mountain --readme
""")
def readme():
with open("README.md", "rb") as f:
print("\n")
print(f.read().decode("utf-8"))
def version():
print("Mountain v%s." % __version__)
def main():
args = sys.argv[1:]
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise Exception("Python >= 2.7 required. Aborting.")
if not len(args):
help()
elif args[0] == "join":
join(args[1], args[2])
elif args[0] == "split":
split(args[1], args[2])
elif args[0] == "--version":
version()
elif args[0] == "--help":
help()
elif args[0] == "--readme":
readme()
else:
print("Invalid option specified.")
help()
| # -*- coding: utf-8 -*-
__version__ = "0.1.0"
import sys
from .join import expand_manifest
from .split import split_combined_document
from .utils import write_files
def join(manifest_path, combined_document_path):
print("Reading manifest from `%s`." % manifest_path)
write_files(expand_manifest(manifest_path, combined_document_path))
print("Finished expanding manifest.")
def split(manifest_path, combined_document_path):
print("Reading combined document from `%s`." % combined_document_path)
write_files(split_combined_document(manifest_path, combined_document_path))
print("Updated manifest and all referenced files.")
def help():
version()
def version():
print("Mountain v%s." % __version__)
def main():
args = sys.argv[1:]
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise Exception("Python >= 2.7 required. Aborting.")
if not len(args):
help()
elif args[0] == "join":
join(args[1], args[2])
elif args[0] == "split":
split(args[1], args[2])
elif args[0] == "--version":
version()
elif args[0] == "--help":
help()
else:
print("Invalid option specified.")
help()
| mit | Python |
f0ecbfbc5e0fb10a48c122f81ce61d4b16893448 | use transforms in grid script benchmark | OEP/volpy,OEP/volpy | benchmarks/grid.py | benchmarks/grid.py | import numpy as np
import volpy
from libbenchmark import render, get_parser
import math
def main():
parser = _get_parser()
args = parser.parse_args()
transform = np.eye(4)
if args.rotate:
args.rotate = np.array(args.rotate) * math.pi / 180
transform = volpy.rotatexyz(*args.rotate).dot(transform)
if args.scale:
transform = volpy.scale(*args.scale).dot(transform)
if args.translate:
transform = volpy.translate(*args.translate).dot(transform)
grid = volpy.Grid(np.ones(args.grid_shape),
transform=transform,
default=args.default)
scene = volpy.Scene(emit=grid, scatter=args.scatter)
image = render(scene, args)
image.save(args.output)
def _get_parser():
parser = get_parser()
parser.add_argument('-D', '--default', type=float, default=0)
parser.add_argument('-g', '--grid-shape', type=int, nargs=3,
default=(100, 100, 100))
parser.add_argument('-o', '--output', default='out.png')
parser.add_argument('-c', '--color', action='store_true')
parser.add_argument('-k', '--scatter', type=float, default=10)
parser.add_argument('-T', '--translate', type=float, nargs=3)
parser.add_argument('-R', '--rotate', type=float, nargs=3)
parser.add_argument('-S', '--scale', type=float, nargs=3)
return parser
if __name__ == '__main__':
main()
| import numpy as np
import volpy
from libbenchmark import render, get_parser
def main():
parser = _get_parser()
args = parser.parse_args()
grid = volpy.Grid(np.ones(args.grid_shape), default=args.default)
scene = volpy.Scene(emit=grid, scatter=args.scatter)
image = render(scene, args)
image.save(args.output)
def _get_parser():
parser = get_parser()
parser.add_argument('-D', '--default', type=float, default=0)
parser.add_argument('-g', '--grid-shape', type=int, nargs=3,
default=(100, 100, 100))
parser.add_argument('-o', '--output', default='out.png')
parser.add_argument('-c', '--color', action='store_true')
parser.add_argument('-k', '--scatter', type=float, default=10)
return parser
if __name__ == '__main__':
main()
| mit | Python |
ecfc74bfc93ddea277105fd2386d64ffe751c819 | Fix `series_list` in `user_page` not being populated properly | wtsi-hgi/CoGS-Webapp,wtsi-hgi/CoGS-Webapp,wtsi-hgi/CoGS-Webapp | cogs/routes/user_page.py | cogs/routes/user_page.py | """
Copyright (c) 2017 Genome Research Ltd.
Authors:
* Simon Beal <sb48@sanger.ac.uk>
* Christopher Harrison <ch12@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from typing import Dict
from aiohttp.web import Request
from aiohttp_jinja2 import template
@template("user_page.jinja2")
async def user_page(request: Request) -> Dict:
"""
Set the template context for the user. Show the projects they:
* Own (including legacy projects);
* Are involved with;
* Are in the process of signing up for, front loaded
:param request:
:return:
"""
db = request.app["db"]
user = request["user"]
navbar_data = request["navbar"]
data = {
"user": user,
"cur_option": "cogs",
"first_option": user.first_option,
"second_option": user.second_option,
"third_option": user.third_option,
**navbar_data}
if user.role.create_project_groups:
group = db.get_most_recent_group()
data["groups"] = db.get_project_groups_by_series(group.series)
if user.role.review_other_projects:
data["review_list"] = series_list = db.get_projects_by_cogs_marker(user)
# TODO Refactor this...or remove it: It looks like it's doing
# dangerous things!
# for series in series_list:
# for project in series:
# set_project_can_mark(request.app, cookies, project)
# sort_by_attr(series, "can_mark")
if user.role.join_projects:
data["project_list"] = db.get_projects_by_student(user)
if user.role.create_projects:
data["series_list"] = series_list = []
for series in db.get_all_series():
for group in db.get_project_groups_by_series(series):
projects = db.get_projects_by_supervisor(user, group)
if projects:
series_list.append(projects)
# TODO/FIXME Dragons be here! Remove this if possible
# for series in series_list:
# set_group_attributes(request.app, cookies, series)
return data
| """
Copyright (c) 2017 Genome Research Ltd.
Authors:
* Simon Beal <sb48@sanger.ac.uk>
* Christopher Harrison <ch12@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from typing import Dict
from aiohttp.web import Request
from aiohttp_jinja2 import template
@template("user_page.jinja2")
async def user_page(request: Request) -> Dict:
"""
Set the template context for the user. Show the projects they:
* Own (including legacy projects);
* Are involved with;
* Are in the process of signing up for, front loaded
:param request:
:return:
"""
db = request.app["db"]
user = request["user"]
navbar_data = request["navbar"]
data = {
"user": user,
"cur_option": "cogs",
"first_option": user.first_option,
"second_option": user.second_option,
"third_option": user.third_option,
**navbar_data}
if user.role.create_project_groups:
group = db.get_most_recent_group()
data["groups"] = db.get_project_groups_by_series(group.series)
if user.role.review_other_projects:
data["review_list"] = series_list = db.get_projects_by_cogs_marker(user)
# TODO Refactor this...or remove it: It looks like it's doing
# dangerous things!
# for series in series_list:
# for project in series:
# set_project_can_mark(request.app, cookies, project)
# sort_by_attr(series, "can_mark")
if user.role.join_projects:
data["project_list"] = db.get_projects_by_student(user)
if user.role.create_projects:
data["series_list"] = series_list = db.get_projects_by_supervisor(user)
# TODO/FIXME Dragons be here! Remove this if possible
# for series in series_list:
# set_group_attributes(request.app, cookies, series)
return data
| agpl-3.0 | Python |
931a858dc1cfde1652d21e1ccd60a82dde683ce3 | Add simple "yo" bot command | paultag/moxie,loandy/moxie,mileswwatkins/moxie,mileswwatkins/moxie,paultag/moxie,loandy/moxie,loandy/moxie,paultag/moxie,rshorey/moxie,rshorey/moxie,rshorey/moxie,mileswwatkins/moxie | moxie/butterfield.py | moxie/butterfield.py | import os
import json
import asyncio
from butterfield.utils import at_bot
from aiodocker import Docker
from aiocore import Service
WEB_ROOT = os.environ.get("MOXIE_WEB_URL", "http://localhost:8888")
@asyncio.coroutine
def events(bot):
    """Stream Docker engine events into the ``#cron`` Slack channel.

    Runs forever: every event object taken off the listener queue is
    posted verbatim (backtick-quoted) to #cron.
    """
    docker = Docker()
    events = docker.events  # NOTE: local deliberately shadows this function's name
    events.saferun()
    stream = events.listen()
    while True:
        el = yield from stream.get()
        yield from bot.post("#cron", "`{}`".format(str(el)))
@asyncio.coroutine
@at_bot
def run(bot, message: "message"):
    """Dispatch an @-mention command from Slack.

    Understood commands:
      * ``run <job>`` -- ask the RunService to bring up *job*, reporting
        progress or failure back to the originating channel.
      * ``yo`` -- reply with a greeting (cheap liveness check).
    Any other command is silently ignored.
    """
    runner = Service.resolve("moxie.cores.run.RunService")
    text = message.get("text", "")
    if text == "":
        yield from bot.post(message['channel'], "Invalid request")
        # Bug fix: previously fell through to the split below and raised
        # ValueError on an empty message.
        return
    # partition() never raises, unlike ``text.split(" ", 1)`` which blew
    # up with ValueError on one-word messages such as a bare "yo".
    cmd, _, arg = text.partition(" ")
    if cmd == "run":
        if not arg:
            # "run" with no job name: report and bail instead of crashing.
            yield from bot.post(message['channel'], "Invalid request")
            return
        job = arg
        yield from bot.post(
            message['channel'], "Doing bringup of {}".format(job))
        try:
            yield from runner.run(job)
        except ValueError as e:
            yield from bot.post(
                message['channel'],
                "Gah, {job} failed - {e}".format(e=e, job=job)
            )
            return
        yield from bot.post(message['channel'],
            "Job {job} online - {webroot}/container/{job}/".format(
                webroot=WEB_ROOT, job=job))
    elif cmd == "yo":
        yield from bot.post(
            message['channel'], "Yo {}".format(message['user']))
| import os
import json
import asyncio
from butterfield.utils import at_bot
from aiodocker import Docker
from aiocore import Service
WEB_ROOT = os.environ.get("MOXIE_WEB_URL", "http://localhost:8888")
@asyncio.coroutine
def events(bot):
docker = Docker()
events = docker.events
events.saferun()
stream = events.listen()
while True:
el = yield from stream.get()
yield from bot.post("#cron", "`{}`".format(str(el)))
@asyncio.coroutine
@at_bot
def run(bot, message: "message"):
runner = Service.resolve("moxie.cores.run.RunService")
text = message.get("text", "")
if text == "":
yield from bot.post(message['channel'], "Invalid request")
cmd, arg = text.split(" ", 1)
if cmd == "run":
job = arg
yield from bot.post(
message['channel'], "Doing bringup of {}".format(job))
try:
yield from runner.run(job)
except ValueError as e:
yield from bot.post(
message['channel'],
"Gah, {job} failed - {e}".format(e=e, job=job)
)
return
yield from bot.post(message['channel'],
"Job {job} online - {webroot}/container/{job}/".format(
webroot=WEB_ROOT, job=job))
| mit | Python |
c8d66eb6a9d624cfb321b34abd9ec778d52d3266 | Fix copy for python2 | rparini/cxroots,rparini/cxroots | cxroots/Misc.py | cxroots/Misc.py | from numpydoc.docscrape import FunctionDoc
def remove_para(*paras):
    """Decorator factory: strip the named entries from the 'Parameters'
    section of a function's numpydoc docstring.

    :param paras: parameter names to remove (compared against the part of
        each entry's name before any ':' type annotation).
    """
    def wrapper(func):
        doc = FunctionDoc(func)
        # Rebuild the section instead of ``del doc['Parameters'][i]`` while
        # enumerating a snapshot: after the first deletion the snapshot's
        # indices drift from the live list's, so a second match would
        # delete the wrong parameter.
        doc['Parameters'] = [
            p for p in doc['Parameters']
            if p.name.split(':')[0].rstrip() not in paras
        ]
        func.__doc__ = doc
        return func
    return wrapper
def update_docstring(**sections):
    """Decorator factory: overwrite whole sections of a function's
    numpydoc docstring with the supplied values (keyword name = section
    name, e.g. ``Returns=...``).
    """
    def decorate(func):
        parsed = FunctionDoc(func)
        for section, content in sections.items():
            parsed[section] = content
        func.__doc__ = parsed
        return func
    return decorate
class NumberOfRootsChanged(Exception):
    """Signals that the computed number of roots has changed."""
| from numpydoc.docscrape import FunctionDoc
def remove_para(*paras):
def wrapper(func):
doc = FunctionDoc(func)
for i, p in enumerate(doc['Parameters'].copy()):
if p.name.split(':')[0].rstrip() in paras:
del doc['Parameters'][i]
func.__doc__ = doc
return func
return wrapper
def update_docstring(**dic):
def wrapper(func):
doc = FunctionDoc(func)
for k, v in dic.items():
doc[k] = v
func.__doc__ = doc
return func
return wrapper
class NumberOfRootsChanged(Exception):
pass
| bsd-3-clause | Python |
2b8f025e94e593fc4d8cdaafb2b1344a9ea82a1a | allow continues reading and check if tag changes | thedarkman/ttleagueterminal,thedarkman/ttleagueterminal | terminal.py | terminal.py | #!/usr/bin/python
from TTLeague import Match, Game, Player
import RPi.GPIO as GPIO
import MFRC522
import signal
import json
import requests
from time import sleep
# init empty configuration
config = {}
try:
    # try to load config from file; getPlayer() below reads the
    # 'secretKey' and 'baseUrl' keys from it
    config = json.load(open('config.json'))
except Exception, e:
    # Without a config the terminal cannot reach the server -- abort.
    print("Error while getting config: "+ str(e))
    exit()
# Shared flag driving the tag-polling loop; cleared by the SIGINT handler.
notFound = True
# Capture SIGINT for cleanup when the script is aborted
def end_read(signal, frame):
    """SIGINT handler: stop any in-progress tag polling and release GPIO.

    Note: the ``signal`` parameter shadows the imported ``signal`` module
    inside this function (harmless here, the module is not used).
    """
    global notFound
    print "Ctrl+C captured, ending read."
    notFound = False
    GPIO.cleanup()
def getHexStringFromScannedTag(tag):
    """Concatenate the first four bytes of *tag* as lowercase hex.

    No zero padding is applied (e.g. byte 0x0f -> 'f'), matching the tag
    IDs already registered server-side.
    NOTE(review): without padding two distinct UIDs can collide on the
    same string -- confirm the server side expects this format.
    """
    return ''.join(format(tag[i], 'x') for i in range(4))
def getPlayer(nfcTag):
    """Look up the league player registered under *nfcTag*.

    Issues an authenticated GET against ``<baseUrl>/user/<nfcTag>`` and,
    on HTTP 200, builds a Player from the returned JSON.  On any other
    status the server response is printed and None is returned
    implicitly, so callers must cope with a missing player.
    """
    params = {'clientToken': config['secretKey']}
    url = '{}/user/{}'.format(config['baseUrl'], nfcTag)
    #print("requesting "+ url)
    r = requests.get(url, params=params)
    if r.status_code == requests.codes.ok:
        obj = r.json()
        pObj = Player(obj['nfcTag'], obj['username'])
        return pObj
    else:
        print("server returned (status code: {:d}): {:s} ".format(r.status_code, r.text))
# Hook the SIGINT
signal.signal(signal.SIGINT, end_read)
GPIO.setwarnings(False)
# Create an object of the class MFRC522
MIFAREReader = MFRC522.MFRC522()
print "Welcome to the TT League Terminal"
# UID returned by the previous scan; waitForTag() compares against it so a
# card left resting on the reader is not reported twice.
oldTag = ''
def waitForTag():
    """Block until a *new* RFID tag is presented and return its raw UID.

    Polls the MFRC522 reader every 0.1s.  A UID equal to the module-global
    ``oldTag`` (the previously returned one) is ignored, so the same card
    must be removed before it is accepted again.  ``notFound`` doubles as
    the loop flag and is cleared by the SIGINT handler.
    NOTE(review): if the loop is aborted via Ctrl+C before any tag was
    read, ``uid`` is unbound and the final return raises NameError --
    confirm whether that path matters.
    """
    global notFound
    global oldTag
    notFound = True
    while notFound:
        # Scan for cards
        (status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
        # If a card is found
        if status == MIFAREReader.MI_OK:
            #print "tag detected"
            # Get the UID of the card
            (status,uid) = MIFAREReader.MFRC522_Anticoll()
            # If we have the UID, continue
            if status == MIFAREReader.MI_OK:
                #print "tag id read"
                # Debounce: only accept a UID different from the last one.
                if oldTag != uid:
                    notFound = False
                    oldTag = uid
        sleep(0.1)
    return uid
print "waiting now for player 1 scan ..."
rawTag = waitForTag()
nfcTag = getHexStringFromScannedTag(rawTag)
print('player 1 - {}'.format(nfcTag))
p1 = getPlayer(nfcTag)
print(str(p1))
print('player 1 found, now waiting for player 2 scan ...')
print
rawTag = waitForTag()
nfcTag = getHexStringFromScannedTag(rawTag)
print('player 2 - {}'.format(nfcTag))
p2 = getPlayer(nfcTag)
print(str(p2))
print
print
print('(Hopefully) both found, let\'s play table tennis')
| from TTLeague import Match, Game, Player
import RPi.GPIO as GPIO
import MFRC522
import signal
import json
import requests
from time import sleep
# init empty configuration
config = {}
try:
# try to load config from file
config = json.load(open('config.json'))
except Exception, e:
print("Error while getting config: "+ str(e))
exit()
notFound = True
# Capture SIGINT for cleanup when the script is aborted
def end_read(signal, frame):
global notFound
print "Ctrl+C captured, ending read."
notFound = False
GPIO.cleanup()
def getHexStringFromScannedTag(tag):
return '{:x}{:x}{:x}{:x}'.format(tag[0], tag[1], tag[2], tag[3])
def getPlayer(nfcTag):
params = {'clientToken': config['secretKey']}
url = '{}/user/{}'.format(config['baseUrl'], nfcTag)
#print("requesting "+ url)
r = requests.get(url, params=params)
if r.status_code == requests.codes.ok:
obj = r.json()
pObj = Player(obj['nfcTag'], obj['username'])
return pObj
else:
print("server returned (status code: {:d}): {:s} ".format(r.status_code, r.text))
# Hook the SIGINT
signal.signal(signal.SIGINT, end_read)
GPIO.setwarnings(False)
# Create an object of the class MFRC522
MIFAREReader = MFRC522.MFRC522()
print "Welcome to the TT League Terminal"
def waitForTag():
global notFound
notFound = True
while notFound:
# Scan for cards
(status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
# If a card is found
if status == MIFAREReader.MI_OK:
print "tag detected"
# Get the UID of the card
(status,uid) = MIFAREReader.MFRC522_Anticoll()
# If we have the UID, continue
if status == MIFAREReader.MI_OK:
print "tag id read"
notFound = False
return uid
print "waiting now for player 1 scan ..."
rawTag = waitForTag()
nfcTag = getHexStringFromScannedTag(rawTag)
print('player 1 - {}'.format(nfcTag))
p1 = getPlayer(nfcTag)
print(str(p1))
print "player 1 found, now waiting for player 2 scan ..."
print("\n")
sleep(1)
rawTag = waitForTag()
nfcTag = getHexStringFromScannedTag(rawTag)
print('player 2 - {}'.format(nfcTag))
p2 = getPlayer(nfcTag)
print(str(p2))
print
print
print "(Hopefully) both found, let's play table tennis"
| mit | Python |
fe5e86254a1e2a2e8838988210531fc30467ee7f | Add further information to absorption correction cif | xia2/i19 | command_line/write_absorption_correction_cif.py | command_line/write_absorption_correction_cif.py | from __future__ import division
import datetime
import iotbx.cif.model
import os
import re
def find_scale_dir():
    """Derive the scale directory from the xia2.json in the current
    working directory.

    Asserts exactly one crystal is recorded and returns
    ``<crystal name>/scale``.
    """
    assert os.path.exists('xia2.json')
    from xia2.Schema.XProject import XProject
    xinfo = XProject.from_json(filename='xia2.json')
    crystals = xinfo.get_crystals()
    assert len(crystals) == 1
    crystal = next(crystals.itervalues())  # Python 2 dict API
    return os.path.join(crystal.get_name(), 'scale')
def find_aimless_log():
    """Return the path of the most recent aimless log file.

    Looks in DEFAULT/scale first (the usual layout), falling back to the
    directory derived from xia2.json.  "Most recent" means the largest
    integer prefix before the first underscore in the file name, e.g.
    ``10_aimless.log`` beats ``2_aimless.log``.
    """
    scale_dir = os.path.join('DEFAULT', 'scale')  # best guess
    if not os.path.exists(scale_dir):
        scale_dir = find_scale_dir()
    log_names = []
    for entry in os.listdir(scale_dir):
        is_log = entry.endswith('_aimless.log')
        if is_log and os.path.isfile(os.path.join(scale_dir, entry)):
            log_names.append(entry)
    log_names.sort(key=lambda name: int(name.split('_')[0]))
    newest = os.path.join(scale_dir, log_names[-1])
    assert os.path.isfile(newest)
    return newest
def get_versions():
    """Collect version strings of the packages used in processing.

    Returns a dict with keys 'xia2', 'dials', 'i19' and 'aimless'.  The
    'aimless' entry defaults to 'AIMLESS, CCP4' and is replaced with the
    banner line scraped from the latest aimless log when one matches.
    """
    from dials.util.version import dials_version
    from i19.util.version import i19_version
    import xia2.XIA2Version
    versions = {
        'xia2': xia2.XIA2Version.Version,
        'dials': dials_version(),
        'i19': i19_version(),
        'aimless': 'AIMLESS, CCP4' }
    with open(find_aimless_log(), 'r') as aimlesslog:
        # The banner line looks like '###### CCP4 ... ######'; strip the
        # decoration and collapse whitespace runs to ', ' for one line.
        pattern = re.compile(" +#+ *CCP4.*#+")
        for line in aimlesslog:
            if pattern.search(line):
                versions['aimless'] = re.sub('\s\s+', ', ', line.strip("\t\n #"))
                break
    return versions
def write_cif(filename, absmin, absmax, prefix='abscorr'):
    """Write a small CIF block recording the absorption correction range.

    :param filename: path the CIF fragment is written to
    :param absmin: minimum of the absorption map (_exptl_absorpt_correction_T_min)
    :param absmax: maximum of the absorption map (_exptl_absorpt_correction_T_max)
    :param prefix: name of the data block inside the CIF
    """
    versions = get_versions()
    block = iotbx.cif.model.block()
    block["_audit_creation_method"] = versions['xia2']
    block["_audit_creation_date"] = datetime.date.today().isoformat()
    block["_publ_section_references"] = '''
Evans, P. R. and Murshudov, G. N. (2013) Acta Cryst. D69, 1204-1214.
Winter, G. (2010) Journal of Applied Crystallography 43, 186-190.
'''
    block["_exptl_absorpt_correction_T_min"] = absmin
    block["_exptl_absorpt_correction_T_max"] = absmax
    block["_exptl_absorpt_correction_type"] = "empirical"
    block["_exptl_absorpt_process_details"] = '''
{aimless}
Scaling & analysis of unmerged intensities, absorption correction using spherical harmonics
Run via {xia2}, {dials}, {i19}
'''.format(**versions)
    cif = iotbx.cif.model.cif()
    cif[prefix] = block
    with open(filename, 'w') as fh:
        cif.show(out=fh)
def main():
    """Scrape spherical-harmonic absorption coefficients from the latest
    aimless log, evaluate them on a 1-degree grid and emit both a CIF
    fragment and a PNG map of the absorption surface.
    """
    print "Generating absorption surface"
    log = find_aimless_log()
    from xia2.Toolkit.AimlessSurface import evaluate_1degree, scrape_coefficients, generate_map
    absmap = evaluate_1degree(scrape_coefficients(log))
    write_cif('absorption_surface.cif_xia2', absmap.min(), absmap.max())
    generate_map(absmap, 'absorption_surface.png')


if __name__ == '__main__':
    main()
| from __future__ import division
import datetime
import iotbx.cif.model
import os
import xia2.XIA2Version
def find_scale_dir():
assert os.path.exists('xia2.json')
from xia2.Schema.XProject import XProject
xinfo = XProject.from_json(filename='xia2.json')
crystals = xinfo.get_crystals()
assert len(crystals) == 1
crystal = next(crystals.itervalues())
return os.path.join(crystal.get_name(), 'scale')
def find_aimless_log():
scaledir = os.path.join('DEFAULT', 'scale') # best guess
if not os.path.exists(scaledir):
scaledir = find_scale_dir()
logs = [f for f in os.listdir(scaledir)
if f.endswith('_aimless.log') and os.path.isfile(os.path.join(scaledir, f))]
logs.sort(key=lambda x: int(x.split('_')[0]))
lastlog = os.path.join(scaledir, logs[-1])
assert os.path.isfile(lastlog)
return lastlog
def write_cif(filename, absmin, absmax, prefix='abscorr'):
block = iotbx.cif.model.block()
block["_audit_creation_method"] = xia2.XIA2Version.Version
block["_audit_creation_date"] = datetime.date.today().isoformat()
block["_publ_section_references"] = '''
Winter, G. (2010) Journal of Applied Crystallography 43
'''
block["_exptl_absorpt_correction_T_min"] = absmin
block["_exptl_absorpt_correction_T_max"] = absmax
cif = iotbx.cif.model.cif()
cif[prefix] = block
with open(filename, 'w') as fh:
cif.show(out=fh)
def main():
print "Generating absorption surface"
log = find_aimless_log()
from xia2.Toolkit.AimlessSurface import evaluate_1degree, scrape_coefficients, generate_map
absmap = evaluate_1degree(scrape_coefficients(log))
write_cif('absorption_surface.cif_xia2', absmap.min(), absmap.max())
generate_map(absmap, 'absorption_surface.png')
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
fa6edcb1ad04ca0ff698da8fa4965ea02915758e | Fix the name of CHANGES.txt | larsbutler/coveragepy,hugovk/coveragepy,nedbat/coveragepy,jayhetee/coveragepy,larsbutler/coveragepy,nedbat/coveragepy,7WebPages/coveragepy,blueyed/coveragepy,blueyed/coveragepy,blueyed/coveragepy,hugovk/coveragepy,jayhetee/coveragepy,7WebPages/coveragepy,hugovk/coveragepy,hugovk/coveragepy,larsbutler/coveragepy,blueyed/coveragepy,jayhetee/coveragepy,7WebPages/coveragepy,nedbat/coveragepy,blueyed/coveragepy,larsbutler/coveragepy,nedbat/coveragepy,larsbutler/coveragepy,hugovk/coveragepy,7WebPages/coveragepy,nedbat/coveragepy,jayhetee/coveragepy,jayhetee/coveragepy | coverage/__init__.py | coverage/__init__.py | """Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
__version__ = "3.0.1" # see detailed history in CHANGES.txt
from coverage.control import coverage
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability. Here we define the top-level
# functions to create the singleton when they are first called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
    """Return a function to the `name` method on a singleton `coverage` object.

    The singleton object is created the first time one of these functions is
    called.
    """
    def wrapper(*args, **kwargs):
        """Singleton wrapper around a coverage method."""
        global _the_coverage
        if not _the_coverage:
            # Created lazily so merely importing the module costs nothing;
            # auto_data=True presumably enables automatic data load/save as
            # in the old module-level API -- confirm in coverage.control.
            _the_coverage = coverage(auto_data=True)
        return getattr(_the_coverage, name)(*args, **kwargs)
    return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2009 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
| """Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
__version__ = "3.0.1" # see detailed history in CHANGES
from coverage.control import coverage
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability. Here we define the top-level
# functions to create the singleton when they are first called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2009 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
| apache-2.0 | Python |
8c6c96d5a0cdeadb59e6f6ab8649f1dbc1fc12b7 | delete multiple sites at once | appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform | openedx/core/djangoapps/appsembler/sites/management/commands/remove_site.py | openedx/core/djangoapps/appsembler/sites/management/commands/remove_site.py | import traceback
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from django.db import transaction
from openedx.core.djangoapps.appsembler.sites.utils import delete_site
class Command(BaseCommand):
    """
    Remove one or more Tahoe websites from the LMS records.

    Must be used with `remove_site` on AMC to avoid any errors there.
    Without `--commit` each deletion runs inside a transaction that is
    rolled back, making the command a dry run.
    """

    def add_arguments(self, parser):
        parser.add_argument(
            '--commit',
            default=False,
            dest='commit',
            help='Fully delete the site, otherwise the transaction will be rolled back.',
            action='store_true',
        )
        parser.add_argument(
            'domain',
            help='The domain of the organization to be deleted.',
            nargs='+',
            type=str,
        )

    def handle(self, *args, **options):
        domains = options['domain']
        for domain in domains:
            self.stdout.write('Removing "%s" in progress...' % domain)
            try:
                site = Site.objects.filter(domain=domain).first()
                if not site:
                    self.stderr.write(self.style.ERROR('Cannot find "{domain}"'.format(domain=domain)))
                    continue
                # Dry run by default: roll the whole delete back unless
                # --commit was given.
                with transaction.atomic():
                    delete_site(site)
                    if not options['commit']:
                        transaction.set_rollback(True)
            except Exception:  # noqa
                # Report the failure and keep going with the remaining
                # domains.  (A stray no-op `traceback.format_exc()` call
                # that followed this write has been removed.)
                self.stderr.write(self.style.ERROR(
                    'Failed to remove site "{domain}" error: \n {error}'.format(
                        domain=domain,
                        error=traceback.format_exc(),
                    )
                ))
            else:
                self.stdout.write(self.style.SUCCESS(
                    '{message} removed site "{domain}"'.format(
                        message='Successfully' if options['commit'] else 'Dry run',
                        domain=domain,
                    )
                ))
| from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.db import transaction
from openedx.core.djangoapps.appsembler.sites.utils import delete_site
class Command(BaseCommand):
"""
Remove a Tahoe website from LMS records.
Must be used `remove_site` on AMC to avoid any errors there.
"""
def add_arguments(self, parser):
parser.add_argument(
'domain',
help='The domain of the organization to be deleted.',
type=str,
)
parser.add_argument(
'--commit',
default=False,
dest='commit',
help='Fully delete the site, otherwise the transaction will be rolled back.',
action='store_true',
)
def handle(self, *args, **options):
organization_domain = options['domain']
self.stdout.write('Removing "%s" in progress...' % organization_domain)
site = self._get_site(organization_domain)
with transaction.atomic():
delete_site(site)
if not options['commit']:
transaction.set_rollback(True)
self.stdout.write(self.style.SUCCESS(
'{message} removed site "{domain}"'.format(
message='Successfully' if options['commit'] else 'Dry run',
domain=organization_domain,
)
))
def _get_site(self, domain):
"""
Locates the site to be deleted and return its instance.
:param domain: The domain of the site to be returned.
:return: Returns the site object that has the given domain.
"""
try:
return Site.objects.get(domain=domain)
except Site.DoesNotExist:
raise CommandError('Cannot find "%s" in Sites!' % domain)
| agpl-3.0 | Python |
69baf66b13331d5936e8c540a2bb7eccb1d64cb7 | Add view for user to download invoice | Chris7/django-invoice,Chris7/django-invoice,simonluijk/django-invoice | invoice/views.py | invoice/views.py | from django.shortcuts import get_object_or_404
from invoice.models import Invoice
from invoice.pdf import draw_pdf
from invoice.utils import pdf_response
def pdf_view(request, pk):
    """Serve the invoice with primary key *pk* as a generated PDF.

    NOTE(review): unlike pdf_user_view below there is no ownership check
    here -- confirm this view is only routed for staff/admin use.
    """
    invoice = get_object_or_404(Invoice, pk=pk)
    return pdf_response(draw_pdf, invoice.file_name(), invoice)
def pdf_user_view(request, invoice_id):
    """Serve one of the requesting user's own invoices as a PDF.

    The lookup matches both the public ``invoice_id`` and the current
    user, so requesting someone else's invoice yields a 404.
    """
    invoice = get_object_or_404(Invoice, invoice_id=invoice_id, user=request.user)
    return pdf_response(draw_pdf, invoice.file_name(), invoice)
| from django.shortcuts import get_object_or_404
from invoice.models import Invoice
from invoice.pdf import draw_pdf
from invoice.utils import pdf_response
def pdf_view(request, pk):
invoice = get_object_or_404(Invoice, pk=pk)
return pdf_response(draw_pdf, invoice.file_name(), invoice)
| bsd-3-clause | Python |
c3abd04da6c4ecc5d9464261c5c30d3eb527b04f | Bump version number | hugovk/coveragepy,larsbutler/coveragepy,blueyed/coveragepy,7WebPages/coveragepy,hugovk/coveragepy,larsbutler/coveragepy,nedbat/coveragepy,hugovk/coveragepy,7WebPages/coveragepy,hugovk/coveragepy,blueyed/coveragepy,jayhetee/coveragepy,nedbat/coveragepy,larsbutler/coveragepy,blueyed/coveragepy,jayhetee/coveragepy,jayhetee/coveragepy,7WebPages/coveragepy,jayhetee/coveragepy,hugovk/coveragepy,jayhetee/coveragepy,blueyed/coveragepy,7WebPages/coveragepy,larsbutler/coveragepy,larsbutler/coveragepy,nedbat/coveragepy,blueyed/coveragepy,nedbat/coveragepy,nedbat/coveragepy | coverage/__init__.py | coverage/__init__.py | """Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
__version__ = "3.3.2a1" # see detailed history in CHANGES.txt
__url__ = "http://nedbatchelder.com/code/coverage"
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability. Here we define the top-level
# functions to create the singleton when they are first called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2010 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
| """Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
__version__ = "3.3.1" # see detailed history in CHANGES.txt
__url__ = "http://nedbatchelder.com/code/coverage"
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability. Here we define the top-level
# functions to create the singleton when they are first called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2010 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
| apache-2.0 | Python |
6b090757799b6c5928b5cf9c0722d870ff822205 | Fix poorly written sentence | alphagov/notifications-api,alphagov/notifications-api | app/v2/template/post_template.py | app/v2/template/post_template.py | from flask import jsonify, request
from app import authenticated_service
from app.dao import templates_dao
from app.schema_validation import validate
from app.utils import get_template_instance
from app.v2.errors import BadRequestError
from app.v2.template import v2_template_blueprint
from app.v2.template.template_schemas import (
post_template_preview_request,
create_post_template_preview_response
)
from app.v2.utils import get_valid_json
@v2_template_blueprint.route("/<template_id>/preview", methods=['POST'])
def post_template_preview(template_id):
# The payload is empty when there are no place holders in the template.
_data = request.get_data(as_text=True)
if not _data:
_data = {}
else:
_data = get_valid_json()
_data['id'] = template_id
data = validate(_data, post_template_preview_request)
template = templates_dao.dao_get_template_by_id_and_service_id(
template_id, authenticated_service.id)
template_object = get_template_instance(
template.__dict__, values=data.get('personalisation'))
check_placeholders(template_object)
resp = create_post_template_preview_response(template=template,
template_object=template_object)
return jsonify(resp), 200
def check_placeholders(template_object):
if template_object.missing_data:
message = 'Missing personalisation: {}'.format(", ".join(template_object.missing_data))
raise BadRequestError(message=message, fields=[{'template': message}])
| from flask import jsonify, request
from app import authenticated_service
from app.dao import templates_dao
from app.schema_validation import validate
from app.utils import get_template_instance
from app.v2.errors import BadRequestError
from app.v2.template import v2_template_blueprint
from app.v2.template.template_schemas import (
post_template_preview_request,
create_post_template_preview_response
)
from app.v2.utils import get_valid_json
@v2_template_blueprint.route("/<template_id>/preview", methods=['POST'])
def post_template_preview(template_id):
# If the payload is is empty if there is no personalisation in the template.
_data = request.get_data(as_text=True)
if not _data:
_data = {}
else:
_data = get_valid_json()
_data['id'] = template_id
data = validate(_data, post_template_preview_request)
template = templates_dao.dao_get_template_by_id_and_service_id(
template_id, authenticated_service.id)
template_object = get_template_instance(
template.__dict__, values=data.get('personalisation'))
check_placeholders(template_object)
resp = create_post_template_preview_response(template=template,
template_object=template_object)
return jsonify(resp), 200
def check_placeholders(template_object):
if template_object.missing_data:
message = 'Missing personalisation: {}'.format(", ".join(template_object.missing_data))
raise BadRequestError(message=message, fields=[{'template': message}])
| mit | Python |
6d51b0aa6e18d0cd99485861d60a7f20f40cb97a | tweak osm import query | cubgs53/usaddress,frankleng/usaddress,ahlusar1989/probablepeople,markbaas/usaddress,yl2695/usaddress | training/import_osm.py | training/import_osm.py | import requests
import codecs
query1 = """<union>
<query type="way">
<has-kv k="addr:full"/>
<has-kv k="addr:city"/>
<has-kv k="addr:street"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:full"/>
<has-kv k="addr:city"/>
<has-kv k="addr:street"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-86.805193, 47.080799, 42.491920, -92.889259) * 2)
#r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1)
#f = open("data/osm_data.xml", "w")
#f.write(r1.text)
query2 = """<union>
<query type="way">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2)
r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2)
#f = codecs.open("data/osm_data_street.xml", "wb", "utf-8")
#r2.encoding = 'utf-8'
#f.write(r2.text)
query3 = """<union>
<query type="way">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>
""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2)
r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3)
f = codecs.open("data/osm_data_full_addr.xml", "wb", "utf-8")
r3.encoding = 'utf-8'
f.write(r3.text)
| import requests
import codecs
query1 = """<union>
<query type="way">
<has-kv k="addr:full"/>
<has-kv k="addr:city"/>
<has-kv k="addr:street"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:full"/>
<has-kv k="addr:city"/>
<has-kv k="addr:street"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-86.805193, 47.080799, 42.491920, -92.889259) * 2)
r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1)
f = open("data/osm_data.xml", "w")
f.write(r1.text)
query2 = """<union>
<query type="way">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2)
r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2)
f = codecs.open("data/osm_data_street.xml", "wb", "utf-8")
r2.encoding = 'utf-8'
f.write(r2.text)
query3 = """<union>
<query type="way">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]+.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]+.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>
""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2)
r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3)
f = codecs.open("data/osm_data_full_addr.xml", "wb", "utf-8")
r3.encoding = 'utf-8'
f.write(r3.text)
| mit | Python |
9de3e898b0ce1d8970052d2052e4215bc2f12491 | bump version to 2.11 | romonzaman/newfies-dialer,Star2Billing/newfies-dialer,Star2Billing/newfies-dialer,romonzaman/newfies-dialer,Star2Billing/newfies-dialer,saydulk/newfies-dialer,saydulk/newfies-dialer,newfies-dialer/newfies-dialer,saydulk/newfies-dialer,romonzaman/newfies-dialer,saydulk/newfies-dialer,newfies-dialer/newfies-dialer,Star2Billing/newfies-dialer,newfies-dialer/newfies-dialer,romonzaman/newfies-dialer,newfies-dialer/newfies-dialer | newfies/newfies_dialer/__init__.py | newfies/newfies_dialer/__init__.py | # -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
# :copyright: (c) 2011-2014 by Arezqui Belaid.
# :license: MPL 2.0, see COPYING for more details.
VERSION = (2, 11, 0, "")
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Arezqui Belaid"
__contact__ = "info@star2billing.com"
__homepage__ = "http://www.newfies-dialer.org"
__docformat__ = "restructuredtext"
| # -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
# :copyright: (c) 2011-2014 by Arezqui Belaid.
# :license: MPL 2.0, see COPYING for more details.
VERSION = (2, 10, 1, "")
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Arezqui Belaid"
__contact__ = "info@star2billing.com"
__homepage__ = "http://www.newfies-dialer.org"
__docformat__ = "restructuredtext"
| mpl-2.0 | Python |
014cb038b8e3a22c90864c842d2c049d3e9637d4 | Use recipe bootstrap | CoherentLabs/depot_tools,CoherentLabs/depot_tools | tests/recipes_test.py | tests/recipes_test.py | #!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs simulation tests and lint on the recipes."""
import os
import subprocess
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def recipes_py(*args):
subprocess.check_call([
os.path.join(ROOT_DIR, 'recipes.py'), '--use-bootstrap'] + list(args))
recipes_py('simulation_test')
recipes_py('lint')
| #!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs simulation tests and lint on the recipes."""
import os
import subprocess
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def recipes_py(*args):
subprocess.check_call([os.path.join(ROOT_DIR, 'recipes.py')] + list(args))
recipes_py('simulation_test')
recipes_py('lint')
| bsd-3-clause | Python |
b2371e4a19930ec32c0b212798203142c7c69f74 | Update fields.py | oscarmcm/django-places,oscarmcm/django-places,oscarmcm/django-places | places/fields.py | places/fields.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.db import models
from django.utils.six import with_metaclass
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from . import Places
from .forms import PlacesField as PlacesFormField
class PlacesField(models.Field):
description = _("A geoposition field (latitude and longitude)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 255
super(PlacesField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'CharField'
def to_python(self, value):
if not value or value == 'None':
return None
if isinstance(value, Places):
return value
if isinstance(value, list):
return Places(value[0], value[1], value[2])
value_parts = [Decimal(val) for val in value.split(',')[-2:]]
try:
latitude = value_parts[0]
except IndexError:
latitude = '0.0'
try:
longitude = value_parts[1]
except IndexError:
longitude = '0.0'
try:
place = ','.join(value.split(',')[:-2])
except:
pass
return Places(place, latitude, longitude)
def from_db_value(self, value, expression, connection):
return self.to_python(value)
def get_prep_value(self, value):
return str(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return smart_text(value)
def formfield(self, **kwargs):
defaults = {'form_class': PlacesFormField}
defaults.update(kwargs)
return super(PlacesField, self).formfield(**defaults)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.db import models
from django.utils.six import with_metaclass
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from . import Places
from .forms import PlacesField as PlacesFormField
class PlacesField(models.Field):
description = _("A geoposition field (latitude and longitude)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 255
super(PlacesField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'CharField'
def to_python(self, value):
if not value or value == 'None':
return None
if isinstance(value, Places):
return value
if isinstance(value, list):
return Places(value[0], value[1], value[2])
value_parts = [Decimal(val) for val in value.split(',')[-2:]]
try:
latitude = value_parts[0]
except IndexError:
latitude = '0.0'
try:
longitude = value_parts[1]
except IndexError:
longitude = '0.0'
try:
place = ','.join(value.split(',')[:-2])
except:
pass
return Places(place, latitude, longitude)
def from_db_value(self, value, expression, connection, context):
return self.to_python(value)
def get_prep_value(self, value):
return str(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return smart_text(value)
def formfield(self, **kwargs):
defaults = {'form_class': PlacesFormField}
defaults.update(kwargs)
return super(PlacesField, self).formfield(**defaults)
| mit | Python |
94f795dbbac32cbe1c83da1fb3cd3e55fdceef11 | Test de pycharm par Simon. | adrien-bellaiche/ia-cdf-rob-2015 | testFile.py | testFile.py | __author__ = 'adrie_000'
import numpy as np
import numpy.matrixlib as nm
'test'
p = nm.matrix([[3, 3, 3], [3, 5, 7]])
q = p - p.mean(1)
print q
r = [np.array([q.getA()[0][k], q.getA()[1][k]]) for k in range(q.shape[1])]
print r
d = [np.linalg.norm(w) for w in r]
print d | __author__ = 'adrie_000'
import numpy as np
import numpy.matrixlib as nm
p = nm.matrix([[3, 3, 3], [3, 5, 7]])
q = p - p.mean(1)
print q
r = [np.array([q.getA()[0][k], q.getA()[1][k]]) for k in range(q.shape[1])]
print r
d = [np.linalg.norm(w) for w in r]
print d | apache-2.0 | Python |
1bf981bd62d56e608cea8e7f0f0132af8cf25b70 | Fix NXOS tests to allow mocked data to raise | spotify/napalm,napalm-automation/napalm,spotify/napalm | test/nxos/conftest.py | test/nxos/conftest.py | """Test fixtures."""
from builtins import super
import pytest
from napalm.base.mock import raise_exception
from napalm.base.test import conftest as parent_conftest
from napalm.base.test.double import BaseTestDouble
from napalm.nxos import nxos
@pytest.fixture(scope="class")
def set_device_parameters(request):
"""Set up the class."""
def fin():
request.cls.device.close()
request.addfinalizer(fin)
request.cls.driver = nxos.NXOSDriver
request.cls.patched_driver = PatchedNXOSDriver
request.cls.vendor = "nxos"
parent_conftest.set_device_parameters(request)
def pytest_generate_tests(metafunc):
"""Generate test cases dynamically."""
parent_conftest.pytest_generate_tests(metafunc, __file__)
class PatchedNXOSDriver(nxos.NXOSDriver):
"""Patched NXOS Driver."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
super().__init__(hostname, username, password, timeout, optional_args)
self.patched_attrs = ["device"]
self.device = FakeNXOSDevice()
def disconnect(self):
pass
def is_alive(self):
return {"is_alive": True} # In testing everything works..
def open(self):
pass
class FakeNXOSDevice(BaseTestDouble):
"""NXOS device test double."""
def __init__(self):
super().__init__()
def _send_command(self, command, raw_text=False):
"""
Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH.
"""
return self.show(command, raw_text=raw_text)
def _send_command_list(self, commands):
return self.config_list(commands)
def show(self, command, raw_text=False):
"""Fake show."""
filename = "{}.json".format(command.replace(" ", "_"))
full_path = self.find_file(filename)
if raw_text:
result = self.read_txt_file(full_path)
else:
result = self.read_json_file(full_path)
if "exception" in result:
raise_exception(result)
return result
def config_list(self, command):
"""Fake config_list."""
pass
| """Test fixtures."""
from builtins import super
import pytest
from napalm.base.test import conftest as parent_conftest
from napalm.base.test.double import BaseTestDouble
from napalm.nxos import nxos
@pytest.fixture(scope="class")
def set_device_parameters(request):
"""Set up the class."""
def fin():
request.cls.device.close()
request.addfinalizer(fin)
request.cls.driver = nxos.NXOSDriver
request.cls.patched_driver = PatchedNXOSDriver
request.cls.vendor = "nxos"
parent_conftest.set_device_parameters(request)
def pytest_generate_tests(metafunc):
"""Generate test cases dynamically."""
parent_conftest.pytest_generate_tests(metafunc, __file__)
class PatchedNXOSDriver(nxos.NXOSDriver):
"""Patched NXOS Driver."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
super().__init__(hostname, username, password, timeout, optional_args)
self.patched_attrs = ["device"]
self.device = FakeNXOSDevice()
def disconnect(self):
pass
def is_alive(self):
return {"is_alive": True} # In testing everything works..
def open(self):
pass
class FakeNXOSDevice(BaseTestDouble):
"""NXOS device test double."""
def __init__(self):
super().__init__()
def _send_command(self, command, raw_text=False):
"""
Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH.
"""
return self.show(command, raw_text=raw_text)
def _send_command_list(self, commands):
return self.config_list(commands)
def show(self, command, raw_text=False):
"""Fake show."""
filename = "{}.json".format(command.replace(" ", "_"))
full_path = self.find_file(filename)
if raw_text:
result = self.read_txt_file(full_path)
else:
result = self.read_json_file(full_path)
return result
def config_list(self, command):
"""Fake config_list."""
pass
| apache-2.0 | Python |
3d25ebd40d55925c3b0c03454fc8543cd5e39ac9 | update import script for Salford (closes #1162) | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_salford.py | polling_stations/apps/data_collection/management/commands/import_salford.py | from data_collection.github_importer import BaseGitHubImporter
class Command(BaseGitHubImporter):
srid = 4326
districts_srid = 4326
council_id = 'E08000006'
elections = ['local.2018-05-03']
scraper_name = 'wdiv-scrapers/DC-PollingStations-Salford'
geom_type = 'geojson'
def district_record_to_dict(self, record):
poly = self.extract_geometry(record, self.geom_type, self.get_srid('districts'))
return {
'internal_council_id': record['code'],
'name' : record['code'],
'area' : poly
}
def station_record_to_dict(self, record):
location = self.extract_geometry(record, self.geom_type, self.get_srid('stations'))
return {
'internal_council_id': record['polling_district'],
'postcode': '',
'address': record['station_location'],
'location': location,
'polling_district_id': record['polling_district']
}
| from data_collection.github_importer import BaseGitHubImporter
class Command(BaseGitHubImporter):
srid = 4326
districts_srid = 4326
council_id = 'E08000006'
elections = ['parl.2017-06-08']
scraper_name = 'wdiv-scrapers/DC-PollingStations-Salford'
geom_type = 'geojson'
def district_record_to_dict(self, record):
poly = self.extract_geometry(record, self.geom_type, self.get_srid('districts'))
return {
'internal_council_id': record['code'],
'name' : record['code'],
'area' : poly
}
def station_record_to_dict(self, record):
location = self.extract_geometry(record, self.geom_type, self.get_srid('stations'))
return {
'internal_council_id': record['polling_district'],
'postcode': '',
'address': record['station_location'],
'location': location,
'polling_district_id': record['polling_district']
}
| bsd-3-clause | Python |
1f7e355a3a325284c912265ce178d15129555375 | Add a news model | bm424/churchmanager,bm424/churchmanager | models.py | models.py | import urllib
from django.db import models
from django.core.validators import RegexValidator
from django.utils.text import slugify
from image_cropping import ImageRatioField
class Church(models.Model):
class Meta:
verbose_name_plural = "Churches"
ordering = ['name']
name = models.CharField(max_length=64)
CLASSIFICATION_CHOICES = (
('T', 'Town'),
('V', 'Village'),
)
classification = models.CharField(
max_length=1,
choices=CLASSIFICATION_CHOICES,
default='T'
)
short_description = models.CharField(max_length=128)
long_description = models.TextField()
photo = models.ImageField(blank=True)
wide_crop = ImageRatioField('photo', '768x200', size_warning=True)
list_crop = ImageRatioField('photo', '250x200', size_warning=True)
address_line_1 = models.CharField(max_length=64)
address_line_2 = models.CharField(max_length=64)
postcode_regex = RegexValidator(
regex=r'^([a-zA-Z](([0-9][0-9]?)|([a-zA-Z][0-9][0-9]?)|([a-zA-Z]?[0-9][a-zA-Z]))'
r' ?[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2})$', message="Invalid postcode.")
postcode = models.CharField(max_length=10, validators=[postcode_regex])
email = models.EmailField(blank=True)
phone_regex = RegexValidator(
regex=r'^[ \d]{6,20}$',
message="Phone number bust be a sequence of numbers or spaces."
)
phone_number = models.CharField(blank=True, max_length=20)
website = models.URLField(blank=True)
slug = models.SlugField(editable=False, unique=True)
show_map = models.BooleanField(default=True, help_text="Choose whether or not to display the location of the church as a Google map.")
map_query = models.CharField(max_length=200, blank=True, editable=False, null=True)
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.name)
self.map_query = ", ".join([self.address_line_1, self.address_line_2, self.postcode, "United Kingdom"])
super(Church, self).save(*args, **kwargs)
def __str__(self):
return "{}, {}".format(self.name, self.address_line_2)
class News(models.Model):
title = models.CharField(max_length=144)
detail = models.TextField()
photo = models.ImageField(blank=True)
square_crop = ImageRatioField('photo', '64x64', size_warning=True)
date = models.DateField(auto_now_add=True)
| import urllib
from django.db import models
from django.core.validators import RegexValidator
from django.utils.text import slugify
from image_cropping import ImageRatioField
class Church(models.Model):
class Meta:
verbose_name_plural = "Churches"
ordering = ['name']
name = models.CharField(max_length=64)
CLASSIFICATION_CHOICES = (
('T', 'Town'),
('V', 'Village'),
)
classification = models.CharField(
max_length=1,
choices=CLASSIFICATION_CHOICES,
default='T'
)
short_description = models.CharField(max_length=128)
long_description = models.TextField()
photo = models.ImageField(blank=True)
wide_crop = ImageRatioField('photo', '768x200', size_warning=True)
list_crop = ImageRatioField('photo', '250x200', size_warning=True)
address_line_1 = models.CharField(max_length=64)
address_line_2 = models.CharField(max_length=64)
postcode_regex = RegexValidator(
regex=r'^([a-zA-Z](([0-9][0-9]?)|([a-zA-Z][0-9][0-9]?)|([a-zA-Z]?[0-9][a-zA-Z]))'
r' ?[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2})$', message="Invalid postcode.")
postcode = models.CharField(max_length=10, validators=[postcode_regex])
email = models.EmailField(blank=True)
phone_regex = RegexValidator(
regex=r'^[ \d]{6,20}$',
message="Phone number bust be a sequence of numbers or spaces."
)
phone_number = models.CharField(blank=True, max_length=20)
website = models.URLField(blank=True)
slug = models.SlugField(editable=False, unique=True)
show_map = models.BooleanField(default=True, help_text="Choose whether or not to display the location of the church as a Google map.")
map_query = models.CharField(max_length=200, blank=True, editable=False, null=True)
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.name)
self.map_query = ", ".join([self.address_line_1, self.address_line_2, self.postcode, "United Kingdom"])
super(Church, self).save(*args, **kwargs)
def __str__(self):
return "{}, {}".format(self.name, self.address_line_2)
| mit | Python |
dd3a8cddc1ca7efe09bb9887a5fdc6b9b989b734 | Bump version for pypi to 0.2018.07.04.1917 | oduwsdl/ipwb,oduwsdl/ipwb,oduwsdl/ipwb,oduwsdl/ipwb | ipwb/__init__.py | ipwb/__init__.py | __version__ = '0.2018.07.04.1917'
| __version__ = '0.2018.07.04.1637'
| mit | Python |
013048e1d68174e71d4579e28efd0339144ce186 | Make function! and Use 4 tabs, not 8 tabs. | Yokan-Study/study,Yokan-Study/study,Yokan-Study/study | 2017/11.07/python/jya_homework2.py | 2017/11.07/python/jya_homework2.py | ```python
i = -1
sum = 0
one_to_ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
while i < 10:
i += 1
sum += one_to_ten[i]
print(sum)
```
**Make function!**
`like:`
```python
def sum(number):
# .. TODO
return 0
```
`I want to reuse this function, like:`
```python
one_to_ten = sum(range(1,11))
one_to_five = sum(range(1,6))
five_to_ten = sum(range(5,11))
```
| ```python
i = -1
sum = 0
one_to_ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
while i < 10:
i += 1
sum += one_to_ten[i]
print(sum)
``` | mit | Python |
eb121b797e803be295fb2abeef91ebcdab2e75cf | Normalize data | alexey-ernest/ml-for-trading | plot_multiple.py | plot_multiple.py |
import os
import pandas as pd
import matplotlib.pyplot as plt
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'],
na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset=['SPY'])
return df
def normalize_data(df):
return df/df.ix[0,:]
def plot_data(df, title='Stock Prices'):
ax = df.plot(title=title)
ax.set_xlabel('Date')
ax.set_ylabel('Price')
plt.show()
def test_run():
# Define a date range
dates = pd.date_range('2015-11-23', '2016-11-18')
# Choose stock symbols to read
symbols = ['GOOG', 'IBM', 'GLD']
# Get stock data
df = get_data(symbols, dates)
plot_data(normalize_data(df))
if __name__ == "__main__":
test_run()
|
import os
import pandas as pd
import matplotlib.pyplot as plt
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'],
na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset=['SPY'])
return df
def plot_data(df, title='Stock Prices'):
ax = df.plot(title=title)
ax.set_xlabel('Date')
ax.set_ylabel('Price')
plt.show()
def test_run():
# Define a date range
dates = pd.date_range('2015-11-23', '2016-11-18')
# Choose stock symbols to read
symbols = ['GOOG', 'IBM', 'GLD']
# Get stock data
df = get_data(symbols, dates)
#df = df/df.ix['2015-11-23':'2015-11-23']
plot_data(df)
if __name__ == "__main__":
test_run()
| mit | Python |
f81680a0bfe04b9e38fda53038e00346246bb1eb | make test more portable | sassoftware/mint,sassoftware/mint,sassoftware/mint,sassoftware/mint,sassoftware/mint | mint_test/resttest/apitest/sitetest.py | mint_test/resttest/apitest/sitetest.py | #!/usr/bin/python
#
# Copyright (c) 2009 rPath, Inc.
#
# All Rights Reserved
#
import testsetup
import os
import re
import time
from conary import conaryclient
from conary import constants as conaryConstants
from conary.lib import util
from mint import buildtypes
from mint import constants
from rpath_proddef import api1 as proddef
import restbase
from restlib import client as restClient
ResponseError = restClient.ResponseError
class SiteTest(restbase.BaseRestTest):
def testGetInfo(self):
uriTemplate = ''
uri = uriTemplate
client = self.getRestClient()
req, response = client.call('GET', uri, convert=True)
exp = """\
<?xml version='1.0' encoding='UTF-8'?>
<rbuilderStatus id="http://%(server)s:%(port)s/api">
<version>%(version)s</version>
<conaryVersion>%(conaryversion)s</conaryVersion>
<isRBO>false</isRBO>
<identity>
<rbuilderId></rbuilderId>
<serviceLevel status="Unknown" daysRemaining="-1" expired="true" limited="true"/>
<registered>false</registered>
</identity>
<products href="http://%(server)s:%(port)s/api/products/"/>
<users href="http://%(server)s:%(port)s/api/users/"/>
<platforms href="http://%(server)s:%(port)s/api/platforms"/>
<registration href="http://%(server)s:%(port)s/api/registration"/>
<maintMode>false</maintMode>
<proddefSchemaVersion>%(proddefVer)s</proddefSchemaVersion>
</rbuilderStatus>
"""
self.assertBlobEquals(response,
exp % dict(port = client.port, server = client.server,
version=constants.mintVersion,
conaryversion=conaryConstants.version,
proddefVer=proddef.BaseDefinition.version))
if __name__ == "__main__":
testsetup.main()
| #!/usr/bin/python
#
# Copyright (c) 2009 rPath, Inc.
#
# All Rights Reserved
#
import testsetup
import os
import re
import time
from conary import conaryclient
from conary import constants as conaryConstants
from conary.lib import util
from mint import buildtypes
from mint import constants
import restbase
from restlib import client as restClient
ResponseError = restClient.ResponseError
class SiteTest(restbase.BaseRestTest):
def testGetInfo(self):
uriTemplate = ''
uri = uriTemplate
client = self.getRestClient()
req, response = client.call('GET', uri, convert=True)
exp = """\
<?xml version='1.0' encoding='UTF-8'?>
<rbuilderStatus id="http://%(server)s:%(port)s/api">
<version>%(version)s</version>
<conaryVersion>%(conaryversion)s</conaryVersion>
<isRBO>false</isRBO>
<identity>
<rbuilderId></rbuilderId>
<serviceLevel status="Unknown" daysRemaining="-1" expired="true" limited="true"/>
<registered>false</registered>
</identity>
<products href="http://%(server)s:%(port)s/api/products/"/>
<users href="http://%(server)s:%(port)s/api/users/"/>
<platforms href="http://%(server)s:%(port)s/api/platforms"/>
<registration href="http://%(server)s:%(port)s/api/registration"/>
<maintMode>false</maintMode>
<proddefSchemaVersion>3.0</proddefSchemaVersion>
</rbuilderStatus>
"""
self.assertBlobEquals(response,
exp % dict(port = client.port, server = client.server,
version=constants.mintVersion,
conaryversion=conaryConstants.version))
if __name__ == "__main__":
testsetup.main()
| apache-2.0 | Python |
a2b1addd08c82162d7554aff2636d370cee68922 | Update 0008.py | 12wang3/python,fairyzoro/python,EricSekyere/python,haiyangd/python-show-me-the-code-,fairyzoro/python,yangzilong1986/python,Show-Me-the-Code/python,YGIronMan/python,starlightme/python,snailwalker/python,Show-Me-the-Code/python,snailwalker/python,hooting/show-me-the-code-python,fairyzoro/python,DIYgod/python,merfii/PythonExercises,luoxufeiyan/python,DanielShangHai/python,sravaniaitha/python,Mark24Code/python,llluiop/python-1,Ph0enixxx/python,starlightme/python,zhenglaizhang/python,lz199144/python,ZSeaPeng/python,Ph0enixxx/python,Yrthgze/prueba-sourcetree2,wangjun/python,hooting/show-me-the-code-python,agogear/python-1,Yrthgze/prueba-sourcetree2,snailwalker/python,llluiop/python-1,wangjun/python,wangjun/python,dominjune/python,xiaoixa/python,haiyangd/python-show-me-the-code-,Jaccorot/python,Yrthgze/prueba-sourcetree2,YGIronMan/python,ionutcipriananescu/python,xchaoinfo/python,Pritesh242/python,12wang3/python,JiYouMCC/python,yangzilong1986/python,ZuoGuocai/python,Friday21/python_show_me_the_code,sravaniaitha/python,karnikamit/python,luoxufeiyan/python,fairyzoro/python,zhakui/python,JiYouMCC/python,tzq668766/python,Friday21/python_show_me_the_code,keysona/python,Ph0enixxx/python,ZSeaPeng/python,Show-Me-the-Code/python,YGIronMan/python,agogear/python-1,merfii/PythonExercises,DanielShangHai/python,Jaccorot/python,hooting/show-me-the-code-python,tzq668766/python,yangzilong1986/python,ZuoGuocai/python,ionutcipriananescu/python,tzq668766/python,keysona/python,DIYgod/python,hooting/show-me-the-code-python,starlightme/python,12wang3/python,hooting/show-me-the-code-python,starlightme/python,YGIronMan/python,xchaoinfo/python,whix/python,tzq668766/python,snailwalker/python,haiyangd/python-show-me-the-code-,renzongxian/Show-Me-the-Code,zhakui/python,JiYouMCC/python,wangjun/python,karnikamit/python,zhenglaizhang/python,snailwalker/python,karnikamit/python,EricSekyere/python,DanielShangHai/python,YGIronMan/python,tzq668766/
python,Mark24Code/python,zhenglaizhang/python,EricSekyere/python,ZSeaPeng/python,haiyangd/python-show-me-the-code-,xiaoixa/python,renzongxian/Show-Me-the-Code,Jaccorot/python,sravaniaitha/python,Yrthgze/prueba-sourcetree2,agogear/python-1,xiaoixa/python,zhenglaizhang/python,Friday21/python_show_me_the_code,karnikamit/python,12wang3/python,Yrthgze/prueba-sourcetree2,JiYouMCC/python,Mark24Code/python,fairyzoro/python,Mark24Code/python,ionutcipriananescu/python,dominjune/python,keysona/python,lz199144/python,yangzilong1986/python,whix/python,Pritesh242/python,Mark24Code/python,haiyangd/python-show-me-the-code-,xchaoinfo/python,Jaccorot/python,ionutcipriananescu/python,dominjune/python,yangzilong1986/python,lz199144/python,EricSekyere/python,Yrthgze/prueba-sourcetree2,xchaoinfo/python,merfii/PythonExercises,ionutcipriananescu/python,agogear/python-1,keysona/python,whix/python,Show-Me-the-Code/python,zhakui/python,llluiop/python-1,dominjune/python,Show-Me-the-Code/python,renzongxian/Show-Me-the-Code,merfii/PythonExercises,DanielShangHai/python,lz199144/python,ZSeaPeng/python,merfii/PythonExercises,renzongxian/Show-Me-the-Code,starlightme/python,zhakui/python,Pritesh242/python,Pritesh242/python,Ph0enixxx/python,DanielShangHai/python,sravaniaitha/python,dominjune/python,Pritesh242/python,sravaniaitha/python,Jaccorot/python,lz199144/python,luoxufeiyan/python,JiYouMCC/python,Ph0enixxx/python,ZuoGuocai/python,ZuoGuocai/python,zhenglaizhang/python,ZuoGuocai/python,xiaoixa/python,DIYgod/python,luoxufeiyan/python,DIYgod/python,Friday21/python_show_me_the_code,ZSeaPeng/python,EricSekyere/python,Show-Me-the-Code/python,luoxufeiyan/python,agogear/python-1,12wang3/python,zhakui/python,Friday21/python_show_me_the_code,wangjun/python,llluiop/python-1,karnikamit/python,renzongxian/Show-Me-the-Code,xchaoinfo/python,whix/python,xiaoixa/python,llluiop/python-1,whix/python,keysona/python | renzongxian/0008/0008.py | renzongxian/0008/0008.py | # 
Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-12-20
# Python 3.4
"""
第 0008 题:一个HTML文件,找出里面的正文。
"""
import urllib.request
import re
def get_body(url):
    """Fetch *url* and return the list of paragraph texts found in the page.

    The page is decoded from GBK (the encoding used by the target news
    site) to ``str`` before matching: in Python 3 a ``str`` pattern cannot
    be applied to ``bytes``, so the previous
    ``.decode('GBK').encode('utf-8')`` chain made ``findall`` raise
    ``TypeError``.
    """
    html_content = urllib.request.urlopen(url).read()
    # Paragraph text, optionally wrapped in one extra tag on either side,
    # e.g. <p><b>text</b></p>.
    paragraph_re = re.compile(r'<p>(?:<.[^>]*>)?(.*?)(?:<.[^>]*>)?</p>')
    return paragraph_re.findall(html_content.decode('GBK'))
if __name__ == '__main__':
    # Fetch the article and dump each extracted paragraph on its own line.
    paragraphs = get_body('http://tech.163.com/14/1219/01/ADPT7MTE000915BF.html')
    with open('result.txt', 'w') as output:
        output.writelines(text + '\n' for text in paragraphs)
| # Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-12-20
# Python 3.4
"""
第 0008 题:一个HTML文件,找出里面的正文。
"""
import urllib.request
import re
def get_body(url):
html_content = urllib.request.urlopen(url).read()
r = re.compile('<p>(?:<.[^>]*>)?(.*?)(?:<.[^>]*>)?</p>')
result = r.findall(html_content.decode('GBK'))
return result
if __name__ == '__main__':
body = get_body('http://tech.163.com/14/1219/01/ADPT7MTE000915BF.html')
file_object = open('result.txt', 'w')
for l in body:
file_object.write(l + '\n')
file_object.close()
| mit | Python |
c32e163e3a5ea5ed9f8b3c95df7ad94c7c90bf80 | add missing import | AngelTsanev/AntColony | lib/simulator.py | lib/simulator.py | from rgbmatrix import RGBMatrix
from rgbmatrix import graphics
from field import Field
from color import Color
from ant import Ant
import time
Matrix = RGBMatrix(32, 2, 1)
Matrix.pwmBits = 11
Matrix.brightness = 50
def colorize(foods, base, ants):
    """Paint food (green), ants (yellow) and the base cell on the LED matrix."""
    for x, y in foods:
        Matrix.SetPixel(x, y, 0, 200, 0)
    for x, y in ants:
        Matrix.SetPixel(x, y, 255, 255, 0)
    # The base pixel is drawn after the food and ant pixels.
    Matrix.SetPixel(base[0], base[1], 100, 255, 100)
# NOTE(review): indentation was lost in this dump; nesting below is
# reconstructed -- confirm against the upstream file.
# Build the world: a field loaded from disk with the colony base at (30, 40)
# and two food sources.
field = Field("field.txt", (30, 40))
field.food.append((10, 10))
field.food.append((20, 20))
# Spawn 29 ants (ids 1..29) at the base cell, then mark that cell occupied.
for i in range(1, 30):
    field.units.append(Ant(Color(0, 0, 100), 30, 40, i))
field.add_occupied((30, 40))
# Double-buffered rendering: draw offscreen, then swap on vertical sync.
offscreenCanvas = Matrix.CreateFrameCanvas()
i = 1
while True:
    i+=1
    offscreenCanvas.Clear()
    field.draw()
    # Overlay food / ant / base colours on top of the field's own drawing.
    colorize(field.food, field.base, field.occupied)
    field.generate_next()
    # Every 10 frames reset the counter and let pheromone trails evaporate.
    if i % 10 == 0:
        i = 1
        field.evaporate_pheromones()
    time.sleep(0.4)
    offscreenCanvas = Matrix.SwapOnVSync(offscreenCanvas)
| from rgbmatrix import RGBMatrix
from rgbmatrix import graphics
from field import Field
from color import Color
from ant import Ant
Matrix = RGBMatrix(32, 2, 1)
Matrix.pwmBits = 11
Matrix.brightness = 50
def colorize(foods, base, ants):
for food in foods:
Matrix.SetPixel(food[0], food[1], 0, 200, 0)
for ant in ants:
Matrix.SetPixel(ant[0], ant[1], 255, 255, 0)
#base
Matrix.SetPixel(base[0], base[1], 100, 255, 100)
field = Field("field.txt", (30, 40))
field.food.append((10, 10))
field.food.append((20, 20))
for i in range(1, 30):
field.units.append(Ant(Color(0, 0, 100), 30, 40, i))
field.add_occupied((30, 40))
offscreenCanvas = Matrix.CreateFrameCanvas()
i = 1
while True:
i+=1
offscreenCanvas.Clear()
field.draw()
colorize(field.food, field.base, field.occupied)
field.generate_next()
if i % 10 == 0:
i = 1
field.evaporate_pheromones()
time.sleep(0.4)
offscreenCanvas = Matrix.SwapOnVSync(offscreenCanvas)
| mit | Python |
59f6a67dd7457da92f0ab1ff8a78e672dfdb1bdc | Remove help from `argparse` to use as parent. | DerWeh/pyplot | plotter/plotn.py | plotter/plotn.py | """existing plotting script to test with."""
import argparse
def parse_arguments():
print __doc__
parser = argparse.ArgumentParser(description=__doc__.split('\n',1)[0],add_help=False)
parser.add_argument('-s', '--start', action='store', type=int, default=0,
help='the number of the first iteration to plot')
return parser
if __name__ == '__main__':
import sys
import pylab
import numpy as np
import matplotlib.pyplot as plt
parser = parse_arguments()
start = parser.parse_args().start
# start = 0
# if(len(sys.argv)==2):
# start = int(sys.argv[1])
try:
n = np.atleast_2d(np.loadtxt("nsteps.dat"))
except IOError:
n = np.atleast_2d(np.loadtxt("loop/nsteps.dat"))
i = start
x = range(0,len(n[0]))
for line in n[start:]:
plt.plot(x,line, label = "{0}".format(i))
i+=1
plt.legend()
plt.xlabel('label'), plt.ylabel('occupation')
param = None
try:
param = open('layer_hb_param.init', 'r')
content = param.readlines()
except IOError:
try:
param = open('../layer_hb_param.init', 'r')
content = param.readlines()
except IOError:
pass
finally:
param.close()
if param:
N = int(content[5])
U = np.fromstring(content[28 + N*2], sep = ' ')
mu = np.fromstring(content[29 + N*2], sep = ' ')
if(N>10):
N = N/10
else:
N = 1
pos = range(0,len(n[0]),N)
labels = ['{0}\nU={1}\nmu={2}'.format(i,U[i],mu[i]) for i in pos]
pylab.xticks(pos, labels)
plt.show()
| """existing plotting script to test with."""
import argparse
def parse_arguments():
print __doc__
parser = argparse.ArgumentParser(description=__doc__.split('\n',1)[0])
parser.add_argument('-s', '--start', action='store', type=int, default=0,
help='the number of the first iteration to plot')
return parser
if __name__ == '__main__':
import sys
import pylab
import numpy as np
import matplotlib.pyplot as plt
parser = parse_arguments()
start = parser.parse_args().start
# start = 0
# if(len(sys.argv)==2):
# start = int(sys.argv[1])
try:
n = np.atleast_2d(np.loadtxt("nsteps.dat"))
except IOError:
n = np.atleast_2d(np.loadtxt("loop/nsteps.dat"))
i = start
x = range(0,len(n[0]))
for line in n[start:]:
plt.plot(x,line, label = "{0}".format(i))
i+=1
plt.legend()
plt.xlabel('label'), plt.ylabel('occupation')
param = None
try:
param = open('layer_hb_param.init', 'r')
content = param.readlines()
except IOError:
try:
param = open('../layer_hb_param.init', 'r')
content = param.readlines()
except IOError:
pass
finally:
param.close()
if param:
N = int(content[5])
U = np.fromstring(content[28 + N*2], sep = ' ')
mu = np.fromstring(content[29 + N*2], sep = ' ')
if(N>10):
N = N/10
else:
N = 1
pos = range(0,len(n[0]),N)
labels = ['{0}\nU={1}\nmu={2}'.format(i,U[i],mu[i]) for i in pos]
pylab.xticks(pos, labels)
plt.show()
| mit | Python |
ab2e26f388e174d7d66ba659430f7b772e5c2199 | Update dump_ast.py | sztomi/code-generator,rashadkm/code-generator,sztomi/code-generator,sztomi/code-generator,rashadkm/code-generator,rashadkm/code-generator | src/dump_ast.py | src/dump_ast.py | #!/usr/bin/python
# vim: set fileencoding=utf-8
import clang.cindex
import asciitree # must be version 0.2
import sys
def node_children(node):
return (c for c in node.get_children() if c.location.file.name == sys.argv[1])
def print_node(node):
text = node.spelling or node.displayname
kind = str(node.kind)[str(node.kind).index('.')+1:]
return '{} {}'.format(kind, text)
if len(sys.argv) != 2:
print("Usage: dump_ast.py [header file name]")
sys.exit()
clang.cindex.Config.set_library_file('/usr/local/lib/libclang.so')
index = clang.cindex.Index.create()
translation_unit = index.parse(sys.argv[1], ['-x', 'c++', '-std=c++11', '-D__CODE_GENERATOR__'])
print(asciitree.draw_tree(translation_unit.cursor, node_children, print_node))
| #!/usr/bin/python
# vim: set fileencoding=utf-8
import clang.cindex
import asciitree
import sys
def node_children(node):
return (c for c in node.get_children() if c.location.file.name == sys.argv[1])
def print_node(node):
text = node.spelling or node.displayname
kind = str(node.kind)[str(node.kind).index('.')+1:]
return '{} {}'.format(kind, text)
if len(sys.argv) != 2:
print("Usage: dump_ast.py [header file name]")
sys.exit()
clang.cindex.Config.set_library_file('/usr/local/lib/libclang.so')
index = clang.cindex.Index.create()
translation_unit = index.parse(sys.argv[1], ['-x', 'c++', '-std=c++11', '-D__CODE_GENERATOR__'])
print(asciitree.draw_tree(translation_unit.cursor, node_children, print_node))
| mit | Python |
84d734fe0df15c9968222346b13c6f06530a96b6 | fix command line argument parsing test | itoed/fabric,qinrong/fabric,fernandezcuesta/fabric,rodrigc/fabric,sdelements/fabric,TarasRudnyk/fabric,ploxiln/fabric,pashinin/fabric,pgroudas/fabric,bitprophet/fabric,felix-d/fabric,bspink/fabric,tekapo/fabric,bitmonk/fabric,MjAbuz/fabric,rane-hs/fabric-py3,elijah513/fabric,getsentry/fabric,kmonsoor/fabric,rbramwell/fabric,opavader/fabric,ericholscher/fabric,tolbkni/fabric,cgvarela/fabric,StackStorm/fabric,cmattoon/fabric,simon-engledew/fabric,SamuelMarks/fabric,akaariai/fabric,askulkarni2/fabric,xLegoz/fabric,mathiasertl/fabric,amaniak/fabric,likesxuqiang/fabric,haridsv/fabric,kxxoling/fabric,jaraco/fabric,hrubi/fabric,raimon49/fabric | test/test_plumbing.py | test/test_plumbing.py | def setUp(self):
pass
def test_cli_arg_parsing(self):
    # Each case maps a raw command-line task spec to the tuple expected
    # from fabric._parse_args: (command name, positional args, keyword
    # args, and a trailing list -- presumably per-task hosts; confirm
    # against fabric._parse_args).
    tests = [
        ("abc", ("abc", [], {}, [])),
        ("ab:c", ("ab", ['c'], {}, [])),
        ("a:b=c", ('a', [], {'b':'c'}, [])),
        ("a:b=c,d", ('a', ['d'], {'b':'c'}, [])),
        ("a:b=c,d=e", ('a', [], {'b':'c','d':'e'}, [])),
    ]
    for cli, output in tests:
        self.assertEquals(fabric._parse_args([cli]), [output])
| def setUp(self):
pass
def test_cli_arg_parsing(self):
tests = [
("abc", ("abc", [], {})),
("ab:c", ("ab", ['c'], {})),
("a:b=c", ('a', [], {'b':'c'})),
("a:b=c,d", ('a', ['d'], {'b':'c'})),
("a:b=c,d=e", ('a', [], {'b':'c','d':'e'})),
]
for cli, output in tests:
self.assertEquals(fabric._parse_args([cli]), [output])
| bsd-2-clause | Python |
29d3aa6bf6a7a670b24ff423cb687a0c6d862208 | Remove outdated comment | springload/draftjs_exporter,springload/draftjs_exporter,springload/draftjs_exporter | tests/test_exports.py | tests/test_exports.py | from __future__ import absolute_import, unicode_literals
import json
import os
import unittest
from draftjs_exporter.constants import BLOCK_TYPES, ENTITY_TYPES, INLINE_STYLES
from draftjs_exporter.defaults import BLOCK_MAP
from draftjs_exporter.html import HTML
from tests.test_entities import Image, Link
fixtures_path = os.path.join(os.path.dirname(__file__), 'test_exports.json')
fixtures = json.loads(open(fixtures_path, 'r').read())
# Exporter configuration shared by all fixture-driven tests: how entities,
# blocks and inline styles in the ContentState are rendered to HTML.
config = {
    'entity_decorators': {
        ENTITY_TYPES.LINK: Link(),
        ENTITY_TYPES.IMAGE: Image(),
    },
    # Extend the default block map: custom <ul> wrapper for list items and a
    # plain <span> for atomic (embed) blocks.
    'block_map': dict(BLOCK_MAP, **{
        BLOCK_TYPES.UNORDERED_LIST_ITEM: {
            'element': 'li',
            'wrapper': ['ul', {'className': 'bullet-list'}],
        },
        BLOCK_TYPES.ATOMIC: {'element': 'span'},
    }),
    'style_map': {
        INLINE_STYLES.ITALIC: {'element': 'em'},
        INLINE_STYLES.BOLD: {'element': 'strong'},
    },
}
class TestExportsMeta(type):
    """
    Generates test cases dynamically.
    See http://stackoverflow.com/a/20870875/1798491
    """
    # NOTE(review): the ``dict`` parameter shadows the builtin of the same
    # name (it is the class namespace passed by the metaclass machinery).
    def __new__(mcs, name, bases, dict):
        # Closure factory: binds one fixture ``export`` to one test method.
        def gen_test(export):
            def test(self):
                self.maxDiff = None
                self.assertEqual(HTML(config).render(export.get('content_state')), export.get('output'))
            return test
        # One test_export_<label> method per fixture loaded from the JSON file.
        for export in fixtures:
            test_name = 'test_export_%s' % export.get('label').lower().replace(' ', '_')
            dict[test_name] = gen_test(export)
        return type.__new__(mcs, name, bases, dict)
class TestExports(unittest.TestCase):
    # Python 2 metaclass hook; under Python 3 this attribute is ignored and
    # no per-fixture tests would be generated -- NOTE(review): confirm the
    # intended Python version.
    __metaclass__ = TestExportsMeta
| from __future__ import absolute_import, unicode_literals
import json
import os
import unittest
from draftjs_exporter.constants import BLOCK_TYPES, ENTITY_TYPES, INLINE_STYLES
from draftjs_exporter.defaults import BLOCK_MAP
from draftjs_exporter.html import HTML
from tests.test_entities import Image, Link
fixtures_path = os.path.join(os.path.dirname(__file__), 'test_exports.json')
fixtures = json.loads(open(fixtures_path, 'r').read())
# TODO Move this to JSON file.
config = {
'entity_decorators': {
ENTITY_TYPES.LINK: Link(),
ENTITY_TYPES.IMAGE: Image(),
},
'block_map': dict(BLOCK_MAP, **{
BLOCK_TYPES.UNORDERED_LIST_ITEM: {
'element': 'li',
'wrapper': ['ul', {'className': 'bullet-list'}],
},
BLOCK_TYPES.ATOMIC: {'element': 'span'},
}),
'style_map': {
INLINE_STYLES.ITALIC: {'element': 'em'},
INLINE_STYLES.BOLD: {'element': 'strong'},
},
}
class TestExportsMeta(type):
"""
Generates test cases dynamically.
See http://stackoverflow.com/a/20870875/1798491
"""
def __new__(mcs, name, bases, dict):
def gen_test(export):
def test(self):
self.maxDiff = None
self.assertEqual(HTML(config).render(export.get('content_state')), export.get('output'))
return test
for export in fixtures:
test_name = 'test_export_%s' % export.get('label').lower().replace(' ', '_')
dict[test_name] = gen_test(export)
return type.__new__(mcs, name, bases, dict)
class TestExports(unittest.TestCase):
__metaclass__ = TestExportsMeta
| mit | Python |
da954862019e3f836952d746b04bbb1f7d3035bc | update version to 0.7.1 | Widukind/dlstats,Widukind/dlstats | dlstats/version.py | dlstats/version.py | VERSION = (0, 7, 1)
def version_str():
    """Render the module-level VERSION tuple as a version string.

    A 3-tuple becomes "X.Y.Z"; a 4-tuple becomes "X.Y.Z-SUFFIX".
    """
    size = len(VERSION)
    if size == 4:
        return "%s.%s.%s-%s" % VERSION
    if size == 3:
        return "%s.%s.%s" % VERSION
    raise IndexError("Incorrect format for the VERSION tuple")
| VERSION = (0, 7, 0)
def version_str():
if len(VERSION) == 3:
return "%s.%s.%s" % VERSION
elif len(VERSION) == 4:
return "%s.%s.%s-%s" % VERSION
else:
raise IndexError("Incorrect format for the VERSION tuple")
| agpl-3.0 | Python |
440d1e5a578dc79e55d4bd3b399134948650beb9 | test errors | scrapinghub/extruct | tests/test_extruct.py | tests/test_extruct.py | # -*- coding: utf-8 -*-
import json
import unittest
import pytest
import extruct
from tests import get_testdata, jsonize_dict, replace_node_ref_with_node_id
class TestGeneric(unittest.TestCase):
    # Show full diffs on assertion failures -- the fixture payloads are large.
    maxDiff = None

    def test_all(self):
        # End-to-end: all syntaxes extracted from one real captured page.
        body = get_testdata('songkick', 'elysianfields.html')
        expected = json.loads(get_testdata('songkick', 'elysianfields.json').decode('UTF-8'))
        data = extruct.extract(body, base_url='http://www.songkick.com/artists/236156-elysian-fields')
        self.assertEqual(jsonize_dict(data), expected)

    def test_microdata_custom_url(self):
        body, expected = self._microdata_custom_url('product_custom_url.json')
        data = extruct.extract(body, base_url='http://some-example.com',
                               syntaxes=['microdata'])
        self.assertEqual(data, expected)

    def test_microdata_with_returning_node(self):
        body, expected = self._microdata_custom_url('product_custom_url_and_node_id.json')
        data = extruct.extract(body, base_url='http://some-example.com',
                               syntaxes=['microdata'], return_html_node=True)
        # Node objects are not comparable to the fixture; swap in their ids.
        replace_node_ref_with_node_id(data)
        self.assertEqual(data, expected)

    def test_deprecated_url(self):
        # The legacy ``url=`` keyword still works but must warn.
        body, expected = self._microdata_custom_url('product_custom_url.json')
        with pytest.warns(DeprecationWarning):
            data = extruct.extract(body, url='http://some-example.com',
                                   syntaxes=['microdata'])
        self.assertEqual(data, expected)

    def test_extra_kwargs(self):
        body, expected = self._microdata_custom_url('product_custom_url.json')
        with self.assertRaises(TypeError):
            extruct.extract(body, foo='bar')

    def _microdata_custom_url(self, test_file):
        # Helper: (html body, expected microdata-only result) for a fixture.
        body = get_testdata('schema.org', 'product.html')
        expected = {'microdata': json.loads(
            get_testdata('schema.org', test_file)
            .decode('UTF-8'))}
        return body, expected

    def test_errors(self):
        body = ''
        # raise exceptions (the default behaviour)
        with pytest.raises(Exception):
            data = extruct.extract(body)
        # ignore exceptions
        expected = {}
        data = extruct.extract(body, errors='ignore')
        assert data == expected
        # log exceptions -- NOTE(review): errors='log' presumably logs and
        # continues; confirm against extruct's documentation.
        data = extruct.extract(body, errors='log')
        assert data == expected
| # -*- coding: utf-8 -*-
import json
import unittest
import pytest
import extruct
from tests import get_testdata, jsonize_dict, replace_node_ref_with_node_id
class TestGeneric(unittest.TestCase):
maxDiff = None
def test_all(self):
body = get_testdata('songkick', 'elysianfields.html')
expected = json.loads(get_testdata('songkick', 'elysianfields.json').decode('UTF-8'))
data = extruct.extract(body, base_url='http://www.songkick.com/artists/236156-elysian-fields')
self.assertEqual(jsonize_dict(data), expected)
def test_microdata_custom_url(self):
body, expected = self._microdata_custom_url('product_custom_url.json')
data = extruct.extract(body, base_url='http://some-example.com',
syntaxes=['microdata'])
self.assertEqual(data, expected)
def test_microdata_with_returning_node(self):
body, expected = self._microdata_custom_url('product_custom_url_and_node_id.json')
data = extruct.extract(body, base_url='http://some-example.com',
syntaxes=['microdata'], return_html_node=True)
replace_node_ref_with_node_id(data)
self.assertEqual(data, expected)
def test_deprecated_url(self):
body, expected = self._microdata_custom_url('product_custom_url.json')
with pytest.warns(DeprecationWarning):
data = extruct.extract(body, url='http://some-example.com',
syntaxes=['microdata'])
self.assertEqual(data, expected)
def test_extra_kwargs(self):
body, expected = self._microdata_custom_url('product_custom_url.json')
with self.assertRaises(TypeError):
extruct.extract(body, foo='bar')
def _microdata_custom_url(self, test_file):
body = get_testdata('schema.org', 'product.html')
expected = {'microdata': json.loads(
get_testdata('schema.org', test_file)
.decode('UTF-8'))}
return body, expected
| bsd-3-clause | Python |
513b5aecf1e34e775d98f12c41c9bd526a8504a5 | Improve compiler configuration in otf2 package | tmerrick1/spack,EmreAtes/spack,EmreAtes/spack,matthiasdiener/spack,iulian787/spack,lgarren/spack,TheTimmy/spack,mfherbst/spack,matthiasdiener/spack,krafczyk/spack,skosukhin/spack,skosukhin/spack,iulian787/spack,TheTimmy/spack,skosukhin/spack,EmreAtes/spack,tmerrick1/spack,lgarren/spack,LLNL/spack,TheTimmy/spack,EmreAtes/spack,lgarren/spack,LLNL/spack,skosukhin/spack,krafczyk/spack,TheTimmy/spack,LLNL/spack,TheTimmy/spack,tmerrick1/spack,matthiasdiener/spack,skosukhin/spack,EmreAtes/spack,iulian787/spack,tmerrick1/spack,krafczyk/spack,LLNL/spack,mfherbst/spack,mfherbst/spack,iulian787/spack,matthiasdiener/spack,LLNL/spack,lgarren/spack,matthiasdiener/spack,mfherbst/spack,mfherbst/spack,tmerrick1/spack,krafczyk/spack,iulian787/spack,lgarren/spack,krafczyk/spack | var/spack/packages/otf2/package.py | var/spack/packages/otf2/package.py | # FIXME: Add copyright
from spack import *
from contextlib import closing
import os
class Otf2(Package):
    """The Open Trace Format 2 is a highly scalable, memory efficient event
    trace data format plus support library."""
    homepage = "http://www.vi-hps.org/score-p"
    url = "http://www.vi-hps.org/upload/packages/otf2/otf2-1.4.tar.gz"

    version('1.4', 'a23c42e936eb9209c4e08b61c3cf5092',
            url="http://www.vi-hps.org/upload/packages/otf2/otf2-1.4.tar.gz")
    version('1.3.1', 'd0ffc4e858455ace4f596f910e68c9f2',
            url="http://www.vi-hps.org/upload/packages/otf2/otf2-1.3.1.tar.gz")
    version('1.2.1', '8fb3e11fb7489896596ae2c7c83d7fc8',
            url="http://www.vi-hps.org/upload/packages/otf2/otf2-1.2.1.tar.gz")

    # Compiler configuration written verbatim into the build directory so the
    # OTF2/Score-P build system does not override Spack's compiler settings.
    backend_user_provided = """\
CC=cc
CXX=c++
F77=f77
FC=f90
CFLAGS=-fPIC
CXXFLAGS=-fPIC
"""
    # Bug fix: F77_FOR_BUILD previously read "f70", a typo for "f77"
    # (matching F77 and MPIF77 in the other two configurations).
    frontend_user_provided = """\
CC_FOR_BUILD=cc
CXX_FOR_BUILD=c++
F77_FOR_BUILD=f77
FC_FOR_BUILD=f90
CFLAGS_FOR_BUILD=-fPIC
CXXFLAGS_FOR_BUILD=-fPIC
"""
    mpi_user_provided = """\
MPICC=cc
MPICXX=c++
MPIF77=f77
MPIFC=f90
MPI_CFLAGS=-fPIC
MPI_CXXFLAGS=-fPIC
"""

    @when('@:1.2')
    def version_specific_args(self):
        # Pre-1.3 releases accept an explicit platform selection.
        return ["--with-platform=disabled"]

    @when('@1.3:')
    def version_specific_args(self):
        # TODO: figure out what scorep's build does as of otf2 1.3
        return ["--with-custom-compilers"]

    def install(self, spec, prefix):
        """Configure, build and install OTF2 under *prefix*."""
        # Use a custom compiler configuration, otherwise the score-p
        # build system messes with spack's compiler settings.
        # Create these three files in the build directory.
        with closing(open("platform-backend-user-provided", "w")) as backend_file:
            backend_file.write(self.backend_user_provided)
        with closing(open("platform-frontend-user-provided", "w")) as frontend_file:
            frontend_file.write(self.frontend_user_provided)
        with closing(open("platform-mpi-user-provided", "w")) as mpi_file:
            mpi_file.write(self.mpi_user_provided)
        configure_args = ["--prefix=%s" % prefix,
                          "--enable-shared"]
        configure_args.extend(self.version_specific_args())
        configure(*configure_args)
        make()
        make("install")
| # FIXME: Add copyright
from spack import *
import os
class Otf2(Package):
"""The Open Trace Format 2 is a highly scalable, memory efficient event
trace data format plus support library."""
homepage = "http://www.vi-hps.org/score-p"
url = "http://www.vi-hps.org/upload/packages/otf2/otf2-1.4.tar.gz"
version('1.4', 'a23c42e936eb9209c4e08b61c3cf5092',
url="http://www.vi-hps.org/upload/packages/otf2/otf2-1.4.tar.gz")
version('1.3.1', 'd0ffc4e858455ace4f596f910e68c9f2',
url="http://www.vi-hps.org/upload/packages/otf2/otf2-1.3.1.tar.gz")
version('1.2.1', '8fb3e11fb7489896596ae2c7c83d7fc8',
url="http://www.vi-hps.org/upload/packages/otf2/otf2-1.2.1.tar.gz")
@when('@:1.2')
def version_specific_args(self, args):
return ["--with-platform=disabled"]
@when('@1.3:')
def version_specific_args(self, args):
# TODO: figure out what scorep's build does as of otf2 1.3
return []
def install(self, spec, prefix):
# FIXME: Modify the configure line to suit your build system here.
cc = os.environ["SPACK_CC"]
configure_args=["--prefix=%s" % prefix,
"--enable-shared"])
configure_args.extend(self.version_specific_args())
configure(*configure_args)
# FIXME: Add logic to build and install here
make()
make("install")
| lgpl-2.1 | Python |
bcad44ba03708a05cfbd86608c1d52e79c1394b5 | Remove broken import | deepchem/deepchem,ktaneishi/deepchem,Agent007/deepchem,ktaneishi/deepchem,miaecle/deepchem,peastman/deepchem,lilleswing/deepchem,miaecle/deepchem,peastman/deepchem,Agent007/deepchem,Agent007/deepchem,lilleswing/deepchem,miaecle/deepchem,deepchem/deepchem,ktaneishi/deepchem,lilleswing/deepchem | deepchem/models/tf_new_models/graph_topology.py | deepchem/models/tf_new_models/graph_topology.py | """Manages Placeholders for Graph convolution networks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import numpy as np
import tensorflow as tf
from deepchem.feat.mol_graphs import ConvMol
def merge_two_dicts(x, y):
    """Return a new dict with all entries of ``x`` and ``y``; ``y`` wins ties.

    Neither input dictionary is modified.
    """
    combined = dict(x)
    combined.update(y)
    return combined
def merge_dicts(l):
"""Convenience function to merge list of dictionaries."""
merged = {}
for dict in l:
merged = merge_two_dicts(merged, dict)
return merged
| """Manages Placeholders for Graph convolution networks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import numpy as np
import tensorflow as tf
from deepchem.nn.copy import Input
from deepchem.feat.mol_graphs import ConvMol
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def merge_dicts(l):
"""Convenience function to merge list of dictionaries."""
merged = {}
for dict in l:
merged = merge_two_dicts(merged, dict)
return merged
| mit | Python |
e9a30627897bd0eb00c10d5fe758c9673eae87fd | update function in case more than one uri present | erinspace/scrapi,CenterForOpenScience/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,erinspace/scrapi,fabianvf/scrapi | scrapi/harvesters/addis_ababa.py | scrapi/harvesters/addis_ababa.py | '''
Harvester for the Addis Ababa University Institutional Repository for the SHARE project
Example API call: http://etd.aau.edu.et/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base import helpers
def oai_process_uris_addis_ababa(*args):
    """Collect provider/object URIs from OAI identifier fields.

    Bare hdl.handle.net identifiers are rewritten into resolvable
    etd.aau.edu.et URLs. Raises ValueError when no URI at all is present.
    """
    identifiers = helpers.gather_identifiers(args)
    provider_uris, object_uris = helpers.seperate_provider_object_uris(identifiers)
    potential_uris = provider_uris + object_uris
    handle_prefix = 'http://hdl.handle.net/123456789/'
    for i, uri in enumerate(potential_uris):
        if handle_prefix in uri:
            doc_id = uri.replace(handle_prefix, '')
            potential_uris[i] = 'http://etd.aau.edu.et/handle/123456789/' + doc_id
    try:
        canonical_uri = potential_uris[0]
    except IndexError:
        raise ValueError('No Canonical URI was returned for this record.')
    # Split the rewritten list back into its two groups. Bug fix: the object
    # slice previously started at len(provider_uris) + 1, silently dropping
    # the first object URI.
    num_provider = len(provider_uris)
    provider_uris = potential_uris[:num_provider]
    object_uris = potential_uris[num_provider:]
    return {
        'canonicalUri': canonical_uri,
        'objectUris': object_uris,
        'providerUris': provider_uris
    }
class AauHarvester(OAIHarvester):
    """OAI-PMH harvester for the Addis Ababa University institutional repository."""
    short_name = 'addis_ababa'
    long_name = 'Addis Ababa University Institutional Repository'
    url = 'http://etd.aau.edu.et'

    @property
    def schema(self):
        # Override URI handling so bare hdl.handle.net identifiers are
        # rewritten into resolvable etd.aau.edu.et URLs.
        return helpers.updated_schema(self._schema, {
            "uris": ('//ns0:header/ns0:identifier/node()', '//dc:identifier/node()', oai_process_uris_addis_ababa)
        })

    base_url = 'http://etd.aau.edu.et/oai/request'
    property_list = ['date', 'type', 'identifier', 'setSpec']
    timezone_granularity = True
| '''
Harvester for the Addis Ababa University Institutional Repository for the SHARE project
Example API call: http://etd.aau.edu.et/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base import helpers
def oai_process_uris_addis_ababa(*args):
identifiers = helpers.gather_identifiers(args)
provider_uris, object_uris = helpers.seperate_provider_object_uris(identifiers)
potential_uris = (provider_uris + object_uris)
for uri in potential_uris:
if 'http://hdl.handle.net/123456789/' in uri:
ind = potential_uris.index(uri)
doc_id = potential_uris[ind].replace('http://hdl.handle.net/123456789/', '')
potential_uris[ind] = 'http://etd.aau.edu.et/handle/123456789/' + doc_id
try:
canonical_uri = potential_uris[0]
except IndexError:
raise ValueError('No Canonical URI was returned for this record.')
return {
'canonicalUri': canonical_uri,
'objectUris': object_uris,
'providerUris': provider_uris
}
class AauHarvester(OAIHarvester):
short_name = 'addis_ababa'
long_name = 'Addis Ababa University Institutional Repository'
url = 'http://etd.aau.edu.et'
@property
def schema(self):
return helpers.updated_schema(self._schema, {
"uris": ('//ns0:header/ns0:identifier/node()', '//dc:identifier/node()', oai_process_uris_addis_ababa)
})
base_url = 'http://etd.aau.edu.et/oai/request'
property_list = ['date', 'type', 'identifier', 'setSpec']
timezone_granularity = True
| apache-2.0 | Python |
0728964b7799845723a110328a46bc9c6d4c8614 | Fix examples/python3-urllib/run.py | ouspg/trytls,ouspg/trytls,ouspg/trytls,ouspg/trytls,ouspg/trytls,ouspg/trytls,ouspg/trytls,ouspg/trytls | examples/python3-urllib/run.py | examples/python3-urllib/run.py | import sys
import ssl
import urllib.error
import urllib.request

# Command line: <host> <port> [ca-bundle]; the CA bundle is optional.
host = sys.argv[1]
port = sys.argv[2]
cafile = sys.argv[3] if len(sys.argv) > 3 else None
try:
    urllib.request.urlopen("https://" + host + ":" + port, cafile=cafile)
except ssl.CertificateError:
    # Hostname/certificate mismatch is raised directly (not wrapped in
    # URLError), so it needs its own handler.
    print("FAIL")
except urllib.error.URLError as exc:
    # Only TLS-level failures count as a rejected certificate; any other
    # URLError (DNS failure, connection refused, ...) is a real error.
    if not isinstance(exc.reason, ssl.SSLError):
        raise
    print("FAIL")
else:
    print("OK")
| import sys
import ssl
import urllib.error
import urllib.request
host = sys.argv[1]
port = sys.argv[2]
cafile = sys.argv[3] if len(sys.argv) > 3 else None
try:
urllib.request.urlopen("https://" + host + ":" + port, cafile=cafile)
except urllib.error.URLError as exc:
if not isinstance(exc.reason, ssl.SSLError):
raise
print("FAIL")
else:
print("OK")
| mit | Python |
f7b351a43d99a6063c49dfdf8db60c654fd89b74 | Add django setup for some initialization | CenterForOpenScience/scrapi,fabianvf/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,erinspace/scrapi,felliott/scrapi,mehanig/scrapi,fabianvf/scrapi,felliott/scrapi | scrapi/processing/postgres.py | scrapi/processing/postgres.py | from __future__ import absolute_import
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webview.settings")
import django
import logging
from api.webview.models import Document
from scrapi import events
from scrapi.processing.base import BaseProcessor
django.setup()
logger = logging.getLogger(__name__)
class PostgresProcessor(BaseProcessor):
    """Stores raw and normalized harvester documents via the Django ORM."""
    NAME = 'postgres'

    @events.logged(events.PROCESSING, 'raw.postgres')
    def process_raw(self, raw_doc):
        # Upsert: reuse the existing (source, docID) row or create a new one.
        # NOTE(review): _get_by_source_id appears to return a QuerySet (falsy
        # when empty, so the ``or`` fallback works), but when a match exists
        # the attribute assignments below would hit the QuerySet rather than
        # a Document instance -- confirm intended behaviour.
        source, docID = raw_doc['source'], raw_doc['docID']
        document = self._get_by_source_id(Document, source, docID) or Document(source=source, docID=docID)
        document.raw = raw_doc.attributes
        document.save()

    @events.logged(events.PROCESSING, 'normalized.postgres')
    def process_normalized(self, raw_doc, normalized):
        # Same upsert pattern as process_raw, storing the normalized form
        # plus the provider's update timestamp.
        source, docID = raw_doc['source'], raw_doc['docID']
        document = self._get_by_source_id(Document, source, docID) or Document(source=source, docID=docID)
        document.normalized = normalized.attributes
        document.providerUpdatedDateTime = normalized['providerUpdatedDateTime']
        document.save()

    def _get_by_source_id(self, model, source, docID):
        # NOTE(review): the ``model`` parameter is ignored; the query always
        # runs against Document.
        return Document.objects.filter(source=source, docID=docID)
| from __future__ import absolute_import
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webview.settings")
import logging
from api.webview.models import Document
from scrapi import events
from scrapi.processing.base import BaseProcessor
logger = logging.getLogger(__name__)
class PostgresProcessor(BaseProcessor):
NAME = 'postgres'
@events.logged(events.PROCESSING, 'raw.postgres')
def process_raw(self, raw_doc):
source, docID = raw_doc['source'], raw_doc['docID']
document = self._get_by_source_id(Document, source, docID) or Document(source=source, docID=docID)
document.raw = raw_doc.attributes
document.save()
@events.logged(events.PROCESSING, 'normalized.postgres')
def process_normalized(self, raw_doc, normalized):
source, docID = raw_doc['source'], raw_doc['docID']
document = self._get_by_source_id(Document, source, docID) or Document(source=source, docID=docID)
document.normalized = normalized.attributes
document.providerUpdatedDateTime = normalized['providerUpdatedDateTime']
document.save()
def _get_by_source_id(self, model, source, docID):
return Document.objects.filter(source=source, docID=docID)
| apache-2.0 | Python |
8df261cb9183e5933f49c441f3af8940f8449059 | Improve finders code coverage, fix tests not running due to inheretince approach with __init__ | PyCQA/isort,PyCQA/isort | tests/test_finders.py | tests/test_finders.py | from unittest.mock import patch
import pytest
from isort import finders, settings
from isort.finders import FindersManager
class TestFindersManager:
    def test_init(self):
        assert FindersManager(settings.DEFAULT_CONFIG)

        # A finder whose constructor raises must not break manager creation.
        class ExceptionOnInit(finders.BaseFinder):
            def __init__(*args, **kwargs):
                super().__init__(*args, **kwargs)
                raise ValueError("test")

        with patch(
            "isort.finders.FindersManager._default_finders_classes",
            FindersManager._default_finders_classes + (ExceptionOnInit,),
        ):
            assert FindersManager(settings.Config(verbose=True))

    def test_no_finders(self):
        # With no finder classes registered, nothing can be located.
        assert FindersManager(settings.DEFAULT_CONFIG, []).find("isort") is None

    def test_find_broken_finder(self):
        # A finder that raises inside find() is skipped, not fatal.
        class ExceptionOnFind(finders.BaseFinder):
            def find(*args, **kwargs):
                raise ValueError("test")

        assert (
            FindersManager(settings.Config(verbose=True), [ExceptionOnFind]).find("isort") is None
        )
class AbstractTestFinder:
    # Subclasses override ``kind`` with the concrete finder class under test.
    kind = finders.BaseFinder

    @classmethod
    def setup_class(cls):
        # pytest class-level hook: one shared finder instance per test class.
        cls.instance = cls.kind(settings.DEFAULT_CONFIG)

    def test_create(self):
        assert self.kind(settings.DEFAULT_CONFIG)

    def test_find(self):
        # find() must tolerate both a real module name and the empty string.
        self.instance.find("isort")
        self.instance.find("")
class TestForcedSeparateFinder(AbstractTestFinder):
kind = finders.ForcedSeparateFinder
class TestDefaultFinder(AbstractTestFinder):
kind = finders.DefaultFinder
class TestKnownPatternFinder(AbstractTestFinder):
kind = finders.KnownPatternFinder
class TestLocalFinder(AbstractTestFinder):
kind = finders.LocalFinder
class TestPathFinder(AbstractTestFinder):
kind = finders.PathFinder
class TestPipfileFinder(AbstractTestFinder):
kind = finders.PipfileFinder
class TestRequirementsFinder(AbstractTestFinder):
    kind = finders.RequirementsFinder

    def test_no_pipreqs(self):
        # Without pipreqs available the finder cannot classify anything.
        with patch("isort.finders.pipreqs", None):
            assert not self.kind(settings.DEFAULT_CONFIG).find("isort")

    def test_not_enabled(self):
        # A disabled finder must report nothing even for known modules.
        test_finder = self.kind(settings.DEFAULT_CONFIG)
        test_finder.enabled = False
        assert not test_finder.find("isort")
| from unittest.mock import patch
from isort import finders, settings
from isort.finders import FindersManager
class TestFindersManager:
def test_init(self):
assert FindersManager(settings.DEFAULT_CONFIG)
class ExceptionOnInit(finders.BaseFinder):
def __init__(*args, **kwargs):
super().__init__(*args, **kwargs)
raise ValueError("test")
with patch(
"isort.finders.FindersManager._default_finders_classes",
FindersManager._default_finders_classes + (ExceptionOnInit,),
):
assert FindersManager(settings.Config(verbose=True))
def test_no_finders(self):
assert FindersManager(settings.DEFAULT_CONFIG, []).find("isort") is None
def test_find_broken_finder(self):
class ExceptionOnFind(finders.BaseFinder):
def find(*args, **kwargs):
raise ValueError("test")
assert (
FindersManager(settings.Config(verbose=True), [ExceptionOnFind]).find("isort") is None
)
class AbstractTestFinder:
kind = finders.BaseFinder
def __init__(self):
self.instance = self.kind(settings.DEFAULT_CONFIG)
def test_create(self):
assert self.kind(settings.DEFAULT_CONFIG)
def test_find(self):
self.instance.find("isort")
class TestForcedSeparateFinder(AbstractTestFinder):
kind = finders.ForcedSeparateFinder
class TestDefaultFinder(AbstractTestFinder):
kind = finders.DefaultFinder
class TestKnownPatternFinder(AbstractTestFinder):
kind = finders.KnownPatternFinder
class TestLocalFinder(AbstractTestFinder):
kind = finders.LocalFinder
class TestPathFinder(AbstractTestFinder):
kind = finders.PathFinder
class TestPipfileFinder(AbstractTestFinder):
kind = finders.PipfileFinder
class TestRequirementsFinder(AbstractTestFinder):
kind = finders.RequirementsFinder
def test_no_pipreqs(self):
with patch("isort.finders.pipreqs", None):
assert not self.instance.find("isort")
| mit | Python |
0ea1153438c1d98232a921c8d14d401a541e95fd | Fix regex example, the model must not be a unicode string. | vmuriart/grako,frnknglrt/grako | examples/regex/regex_parser.py | examples/regex/regex_parser.py | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from parser_base import RegexParser
import model
class RegexSemantics(object):
def __init__(self):
super(RegexSemantics, self).__init__()
self._count = 0
def START(self, ast):
return model.Regex(ast)
def CHOICE(self, ast):
return model.Choice(ast.opts)
def SEQUENCE(self, ast):
if not ast.terms:
return model.Empty()
elif len(ast.terms) < 2:
return ast.terms[0]
else:
return model.Sequence(ast.terms)
def CLOSURE(self, ast):
return model.Closure(ast)
def SUBEXP(self, ast):
return ast
def LITERAL(self, ast):
return model.Literal(ast)
def translate(regex, trace=False):
parser = RegexParser(trace=trace, semantics=RegexSemantics())
model = parser.parse(regex, 'START')
model.set_rule_numbers()
return model.render().encode("ascii")
| # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from parser_base import RegexParser
import model
class RegexSemantics(object):
def __init__(self):
super(RegexSemantics, self).__init__()
self._count = 0
def START(self, ast):
return model.Regex(ast)
def CHOICE(self, ast):
return model.Choice(ast.opts)
def SEQUENCE(self, ast):
if not ast.terms:
return model.Empty()
elif len(ast.terms) < 2:
return ast.terms[0]
else:
return model.Sequence(ast.terms)
def CLOSURE(self, ast):
return model.Closure(ast)
def SUBEXP(self, ast):
return ast
def LITERAL(self, ast):
return model.Literal(ast)
def translate(regex, trace=False):
parser = RegexParser(trace=trace, semantics=RegexSemantics())
model = parser.parse(regex, 'START')
model.set_rule_numbers()
return model.render()
| bsd-2-clause | Python |
bb86870b4494d6f001d82a824f85b31de5912bd5 | Update session.py | TingPing/plugins,TingPing/plugins | HexChat/session.py | HexChat/session.py | from __future__ import print_function
import hexchat
__module_name__ = "session"
__module_author__ = "TingPing"
__module_version__ = "1"
__module_description__ = "Saves current session for next start"
# To use just disable auto-connect and start using 'Quit and Save' from the menu.
def load_session():
for pref in hexchat.list_pluginpref():
if len(pref) > 8 and pref[:8] == 'session_':
network = pref[8:]
channels = hexchat.get_pluginpref('session_' + network).split(',')
hexchat.command('url irc://"{}"/'.format(network)) # Using url avoids autojoin
hexchat.find_context(server=network).set()
delay = hexchat.get_prefs('irc_join_delay') + 10
for chan in channels:
if chan[0] != '#':
hexchat.command('timer {} query -nofocus {}'.format(delay, chan))
else:
hexchat.command('timer {} join {}'.format(delay, chan))
hexchat.del_pluginpref('session_' + network)
def quit_cb(word, word_eol, userdata):
networks = {}
for chan in hexchat.get_list('channels'):
if chan.type != 1:
if not chan.network in networks:
networks[chan.network] = []
if (chan.channelkey):
networks[chan.network].append(chan.channel + ' ' + chan.channelkey)
else:
networks[chan.network].append(chan.channel)
for network, channels in networks.items():
hexchat.set_pluginpref('session_' + network, ','.join(channels))
hexchat.find_context(server=network).command('quit')
hexchat.command('timer 1 killall')
def unload_cb(userdata):
print(__module_name__, 'version', __module_version__, 'unloaded.')
load_session()
hexchat.hook_command('quitandsave', quit_cb)
hexchat.hook_unload(unload_cb)
hexchat.command('menu -p-1 add "HexChat/Quit and Save" "quitandsave"')
print(__module_name__, 'version', __module_version__, 'loaded.')
| from __future__ import print_function
import xchat as hexchat
__module_name__ = "session"
__module_author__ = "TingPing"
__module_version__ = "1"
__module_description__ = "Saves current session for next start"
# To use just disable auto-connect and start using 'Quit and Save' from the menu.
def load_session():
for pref in hexchat.list_pluginpref():
if len(pref) > 8 and pref[:8] == 'session_':
network = pref[8:]
channels = hexchat.get_pluginpref('session_' + network).split(',')
hexchat.command('url irc://"{}"/'.format(network)) # Using url avoids autojoin
hexchat.find_context(server=network).set()
delay = hexchat.get_prefs('irc_join_delay') + 10
for chan in channels:
if chan[0] != '#':
hexchat.command('timer {} query -nofocus {}'.format(delay, chan))
else:
hexchat.command('timer {} join {}'.format(delay, chan))
hexchat.del_pluginpref('session_' + network)
def quit_cb(word, word_eol, userdata):
networks = {}
for chan in hexchat.get_list('channels'):
if chan.type != 1:
if not chan.network in networks:
networks[chan.network] = []
if (chan.channelkey):
networks[chan.network].append(chan.channel + ' ' + chan.channelkey)
else:
networks[chan.network].append(chan.channel)
for network, channels in networks.items():
hexchat.set_pluginpref('session_' + network, ','.join(channels))
hexchat.find_context(server=network).command('quit')
hexchat.command('timer 1 killall')
def unload_cb(userdata):
print(__module_name__, 'version', __module_version__, 'unloaded.')
load_session()
hexchat.hook_command('quitandsave', quit_cb)
hexchat.hook_unload(unload_cb)
hexchat.command('menu -p-1 add "HexChat/Quit and Save" "quitandsave"')
print(__module_name__, 'version', __module_version__, 'loaded.')
| mit | Python |
0da74f42f7d7311859a340b0e72c1b8902287d5c | Allow for local run of example and demos installed with tool. | CERN/TIGRE,CERN/TIGRE,CERN/TIGRE,CERN/TIGRE | Python/tigre/utilities/sample_loader.py | Python/tigre/utilities/sample_loader.py | from __future__ import division
import os
import numpy as np
import scipy.io
import scipy.ndimage.interpolation
def load_head_phantom(number_of_voxels=None):
if number_of_voxels is None:
number_of_voxels = np.array((128, 128, 128))
dirname = os.path.dirname(__file__)
dirname = os.path.join(dirname,'../../../Common/data/head.mat')
if not os.path.isfile(dirname):
dirname = os.path.dirname(__file__)
dirname = os.path.join(dirname,'./../../data/head.mat')
test_data = scipy.io.loadmat(dirname)
# Loads data in F_CONTIGUOUS MODE (column major), convert to Row major
image = test_data['img'].transpose(2,1,0).copy()
image_dimensions = image.shape
zoom_x = number_of_voxels[0] / image_dimensions[0]
zoom_y = number_of_voxels[1] / image_dimensions[1]
zoom_z = number_of_voxels[2] / image_dimensions[2]
# TODO: add test for this is resizing and not simply zooming
resized_image = scipy.ndimage.interpolation.zoom(image, (zoom_x, zoom_y, zoom_z), order=3, prefilter=False)
return resized_image
| from __future__ import division
import os
import numpy as np
import scipy.io
import scipy.ndimage.interpolation
def load_head_phantom(number_of_voxels=None):
if number_of_voxels is None:
number_of_voxels = np.array((128, 128, 128))
dirname = os.path.dirname(__file__)
dirname = os.path.join(dirname,'../../../Common/data/head.mat')
test_data = scipy.io.loadmat(dirname)
# Loads data in F_CONTIGUOUS MODE (column major), convert to Row major
image = test_data['img'].transpose(2,1,0).copy()
image_dimensions = image.shape
zoom_x = number_of_voxels[0] / image_dimensions[0]
zoom_y = number_of_voxels[1] / image_dimensions[1]
zoom_z = number_of_voxels[2] / image_dimensions[2]
# TODO: add test for this is resizing and not simply zooming
resized_image = scipy.ndimage.interpolation.zoom(image, (zoom_x, zoom_y, zoom_z), order=3, prefilter=False)
return resized_image
| bsd-3-clause | Python |
eb7e89f8c4ce1ef928dafee160f28966818db669 | Add TxT import (#2643) | intel-analytics/analytics-zoo,intel-analytics/analytics-zoo,intel-analytics/analytics-zoo | pyzoo/zoo/models/recommendation/__init__.py | pyzoo/zoo/models/recommendation/__init__.py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .recommender import *
from .neuralcf import *
from .wide_and_deep import *
from .session_recommender import *
from .txt import *
| #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .recommender import *
from .neuralcf import *
from .wide_and_deep import *
from .session_recommender import *
| apache-2.0 | Python |
cef6c095681f478ad1a04691573ec308bd15143c | fix import for HQPillow | qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,qedsoftware/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq | corehq/pillows/group.py | corehq/pillows/group.py | from django.conf import settings
from corehq.apps.groups.models import Group
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from .mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from .base import HQPillow
class GroupPillow(HQPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
| from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(HQPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
| bsd-3-clause | Python |
ff7ce0ca2a8019e67d512b47a4a340f176d96adf | update example to reflect current API | stscieisenhamer/glue,saimn/glue,stscieisenhamer/glue,JudoWill/glue,JudoWill/glue,saimn/glue | doc/simple_glue.py | doc/simple_glue.py | from glue.core.message import DataMessage, SubsetMessage
from glue.core import HubListener, Data, DataCollection
class MyClient(HubListener):
def register_to_hub(self, hub):
""" Sign up to receive DataMessages from the hub """
hub.subscribe(self, # subscribing object
DataMessage, # message type to subscribe to
handler = self.receive_message) # method to call
def receive_message(self, message):
""" Receives each DataMessage relay """
print " MyClient received a message \n"
# create objects
client = MyClient()
data = Data()
subset = data.new_subset()
data_collection = DataCollection()
# connect them to each other
hub = data_collection.hub
data_collection.append(data)
client.register_to_hub(hub)
# manually send a DataMessage. Relayed to MyClient
print 'Manually sending DataMessage'
message = DataMessage(data)
hub.broadcast(message)
#modify the data object. Automatically generates a DataMessage
print 'Automatically triggering DataMessage'
data.label = "New label"
#send a SubsetMessage to the Hub.
print 'Manually sending SubsetMessage'
message = SubsetMessage(subset)
hub.broadcast(message) # nothing is printed
| from glue.core.message import DataMessage, SubsetMessage
from glue.core import Hub, HubListener, Data, DataCollection
class MyClient(HubListener):
def register_to_hub(self, hub):
""" Sign up to receive DataMessages from the hub """
hub.subscribe(self, # subscribing object
DataMessage, # message type to subscribe to
handler = self.receive_message) # method to call
def receive_message(self, message):
""" Receives each DataMessage relay """
print " MyClient received a message \n"
# create objects
hub = Hub()
client = MyClient()
data = Data()
subset = data.new_subset()
data_collection = DataCollection()
# connect them to each other
data_collection.append(data)
data_collection.register_to_hub(hub)
client.register_to_hub(hub)
# manually send a DataMessage. Relayed to MyClient
print 'Manually sending DataMessage'
message = DataMessage(data)
hub.broadcast(message)
#modify the data object. Automatically generates a DataMessage
print 'Automatically triggering DataMessage'
data.label = "New label"
#send a SubsetMessage to the Hub.
print 'Manually sending SubsetMessage'
message = SubsetMessage(subset)
hub.broadcast(message) # nothing is printed
| bsd-3-clause | Python |
52eef06d6ab50f5287949ab8b33f145a58f1cb44 | bump version 0.1.4 -> 0.1.5 | connectome-neuprint/neuprint-python,connectome-neuprint/neuprint-python | neuprint/__init__.py | neuprint/__init__.py | __version__ = (0, 1, 5)
__verstr__ = "0.1.5"
from .client import Client
from .fetch import *
| __version__ = (0, 1, 4)
__verstr__ = "0.1.4"
from .client import Client
from .fetch import *
| bsd-3-clause | Python |
7ebfd6ca59b167ae4b9cf582c20f517bd5500f24 | fix final 2.6 test issue | tmlee/pyrollbar,Affirm/pyrollbar,rollbar/pyrollbar,xbmc-catchuptv-au/script.module.rollbar,juggernaut/pyrollbar | rollbar/test/__init__.py | rollbar/test/__init__.py | import difflib
import pprint
import unittest
# from http://hg.python.org/cpython/file/67ada6ab7fe2/Lib/unittest/util.py
# for Python 2.6 support
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
class BaseTest(unittest.TestCase):
# from http://hg.python.org/cpython/file/67ada6ab7fe2/Lib/unittest/case.py
# for Python 2.6 support
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
| import difflib
import pprint
import unittest
# from http://hg.python.org/cpython/file/67ada6ab7fe2/Lib/unittest/util.py
# for Python 2.6 support
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
class BaseTest(unittest.TestCase):
# from http://hg.python.org/cpython/file/67ada6ab7fe2/Lib/unittest/case.py
# for Python 2.6 support
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg)) | mit | Python |
fcd85a1b15ca8b82f892bba171c21f9a1b4f6e4a | Correct URI and list categories | acigna/pywez,acigna/pywez,acigna/pywez | SOAPpy/tests/alanbushTest.py | SOAPpy/tests/alanbushTest.py | #!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
import sys
sys.path.insert (1, '..')
import SOAP
ident = '$Id$'
SoapEndpointURL = 'http://www.alanbushtrust.org.uk/soap/compositions.asp'
MethodNamespaceURI = 'urn:alanbushtrust-org-uk:soap.methods'
SoapAction = MethodNamespaceURI + ".GetCategories"
server = SOAP.SOAPProxy( SoapEndpointURL, namespace=MethodNamespaceURI, soapaction=SoapAction )
for category in server.GetCategories():
print category
| #!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
import sys
sys.path.insert (1, '..')
import SOAP
ident = '$Id$'
SoapEndpointURL = 'http://www.alanbushtrust.org.uk/soap/compositions.asp'
MethodNamespaceURI = 'urn:alanbushtrust-org-uk:soap:methods'
SoapAction = MethodNamespaceURI + "#GetCategories"
server = SOAP.SOAPProxy( SoapEndpointURL, namespace=MethodNamespaceURI, soapaction=SoapAction )
print "server level>>", server.GetCategories()
| mit | Python |
5e2d8aad2771122da26507b67630c055e2f13de3 | make reindent. | pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments | external/markdown-processor.py | external/markdown-processor.py | # -*- coding: utf-8 -*-
"""
The Pygments Markdown Preprocessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
from markdown import Markdown
md = Markdown()
md.preprocessors.insert(0, CodeBlockPreprocessor())
markdown = md.__str__
markdown is then a callable that can be passed to the context of
a template and used in that template, for example.
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
to create a stylesheet to be added to the website.
You can then highlight source code in your markdown markup::
[sourcecode:lexer]
some code
[/sourcecode]
.. _Markdown: http://www.freewisdom.org/projects/python-markdown/
:copyright: 2007 by Jochen Kupperschmidt.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import re
from markdown import Preprocessor
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class CodeBlockPreprocessor(Preprocessor):
pattern = re.compile(
r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
formatter = HtmlFormatter(noclasses=INLINESTYLES)
def run(self, lines):
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, formatter)
code = code.replace('\n\n', '\n \n')
return '\n\n<div class="code">%s</div>\n\n' % code
return self.pattern.sub(
repl, '\n'.join(lines)).split('\n')
| # -*- coding: utf-8 -*-
"""
The Pygments Markdown Preprocessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
from markdown import Markdown
md = Markdown()
md.preprocessors.insert(0, CodeBlockPreprocessor())
markdown = md.__str__
markdown is then a callable that can be passed to the context of
a template and used in that template, for example.
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
to create a stylesheet to be added to the website.
You can then highlight source code in your markdown markup::
[sourcecode:lexer]
some code
[/sourcecode]
.. _Markdown: http://www.freewisdom.org/projects/python-markdown/
:copyright: 2007 by Jochen Kupperschmidt.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import re
from markdown import Preprocessor
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class CodeBlockPreprocessor(Preprocessor):
pattern = re.compile(
r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
formatter = HtmlFormatter(noclasses=INLINESTYLES)
def run(self, lines):
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, formatter)
code = code.replace('\n\n', '\n \n')
return '\n\n<div class="code">%s</div>\n\n' % code
return self.pattern.sub(
repl, '\n'.join(lines)).split('\n')
| bsd-2-clause | Python |
9eb440774a7fba22fbafcb9958a185cec8461649 | Fix #722: aggregate_coverage.py does not aggregate the coverage reports of the same tracked lines | yapdns/yapdnsbeat,yapdns/yapdnsbeat | scripts/aggregate_coverage.py | scripts/aggregate_coverage.py | #!/usr/bin/env python
"""Simple script to concatenate coverage reports.
"""
import os
import sys
import argparse
import fnmatch
def main(arguments):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('dir', help="Input dir to search recursively for .cov files")
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
# Recursively find all matching .cov files.
matches = []
for root, dirnames, filenames in os.walk(args.dir):
for filename in fnmatch.filter(filenames, '*.cov'):
matches.append(os.path.join(root, filename))
# Write to output.
lines = {}
args.outfile.write('mode: atomic\n')
for m in matches:
if os.path.abspath(args.outfile.name) != os.path.abspath(m):
with open(m) as f:
for line in f:
if not line.startswith('mode:') and "vendor" not in line:
(position, stmt, count) = line.split(" ")
stmt = int(stmt)
count = int (count)
prev_count = 0
if lines.has_key(position):
(_, prev_stmt, prev_count) = lines[position]
assert prev_stmt == stmt
lines[position] = (position, stmt, prev_count + count)
for line in sorted(["%s %d %d\n" % lines[key] for key in lines.keys()]):
args.outfile.write(line)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| #!/usr/bin/env python
"""Simple script to concatenate coverage reports.
"""
import os
import sys
import argparse
import fnmatch
def main(arguments):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('dir', help="Input dir to search recursively for .cov files")
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
# Recursively find all matching .cov files.
matches = []
for root, dirnames, filenames in os.walk(args.dir):
for filename in fnmatch.filter(filenames, '*.cov'):
matches.append(os.path.join(root, filename))
# Write to output.
args.outfile.write('mode: atomic\n')
for m in matches:
if os.path.abspath(args.outfile.name) != os.path.abspath(m):
with open(m) as f:
for line in f:
if not line.startswith('mode:'):
args.outfile.write(line)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit | Python |
bc2c0395b3374c0f6abfb7bead9a9e5acd468263 | Update expedia.py | twiindan/selenium_lessons | Selenium/exercices/solutions/expedia.py | Selenium/exercices/solutions/expedia.py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
# Configure the baseURL
baseUrl = "https://www.expedia.es"
# Create a webDriver instance and maximize window
driver = webdriver.Firefox()
driver.maximize_window()
# Navigage to URL and put a 10 seconds implicit wait
driver.get(baseUrl)
driver.implicitly_wait(10)
# Find and click on element "Flights"
travelType = driver.find_element(By.ID, "tab-flight-tab-hp")
travelType.click()
# Find departure textbox and type "Barcelona"
originBox = driver.find_element(By.ID, "flight-origin-hp-flight")
originBox.clear()
originBox.send_keys("Barcelona")
# Find departure textbox and type "Madrid"
destinationBox = driver.find_element(By.ID, "flight-destination-hp-flight")
destinationBox.clear()
destinationBox.send_keys("Madrid")
# Find departure time and type "23/11/2018"
departTime = driver.find_element(By.ID, "flight-departing-hp-flight")
departTime.clear()
departTime.send_keys("23/11/2018")
# Find departure time and type "30/11/2018"
returnTime = driver.find_element(By.ID, "flight-returning-hp-flight")
returnTime.clear()
returnTime.send_keys("30/11/2018")
# Find adult dropdown and select 5 adults
adultsDropdown = driver.find_element(By.ID, "flight-adults-hp-flight")
adultsSel = Select(adultsDropdown)
adultsSel.select_by_value("5")
# Find child dropdown and select 1 children
childDropdown = driver.find_element(By.ID, "flight-children-hp-flight")
childSel = Select(childDropdown)
childSel.select_by_value("1")
# Find the first option in the child age
oldDropdown = driver.find_element(By.ID, "flight-age-select-1-hp-flight")
oldSel = Select(oldDropdown)
oldSel.select_by_index(1)
# Find the "Find" button and click on
findButtons = driver.find_elements(By.XPATH, "//span[text()='Buscar']")
for button in findButtons:
if button.is_displayed():
button.click()
break
#Quit Driver
driver.quit()
| from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
# Configure the baseURL
baseUrl = "https://www.expedia.es"
# Create a webDriver instance and maximize window
driver = webdriver.Firefox()
driver.maximize_window()
# Navigage to URL and put a 10 seconds implicit wait
driver.get(baseUrl)
driver.implicitly_wait(10)
# Find and click on element "Flights"
travelType = driver.find_element(By.ID, "tab-flight-tab-hp")
travelType.click()
# Find departure textbox and type "Barcelona"
originBox = driver.find_element(By.ID, "flight-origin-hp-flight")
originBox.clear()
originBox.send_keys("Barcelona")
# Find departure textbox and type "Madrid"
destinationBox = driver.find_element(By.ID, "flight-destination-hp-flight")
destinationBox.clear()
destinationBox.send_keys("Madrid")
# Find departure time and type "23/11/2017"
departTime = driver.find_element(By.ID, "flight-departing-hp-flight")
departTime.clear()
departTime.send_keys("23/11/2017")
# Find departure time and type "30/11/2017"
returnTime = driver.find_element(By.ID, "flight-returning-hp-flight")
returnTime.clear()
returnTime.send_keys("30/11/2017")
# Find adult dropdown and select 5 adults
adultsDropdown = driver.find_element(By.ID, "flight-adults-hp-flight")
adultsSel = Select(adultsDropdown)
adultsSel.select_by_value("5")
# Find child dropdown and select 1 children
childDropdown = driver.find_element(By.ID, "flight-children-hp-flight")
childSel = Select(childDropdown)
childSel.select_by_value("1")
# Find the first option in the child age
oldDropdown = driver.find_element(By.ID, "flight-age-select-1-hp-flight")
oldSel = Select(oldDropdown)
oldSel.select_by_index(1)
# Find the "Find" button and click on
findButtons = driver.find_elements(By.XPATH, "//span[text()='Buscar']")
for button in findButtons:
if button.is_displayed():
button.click()
break
#Quit Driver
driver.quit()
| apache-2.0 | Python |
d53a4a9fe6f37c9886977bd27a1d8caad7eb7c1c | Use black as default text colour | tonioo/rst2pdf,sychen/rst2pdf,aquavitae/rst2pdf,openpolis/rst2pdf-patched-docutils-0.8,sychen/rst2pdf,aquavitae/rst2pdf-py3-dev,aquavitae/rst2pdf,tonioo/rst2pdf,openpolis/rst2pdf-patched-docutils-0.8,aquavitae/rst2pdf-py3-dev | rst2pdf/pygments2json.py | rst2pdf/pygments2json.py | # -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
'''
Creates a rst2pdf stylesheet for each pygments style.
'''
import sys
import os
import simplejson
from pygments.token import STANDARD_TYPES
from pygments import styles as pstyles
def css2rl(css):
dstyles = {}
# First create a dumb stylesheet
for key in STANDARD_TYPES:
dstyles["pygments-" + STANDARD_TYPES[key]] = {'parent': 'code'}
styles = []
for line in css.splitlines():
line = line.strip()
sname = "pygments-" + line.split(' ')[0][1:]
style = dstyles.get(sname, {'parent': 'code'})
options = line.split('{')[1].split('}')[0].split(';')
for option in options:
option = option.strip()
option, argument = option.split(':')
option=option.strip()
argument=argument.strip()
if option == 'color':
style['textColor'] = argument.strip()
if option == 'background-color':
style['backColor'] = argument.strip()
# These two can come in any order
if option == 'font-weight' and argument == 'bold':
if 'fontName' in style and \
style['fontName'] == 'stdMonoItalic':
style['fontName'] = 'stdMonoBoldItalic'
else:
style['fontName'] = 'stdMonoBold'
if option == 'font-style' and argument == 'italic':
if 'fontName' in style and style['fontName'] == 'stdBold':
style['fontName'] = 'stdMonoBoldItalic'
else:
style['fontName'] = 'stdMonoItalic'
if style.get('textColor', None) is None:
style['textColor']='black'
styles.append([sname, style])
return simplejson.dumps({'styles': styles}, indent=2)
for name in list(pstyles.get_all_styles()):
css=os.popen('pygmentize -S %s -f html'%name, 'r').read()
open(name+'.json', 'w').write(css2rl(css))
| # -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
'''
Creates a rst2pdf stylesheet for each pygments style.
'''
import sys
import os
import simplejson
from pygments.token import STANDARD_TYPES
from pygments import styles as pstyles
def css2rl(css):
dstyles = {}
# First create a dumb stylesheet
for key in STANDARD_TYPES:
dstyles["pygments-" + STANDARD_TYPES[key]] = {'parent': 'code'}
styles = []
for line in css.splitlines():
line = line.strip()
sname = "pygments-" + line.split(' ')[0][1:]
style = dstyles.get(sname, {'parent': 'code'})
options = line.split('{')[1].split('}')[0].split(';')
for option in options:
option = option.strip()
option, argument = option.split(':')
option=option.strip()
argument=argument.strip()
if option == 'color':
style['textColor'] = argument.strip()
if option == 'background-color':
style['backColor'] = argument.strip()
# These two can come in any order
if option == 'font-weight' and argument == 'bold':
if 'fontName' in style and \
style['fontName'] == 'stdMonoItalic':
style['fontName'] = 'stdMonoBoldItalic'
else:
style['fontName'] = 'stdMonoBold'
if option == 'font-style' and argument == 'italic':
if 'fontName' in style and style['fontName'] == 'stdBold':
style['fontName'] = 'stdMonoBoldItalic'
else:
style['fontName'] = 'stdMonoItalic'
styles.append([sname, style])
return simplejson.dumps({'styles': styles}, indent=2)
for name in list(pstyles.get_all_styles()):
css=os.popen('pygmentize -S %s -f html'%name, 'r').read()
open(name+'.json', 'w').write(css2rl(css))
| mit | Python |
9091035443e06beeea359e373e4809f4965c7ffe | Add Events::EventBusPolicy (#1386) | ikben/troposphere,ikben/troposphere,cloudtools/troposphere,cloudtools/troposphere,johnctitus/troposphere,johnctitus/troposphere | troposphere/events.py | troposphere/events.py | # Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject
from . import AWSProperty
from .validators import integer
class Condition(AWSProperty):
props = {
'Key': (basestring, False),
'Type': (basestring, False),
'Value': (basestring, False),
}
class EventBusPolicy(AWSObject):
resource_type = "AWS::Events::EventBusPolicy"
props = {
'Action': (basestring, True),
'Condition': (Condition, False),
'Principal': (basestring, True),
'StatementId': (basestring, True),
}
class EcsParameters(AWSProperty):
props = {
'TaskCount': (integer, False),
'TaskDefinitionArn': (basestring, True),
}
class InputTransformer(AWSProperty):
props = {
'InputPathsMap': (dict, False),
'InputTemplate': (basestring, True),
}
class KinesisParameters(AWSProperty):
props = {
'PartitionKeyPath': (basestring, True),
}
class RunCommandTarget(AWSProperty):
props = {
'Key': (basestring, True),
'Values': ([basestring], True),
}
class RunCommandParameters(AWSProperty):
props = {
'RunCommandTargets': ([RunCommandTarget], True),
}
class SqsParameters(AWSProperty):
props = {
'MessageGroupId': (basestring, True),
}
class Target(AWSProperty):
props = {
'Arn': (basestring, True),
'EcsParameters': (EcsParameters, False),
'Id': (basestring, True),
'Input': (basestring, False),
'InputPath': (basestring, False),
'InputTransformer': (InputTransformer, False),
'KinesisParameters': (KinesisParameters, False),
'RoleArn': (basestring, False),
'RunCommandParameters': (RunCommandParameters, False),
'SqsParameters': (SqsParameters, False),
}
class Rule(AWSObject):
resource_type = "AWS::Events::Rule"
props = {
'Description': (basestring, False),
'EventPattern': (dict, False),
'Name': (basestring, False),
'RoleArn': (basestring, False),
'ScheduleExpression': (basestring, False),
'State': (basestring, False),
'Targets': ([Target], False),
}
| # Copyright (c) 2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
class EcsParameters(AWSProperty):
props = {
"TaskCount": (int, False),
"TaskDefinitionArn": (basestring, True),
}
class InputTransformer(AWSProperty):
props = {
'InputPathsMap': (dict, False),
'InputTemplate': (basestring, True),
}
class KinesisParameters(AWSProperty):
props = {
'PartitionKeyPath': (basestring, True),
}
class RunCommandTarget(AWSProperty):
props = {
'Key': (basestring, True),
'Values': ([basestring], True),
}
class RunCommandParameters(AWSProperty):
props = {
'RunCommandTargets': ([RunCommandTarget], True),
}
class SqsParameters(AWSProperty):
props = {
'MessageGroupId': (basestring, True),
}
class Target(AWSProperty):
props = {
'Arn': (basestring, True),
"EcsParameters": (EcsParameters, False),
'Id': (basestring, True),
'Input': (basestring, False),
'InputPath': (basestring, False),
'InputTransformer': (InputTransformer, False),
'KinesisParameters': (KinesisParameters, False),
'RoleArn': (basestring, False),
'RunCommandParameters': (RunCommandParameters, False),
'SqsParameters': (SqsParameters, False),
}
class Rule(AWSObject):
resource_type = "AWS::Events::Rule"
props = {
'Description': (basestring, False),
'EventPattern': (dict, False),
'Name': (basestring, False),
'ScheduleExpression': (basestring, False),
'State': (basestring, False),
'Targets': ([Target], False),
}
| bsd-2-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.