commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
a8791229ee7e54224f112f2bbdf529a20bafbafb | Fix automatic author list generation for Read The Docs | blab/nextstrain-augur,nextstrain/augur,nextstrain/augur,nextstrain/augur | docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
from datetime import date
import subprocess
def git_authors():
result = subprocess.run(
["git", "shortlog", "--summary", "HEAD"],
stdout = subprocess.PIPE,
check = True)
names = [
line.strip().split("\t")[1]
for line in result.stdout.decode("utf-8").splitlines()
]
return names
def prose_list(items):
if not items:
return ""
if len(items) == 1:
return items[0]
elif len(items) == 2:
return " and ".join(items)
else:
return ", ".join([*items[0:-1], "and " + items[-1]])
project = 'augur'
copyright = '2014–%d Trevor Bedford and Richard Neher' % (date.today().year)
author = prose_list(git_authors())
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark', 'sphinx.ext.autodoc', 'sphinxarg.ext', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# A string of reStructuredText that will be included at the end of every source
# file that is read. This is a possible place to add substitutions that should
# be available in every file.
rst_epilog = f"""
.. |authors| replace:: {author}
"""
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
from datetime import date
import subprocess
def git_authors():
result = subprocess.run(
["git", "shortlog", "--summary"],
stdout = subprocess.PIPE,
check = True)
names = [
line.strip().split("\t")[1]
for line in result.stdout.decode("utf-8").splitlines()
]
return names
def prose_list(items):
if not items:
return ""
if len(items) == 1:
return items[0]
elif len(items) == 2:
return " and ".join(items)
else:
return ", ".join([*items[0:-1], "and " + items[-1]])
project = 'augur'
copyright = '2014–%d Trevor Bedford and Richard Neher' % (date.today().year)
author = prose_list(git_authors())
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark', 'sphinx.ext.autodoc', 'sphinxarg.ext', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# A string of reStructuredText that will be included at the end of every source
# file that is read. This is a possible place to add substitutions that should
# be available in every file.
rst_epilog = f"""
.. |authors| replace:: {author}
"""
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| agpl-3.0 | Python |
63803a449d519c2bdc817574539322293b05c6ef | fix cape breton post_id | opencivicdata/scrapers-ca,opencivicdata/scrapers-ca | ca_ns_cape_breton/people.py | ca_ns_cape_breton/people.py | from pupa.scrape import Scraper, Legislator
from utils import lxmlize
import re
COUNCIL_PAGE = 'http://www.cbrm.ns.ca/councillors.html'
class CapeBretonPersonScraper(Scraper):
# @todo Need to scrape Mayor
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
councillors = page.xpath('//table[@class="table_style"]/tbody/tr')[1:]
for councillor in councillors:
name = councillor.xpath('.//a')[0].text_content()
district = 'District %s' % councillor.xpath('.//strong')[0].text_content()
address = councillor.xpath('.//td')[3].text_content().replace("\r\n", ', ')
phone = councillor.xpath('.//td[5]/p/text()')[0].split(':')[1].replace("(", '').replace(") ", '-')
fax = councillor.xpath('.//td[5]/p/text()')[1].split(':')[1].replace("(", '').replace(") ", '-')
p = Legislator(name=name, post_id=district, role='Councillor')
p.add_source(COUNCIL_PAGE)
p.add_contact('address', address, 'legislature')
p.add_contact('voice', phone, 'legislature')
p.add_contact('fax', fax, 'legislature')
councillor_url = councillor.xpath('.//a/@href')[0]
p.add_source(councillor_url)
page = lxmlize(councillor_url)
p.image = page.xpath('//img[@class="image_left"]/@src')[0]
yield p
| from pupa.scrape import Scraper, Legislator
from utils import lxmlize
import re
COUNCIL_PAGE = 'http://www.cbrm.ns.ca/councillors.html'
class CapeBretonPersonScraper(Scraper):
# @todo Need to scrape Mayor
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
councillors = page.xpath('//table[@class="table_style"]/tbody/tr')[1:]
for councillor in councillors:
name = councillor.xpath('.//a')[0].text_content()
district = councillor.xpath('.//strong')[0].text_content()
address = councillor.xpath('.//td')[3].text_content().replace("\r\n", ', ')
phone = councillor.xpath('.//td[5]/p/text()')[0].split(':')[1].replace("(", '').replace(") ", '-')
fax = councillor.xpath('.//td[5]/p/text()')[1].split(':')[1].replace("(", '').replace(") ", '-')
p = Legislator(name=name, post_id=district, role='Councillor')
p.add_source(COUNCIL_PAGE)
p.add_contact('address', address, 'legislature')
p.add_contact('voice', phone, 'legislature')
p.add_contact('fax', fax, 'legislature')
councillor_url = councillor.xpath('.//a/@href')[0]
p.add_source(councillor_url)
page = lxmlize(councillor_url)
p.image = page.xpath('//img[@class="image_left"]/@src')[0]
yield p
| mit | Python |
c79cedf826a3b6ee89e6186954185ef3217dd901 | Add the minimum scalar value to the result of the InvertOperator | OpenChemistry/tomviz,mathturtle/tomviz,OpenChemistry/tomviz,mathturtle/tomviz,OpenChemistry/tomviz,OpenChemistry/tomviz,mathturtle/tomviz | tomviz/python/InvertData.py | tomviz/python/InvertData.py | import tomviz.operators
NUMBER_OF_CHUNKS = 10
class InvertOperator(tomviz.operators.CancelableOperator):
def transform_scalars(self, dataset):
from tomviz import utils
import numpy as np
self.progress.maximum = NUMBER_OF_CHUNKS
scalars = utils.get_scalars(dataset)
if scalars is None:
raise RuntimeError("No scalars found!")
result = np.float32(scalars)
min = np.amin(scalars)
max = np.amax(scalars)
step = 0
for chunk in np.array_split(result, NUMBER_OF_CHUNKS):
if self.canceled:
return
chunk[:] = max - chunk + min
step += 1
self.progress.value = step
utils.set_scalars(dataset, result)
| import tomviz.operators
NUMBER_OF_CHUNKS = 10
class InvertOperator(tomviz.operators.CancelableOperator):
def transform_scalars(self, dataset):
from tomviz import utils
import numpy as np
self.progress.maximum = NUMBER_OF_CHUNKS
scalars = utils.get_scalars(dataset)
if scalars is None:
raise RuntimeError("No scalars found!")
result = np.float32(scalars)
max = np.amax(scalars)
step = 0
for chunk in np.array_split(result, NUMBER_OF_CHUNKS):
if self.canceled:
return
chunk[:] = max - chunk
step += 1
self.progress.value = step
utils.set_scalars(dataset, result)
| bsd-3-clause | Python |
4978f7ad6b87672d7fc1df7a836e2b8931bca1c9 | Fix missing relations pool for new proposals | liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator | src/adhocracy_core/adhocracy_core/resources/proposal.py | src/adhocracy_core/adhocracy_core/resources/proposal.py | """Proposal resource types."""
from adhocracy_core.interfaces import IItem
from adhocracy_core.interfaces import IItemVersion
from adhocracy_core.resources import add_resource_type_to_registry
from adhocracy_core.resources.badge import add_badge_assignments_service
from adhocracy_core.resources.comment import add_commentsservice
from adhocracy_core.resources.item import item_meta
from adhocracy_core.resources.itemversion import itemversion_meta
from adhocracy_core.resources.rate import add_ratesservice
from adhocracy_core.resources.relation import add_relationsservice
from adhocracy_core.sheets.badge import IBadgeable
from adhocracy_core.sheets.comment import ICommentable
from adhocracy_core.sheets.description import IDescription
from adhocracy_core.sheets.geo import IPoint
from adhocracy_core.sheets.rate import IRateable
from adhocracy_core.sheets.title import ITitle
from adhocracy_core.sheets.relation import IPolarizable
class IProposalVersion(IItemVersion):
"""Proposal version."""
proposal_version_meta = itemversion_meta._replace(
content_name='ProposalVersion',
iresource=IProposalVersion,
extended_sheets=(IBadgeable,
ITitle,
IDescription,
ICommentable,
IRateable,
IPolarizable,
),
permission_create='edit_proposal',
)
class IProposal(IItem):
"""Proposal versions pool."""
proposal_meta = item_meta._replace(
content_name='Proposal',
iresource=IProposal,
element_types=(IProposalVersion,),
extended_sheets=(IBadgeable,
),
item_type=IProposalVersion,
is_implicit_addable=True,
autonaming_prefix='proposal_',
permission_create='create_proposal',
)._add(after_creation=(
add_commentsservice,
add_ratesservice,
add_badge_assignments_service,
add_relationsservice,
))
class IGeoProposalVersion(IProposalVersion):
"""Geolocalisable proposal version."""
geo_proposal_version_meta = proposal_version_meta._replace(
iresource=IGeoProposalVersion,
)._add(extended_sheets=(IPoint,))
class IGeoProposal(IProposal):
"""Geolocalisable proposal versions pool."""
geo_proposal_meta = proposal_meta._replace(
iresource=IGeoProposal,
element_types=(IGeoProposalVersion,),
)
def includeme(config):
"""Add resources type to content."""
add_resource_type_to_registry(proposal_meta, config)
add_resource_type_to_registry(proposal_version_meta, config)
add_resource_type_to_registry(geo_proposal_meta, config)
add_resource_type_to_registry(geo_proposal_version_meta, config)
| """Proposal resource types."""
from adhocracy_core.interfaces import IItem
from adhocracy_core.interfaces import IItemVersion
from adhocracy_core.resources import add_resource_type_to_registry
from adhocracy_core.resources.badge import add_badge_assignments_service
from adhocracy_core.resources.comment import add_commentsservice
from adhocracy_core.resources.item import item_meta
from adhocracy_core.resources.itemversion import itemversion_meta
from adhocracy_core.resources.rate import add_ratesservice
from adhocracy_core.sheets.badge import IBadgeable
from adhocracy_core.sheets.comment import ICommentable
from adhocracy_core.sheets.description import IDescription
from adhocracy_core.sheets.geo import IPoint
from adhocracy_core.sheets.rate import IRateable
from adhocracy_core.sheets.title import ITitle
from adhocracy_core.sheets.relation import IPolarizable
class IProposalVersion(IItemVersion):
"""Proposal version."""
proposal_version_meta = itemversion_meta._replace(
content_name='ProposalVersion',
iresource=IProposalVersion,
extended_sheets=(IBadgeable,
ITitle,
IDescription,
ICommentable,
IRateable,
IPolarizable,
),
permission_create='edit_proposal',
)
class IProposal(IItem):
"""Proposal versions pool."""
proposal_meta = item_meta._replace(
content_name='Proposal',
iresource=IProposal,
element_types=(IProposalVersion,),
extended_sheets=(IBadgeable,
),
item_type=IProposalVersion,
is_implicit_addable=True,
autonaming_prefix='proposal_',
permission_create='create_proposal',
)._add(after_creation=(
add_commentsservice,
add_ratesservice,
add_badge_assignments_service,
))
class IGeoProposalVersion(IProposalVersion):
"""Geolocalisable proposal version."""
geo_proposal_version_meta = proposal_version_meta._replace(
iresource=IGeoProposalVersion,
)._add(extended_sheets=(IPoint,))
class IGeoProposal(IProposal):
"""Geolocalisable proposal versions pool."""
geo_proposal_meta = proposal_meta._replace(
iresource=IGeoProposal,
element_types=(IGeoProposalVersion,),
)
def includeme(config):
"""Add resources type to content."""
add_resource_type_to_registry(proposal_meta, config)
add_resource_type_to_registry(proposal_version_meta, config)
add_resource_type_to_registry(geo_proposal_meta, config)
add_resource_type_to_registry(geo_proposal_version_meta, config)
| agpl-3.0 | Python |
ea3ff403ef98f730a5509d735e5e4ab699bff2c0 | change the time to 2 minutes | elixirhub/events-portal-scraping-scripts | ScheduleAddData.py | ScheduleAddData.py | __author__ = 'chuqiao'
from apscheduler.schedulers.blocking import BlockingScheduler
import EventsPortal
import sys
import logging
def logger():
"""
Function that initialises logging system
"""
global logger
# create logger with 'syncsolr'
logger = logging.getLogger('scheduleAddData')
logger.setLevel(logging.DEBUG)
# specifies the lowest severity that will be dispatched to the appropriate destination
# create file handler which logs even debug messages
fh = logging.FileHandler('scheduleAddData.log')
# fh.setLevel(logging.WARN)
# create console handler and set level to debug
ch = logging.StreamHandler()
# StreamHandler instances send messages to streams
# ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
def scheduleUpdateSolr(sourceUrl,patternUrl,solrUrl):
"""
"""
logger()
logger.info('***Start updating every hour***')
sched = BlockingScheduler()
sched.add_job(EventsPortal.addDataToSolrFromUrl, 'interval', minutes= 2, args=[sourceUrl,patternUrl,solrUrl])
sched.start()
logger.info('***Finished updating every hour***')
try:
# Keeps the main thread alive.
while True:
time.sleep(20)
except (KeyboardInterrupt, SystemExit):
logger.error('Can not schedule add data to solr \n%s' % str(sys.exc_info()))
if __name__ == '__main__':
scheduleUpdateSolr("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All",
"http://bioevents-portal.org/events",
# "139.162.217.53:8983/solr/eventsportal/"
"localhost:8983/solr/event_portal"
)
# scheduleUpdateSolr(sys.argv[1],sys.argv[2])
| __author__ = 'chuqiao'
from apscheduler.schedulers.blocking import BlockingScheduler
import EventsPortal
import sys
import logging
def logger():
"""
Function that initialises logging system
"""
global logger
# create logger with 'syncsolr'
logger = logging.getLogger('scheduleAddData')
logger.setLevel(logging.DEBUG)
# specifies the lowest severity that will be dispatched to the appropriate destination
# create file handler which logs even debug messages
fh = logging.FileHandler('scheduleAddData.log')
# fh.setLevel(logging.WARN)
# create console handler and set level to debug
ch = logging.StreamHandler()
# StreamHandler instances send messages to streams
# ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
def scheduleUpdateSolr(sourceUrl,patternUrl,solrUrl):
"""
"""
logger()
logger.info('***Start updating every hour***')
sched = BlockingScheduler()
sched.add_job(EventsPortal.addDataToSolrFromUrl, 'interval', minutes= 60, args=[sourceUrl,patternUrl,solrUrl])
sched.start()
logger.info('***Finished updating every hour***')
try:
# Keeps the main thread alive.
while True:
time.sleep(20)
except (KeyboardInterrupt, SystemExit):
logger.error('Can not schedule add data to solr \n%s' % str(sys.exc_info()))
if __name__ == '__main__':
scheduleUpdateSolr("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All",
"http://bioevents-portal.org/events",
"139.162.217.53:8983/solr/eventsportal/"
)
# scheduleUpdateSolr(sys.argv[1],sys.argv[2])
| mit | Python |
491b1ce982ccfe7aac352d794b7d882efbaa0c1d | Update reconstruct-itinerary.py | jaredkoontz/leetcode,kamyu104/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,githubutilities/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,jaredkoontz/leetcode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode | Python/reconstruct-itinerary.py | Python/reconstruct-itinerary.py | # Time: O(t! / (n1! * n2! * ... nk!)), t is the total number of tickets, ni is the number of ticket which from is node i
# Space: O(t)
# Given a list of airline tickets represented by pairs of departure
# and arrival airports [from, to], reconstruct the itinerary in order.
# All of the tickets belong to a man who departs from JFK.
# Thus, the itinerary must begin with JFK.
#
# Note:
# If there are multiple valid itineraries, you should return the itinerary
# that has the smallest lexical order when read as a single string.
# For example, the itinerary ["JFK", "LGA"] has a smaller lexical
# order than ["JFK", "LGB"].
# All airports are represented by three capital letters (IATA code).
# You may assume all tickets may form at least one valid itinerary.
# Example 1:
# tickets = [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
# Return ["JFK", "MUC", "LHR", "SFO", "SJC"].
# Example 2:
# tickets = [["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]]
# Return ["JFK","ATL","JFK","SFO","ATL","SFO"].
# Another possible reconstruction is ["JFK","SFO","ATL","JFK","ATL","SFO"].
# But it is larger in lexical order.
class Solution(object):
def findItinerary(self, tickets):
"""
:type tickets: List[List[str]]
:rtype: List[str]
"""
def route_helper(start, size, graph, ans):
if size == 0:
return True
for i, (end, valid) in enumerate(graph[start]):
if valid:
graph[start][i][1] = False
ans.append(end)
if route_helper(end, size - 1, graph, ans):
return ans
ans.pop()
graph[start][i][1] = True
return False
graph = collections.defaultdict(list)
for ticket in tickets:
graph[ticket[0]].append([ticket[1], True])
for k in graph.keys():
graph[k].sort()
start = "JFK"
ans = [start]
route_helper(start, len(tickets), graph, ans)
return ans
| # Time: O(n!)
# Space: O(1)
# Given a list of airline tickets represented by pairs of departure
# and arrival airports [from, to], reconstruct the itinerary in order.
# All of the tickets belong to a man who departs from JFK.
# Thus, the itinerary must begin with JFK.
#
# Note:
# If there are multiple valid itineraries, you should return the itinerary
# that has the smallest lexical order when read as a single string.
# For example, the itinerary ["JFK", "LGA"] has a smaller lexical
# order than ["JFK", "LGB"].
# All airports are represented by three capital letters (IATA code).
# You may assume all tickets may form at least one valid itinerary.
# Example 1:
# tickets = [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
# Return ["JFK", "MUC", "LHR", "SFO", "SJC"].
# Example 2:
# tickets = [["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]]
# Return ["JFK","ATL","JFK","SFO","ATL","SFO"].
# Another possible reconstruction is ["JFK","SFO","ATL","JFK","ATL","SFO"].
# But it is larger in lexical order.
class Solution(object):
def findItinerary(self, tickets):
"""
:type tickets: List[List[str]]
:rtype: List[str]
"""
def route_helper(start, size, graph, ans):
if size == 0:
return True
for i, (end, valid) in enumerate(graph[start]):
if valid:
graph[start][i][1] = False
ans.append(end)
if route_helper(end, size - 1, graph, ans):
return ans
ans.pop()
graph[start][i][1] = True
return False
graph = collections.defaultdict(list)
for ticket in tickets:
graph[ticket[0]].append([ticket[1], True])
for k in graph.keys():
graph[k].sort()
start = "JFK"
ans = [start]
route_helper(start, len(tickets), graph, ans)
return ans
| mit | Python |
38ea9f7ec5aaaf4175602f353a6ec551e133b8f3 | move the constraint from a comment to executable code | ZeitOnline/zeit.cms,ZeitOnline/zeit.cms,ZeitOnline/zeit.cms,ZeitOnline/zeit.cms | src/zeit/cms/relation/relation.py | src/zeit/cms/relation/relation.py | # Copyright (c) 2008-2009 gocept gmbh & co. kg
# See also LICENSE.txt
import BTrees
import persistent
import zc.relation.catalog
import zc.relation.interfaces
import zeit.cms.relation.interfaces
import zope.interface
class Relations(persistent.Persistent):
"""Handles relations between content."""
zope.interface.implements(zeit.cms.relation.interfaces.IRelations)
def __init__(self):
super(Relations, self).__init__()
self._catalog = zc.relation.catalog.Catalog(
_dump_content, _load_content, btree=BTrees.family32.OI)
# IRelations
def index(self, obj):
self._catalog.index(obj)
def get_relations(self, obj):
index = 'referenced_by'
token = list(self._catalog.tokenizeValues([obj], index))[0]
if token is None:
return ()
# TODO: add some code to remove removed objects from the index
return (obj for obj in self._catalog.findRelations({index: token})
if obj is not None)
def add_index(self, element, multiple=False):
"""add a value index for given element."""
self._catalog.addValueIndex(
element, _dump_content, _load_content,
btree=BTrees.family32.OI, multiple=multiple)
def _dump_content(content, catalog, cache):
return content.uniqueId
def _load_content(token, catalog, cache):
repository = cache.get('repository')
if repository is None:
cache['repository'] = repository = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
try:
return repository.getContent(token)
except KeyError:
# If the object doesn't exist, return None
return None
def referenced_by(content, catalog):
"""Index for the zeit.cms.relation catalog."""
return zeit.cms.relation.interfaces.IReferences(content, None)
@zope.component.adapter(zeit.cms.interfaces.ICMSContent)
@zope.interface.implementer(zeit.cms.relation.interfaces.IReferences)
def references(context):
result = []
for name, adapter in zope.component.getAdapters(
(context,), zeit.cms.relation.interfaces.IReferenceProvider):
if not name:
raise ValueError(
'IReferenceProvider %r is registered without a name,'
' this will cause configuration conflicts.')
result.extend(adapter)
return result
| # Copyright (c) 2008-2009 gocept gmbh & co. kg
# See also LICENSE.txt
import BTrees
import persistent
import zc.relation.catalog
import zc.relation.interfaces
import zeit.cms.relation.interfaces
import zope.interface
class Relations(persistent.Persistent):
"""Handles relations between content."""
zope.interface.implements(zeit.cms.relation.interfaces.IRelations)
def __init__(self):
super(Relations, self).__init__()
self._catalog = zc.relation.catalog.Catalog(
_dump_content, _load_content, btree=BTrees.family32.OI)
# IRelations
def index(self, obj):
self._catalog.index(obj)
def get_relations(self, obj):
index = 'referenced_by'
token = list(self._catalog.tokenizeValues([obj], index))[0]
if token is None:
return ()
# TODO: add some code to remove removed objects from the index
return (obj for obj in self._catalog.findRelations({index: token})
if obj is not None)
def add_index(self, element, multiple=False):
"""add a value index for given element."""
self._catalog.addValueIndex(
element, _dump_content, _load_content,
btree=BTrees.family32.OI, multiple=multiple)
def _dump_content(content, catalog, cache):
return content.uniqueId
def _load_content(token, catalog, cache):
repository = cache.get('repository')
if repository is None:
cache['repository'] = repository = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
try:
return repository.getContent(token)
except KeyError:
# If the object doesn't exist, return None
return None
def referenced_by(content, catalog):
"""Index for the zeit.cms.relation catalog."""
return zeit.cms.relation.interfaces.IReferences(content, None)
@zope.component.adapter(zeit.cms.interfaces.ICMSContent)
@zope.interface.implementer(zeit.cms.relation.interfaces.IReferences)
def references(context):
result = []
for name, adapter in zope.component.getAdapters(
(context,), zeit.cms.relation.interfaces.IReferenceProvider):
# require a name since unnamed adapters would cause configuration
# conflicts
if not name:
continue
result.extend(adapter)
return result
| bsd-3-clause | Python |
457a3ff74c37b641b53261f8eecbc4aee0d30f04 | Return all breakers to API | Somsubhra/Simplify,Somsubhra/Simplify,Somsubhra/Simplify | src/syntax/breaker.py | src/syntax/breaker.py | __author__ = 's7a'
# All imports
from appositions import Appositions
from relative_clauses import RelativeClauses
from prefix_subordination import PrefixSubordination
from infix_subordination import InfixSubordination
from infix_coordination import InfixCoordination
# Constructor for the breaker class
class Breaker:
# Constructor for the breaker class
def __init__(self):
self.appositions = Appositions()
self.relative_clauses = RelativeClauses()
self.prefix_subordination = PrefixSubordination()
self.infix_subordination = InfixSubordination()
self.infix_coordination = InfixCoordination()
# Break the tree
def break_tree(self, tree):
apposition_result = self.appositions.break_tree(tree)
relative_clause_result = self.relative_clauses.break_tree(tree)
prefix_subordination_result = self.prefix_subordination.break_tree(tree)
infix_subordination_result = self.infix_subordination.break_tree(tree)
infix_coordination_result = self.infix_coordination.break_tree(tree)
return {
"apposition": apposition_result,
"relative_clause": relative_clause_result,
'prefix_subordination': prefix_subordination_result,
"infix_subordination": infix_subordination_result,
"infix_coordination": infix_coordination_result
} | __author__ = 's7a'
# All imports
from appositions import Appositions
from relative_clauses import RelativeClauses
from prefix_subordination import PrefixSubordination
from infix_subordination import InfixSubordination
from infix_coordination import InfixCoordination
# Constructor for the breaker class
class Breaker:
# Constructor for the breaker class
def __init__(self):
self.appositions = Appositions()
self.relative_clauses = RelativeClauses()
self.prefix_subordination = PrefixSubordination()
self.infix_subordination = InfixSubordination()
self.infix_coordination = InfixCoordination()
# Break the tree
def break_tree(self, tree):
self.appositions.break_tree(tree)
self.relative_clauses.break_tree(tree)
self.prefix_subordination.break_tree(tree)
self.infix_subordination.break_tree(tree)
self.infix_coordination.break_tree(tree) | mit | Python |
898fe6057e6381c23fa75d2a707e709ab93ffe53 | add demo about reference count of object | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | reading-notes/PythonRef/src/ch03/main.py | reading-notes/PythonRef/src/ch03/main.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def object_type():
a = 34
b = a
print ('id(a) = %d, id(b) = %d' % (id(a), id(b)))
if a is b:
print ('a and b are the same type')
print ('type(a) = %s, type(b) = %s' % (type(a), type(b)))
a = 34
if isinstance(a, int):
print ('%d is integer type' % a)
a = 34.5
if isinstance(a, float):
print ('%f is float type' % a)
a = True
if isinstance(a, bool):
print ('%s is boolean type' % a)
a = 'Hello, world'
if isinstance(a, str):
print ('\'%s\' is string type' % a)
a = [1, 2, 3, 4, 5]
if isinstance(a, list):
print (a, 'is list type')
a = (1, 2, 3)
if isinstance(a, tuple):
print (a, 'is tuple type')
a = {'name': 'tim', 'age': 20, 'sex': 'male'}
if isinstance(a, dict):
print (a, 'is dict type')
def object_reference_count():
    """Show CPython reference counts for two small ints via sys.getrefcount()."""
    import sys

    def add(left, right):
        return left + right

    first, second = 33, 44
    total = add(first, second)
    # Small-int reference counts are interpreter bookkeeping noise; they are
    # printed only for illustration.
    print('%d + %d = %d <a.ref_count=>%d, b.ref_count=>%d>' % (
        first, second, total,
        sys.getrefcount(first), sys.getrefcount(second)))
    del first
    del second
# Script entry point: only the reference-count demo runs by default; the
# type demo stays available behind the commented-out call.
if __name__ == '__main__':
    # object_type()
    object_reference_count()
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def object_type():
a = 34
b = a
print ('id(a) = %d, id(b) = %d' % (id(a), id(b)))
if a is b:
print ('a and b are the same type')
print ('type(a) = %s, type(b) = %s' % (type(a), type(b)))
a = 34
if isinstance(a, int):
print ('%d is integer type' % a)
a = 34.5
if isinstance(a, float):
print ('%f is float type' % a)
a = True
if isinstance(a, bool):
print ('%s is boolean type' % a)
a = 'Hello, world'
if isinstance(a, str):
print ('\'%s\' is string type' % a)
a = [1, 2, 3, 4, 5]
if isinstance(a, list):
print (a, 'is list type')
a = (1, 2, 3)
if isinstance(a, tuple):
print (a, 'is tuple type')
a = {'name': 'tim', 'age': 20, 'sex': 'male'}
if isinstance(a, dict):
print (a, 'is dict type')
if __name__ == '__main__':
object_type()
| bsd-2-clause | Python |
afe526d0193ee82d74e41acac26baacdc1aabc62 | Update ratio2circos.py | mvdbeek/tools-iuc,Delphine-L/tools-iuc,mblue9/tools-iuc,ieguinoa/tools-iuc,pavanvidem/tools-iuc,galaxyproject/tools-iuc,mblue9/tools-iuc,abretaud/tools-iuc,gregvonkuster/tools-iuc,pavanvidem/tools-iuc,martenson/tools-iuc,nsoranzo/tools-iuc,galaxyproject/tools-iuc,nsoranzo/tools-iuc,loraine-gueguen/tools-iuc,blankenberg/tools-iuc,loraine-gueguen/tools-iuc,natefoo/tools-iuc,ieguinoa/tools-iuc,jj-umn/tools-iuc,loraine-gueguen/tools-iuc,blankenberg/tools-iuc,abretaud/tools-iuc,abretaud/tools-iuc,Delphine-L/tools-iuc,mblue9/tools-iuc,jj-umn/tools-iuc,nsoranzo/tools-iuc,blankenberg/tools-iuc,loraine-gueguen/tools-iuc,lparsons/tools-iuc,nsoranzo/tools-iuc,nekrut/tools-iuc,mvdbeek/tools-iuc,gregvonkuster/tools-iuc,davebx/tools-iuc,gregvonkuster/tools-iuc,davebx/tools-iuc,lparsons/tools-iuc,mblue9/tools-iuc,pjbriggs/tools-iuc,pavanvidem/tools-iuc,galaxyproject/tools-iuc,nekrut/tools-iuc,pjbriggs/tools-iuc,Delphine-L/tools-iuc,jj-umn/tools-iuc,jj-umn/tools-iuc,gregvonkuster/tools-iuc,pavanvidem/tools-iuc,abretaud/tools-iuc,abretaud/tools-iuc,natefoo/tools-iuc,davebx/tools-iuc,blankenberg/tools-iuc,pjbriggs/tools-iuc,galaxyproject/tools-iuc,loraine-gueguen/tools-iuc,nekrut/tools-iuc,mvdbeek/tools-iuc,davebx/tools-iuc,gregvonkuster/tools-iuc,natefoo/tools-iuc,abretaud/tools-iuc,pavanvidem/tools-iuc,mblue9/tools-iuc,Delphine-L/tools-iuc,Delphine-L/tools-iuc,blankenberg/tools-iuc,davebx/tools-iuc,davebx/tools-iuc,ieguinoa/tools-iuc,lparsons/tools-iuc,pjbriggs/tools-iuc,galaxyproject/tools-iuc,natefoo/tools-iuc,mvdbeek/tools-iuc,natefoo/tools-iuc,blankenberg/tools-iuc,jj-umn/tools-iuc,mvdbeek/tools-iuc,mblue9/tools-iuc,nsoranzo/tools-iuc,Delphine-L/tools-iuc,natefoo/tools-iuc,davebx/tools-iuc,jj-umn/tools-iuc,mvdbeek/tools-iuc,ieguinoa/tools-iuc,loraine-gueguen/tools-iuc,blankenberg/tools-iuc,abretaud/tools-iuc,lparsons/tools-iuc,pavanvidem/tools-iuc,lparsons/tools-iuc,lparsons/tools-iuc,jj-umn/tools-iuc,nsoranzo/
tools-iuc,ieguinoa/tools-iuc,gregvonkuster/tools-iuc,nekrut/tools-iuc,natefoo/tools-iuc,mvdbeek/tools-iuc,pavanvidem/tools-iuc,pjbriggs/tools-iuc,nsoranzo/tools-iuc,ieguinoa/tools-iuc,nekrut/tools-iuc,ieguinoa/tools-iuc,loraine-gueguen/tools-iuc,Delphine-L/tools-iuc,nekrut/tools-iuc,galaxyproject/tools-iuc,nekrut/tools-iuc,gregvonkuster/tools-iuc,pjbriggs/tools-iuc,galaxyproject/tools-iuc | tools/freec/ratio2circos.py | tools/freec/ratio2circos.py | import math
import sys
# Ploidy of the sample, given as the first command-line argument.
ploidy = int(sys.argv[1])
# Convert copy-number ratios (BedGraph column 4) to log2(ratio / ploidy) for
# plotting with Circos; the 'track' header line and non-positive ratios are
# skipped (log2 is undefined for them).
with open("./output/sample.bam_ratio.BedGraph") as bed:
    with open("./output/sample.bam_ratio_log2_circos.txt", "w+") as olog2r:
        for line in bed.readlines():
            ls = line.split()
            if ls[0] != "track" and float(ls[3]) > 0:
                log2_ratio = math.log2(float(ls[3]) / ploidy)
                olog2r.write("{}\t{}\t{}\t{}\n".format(ls[0], ls[1], ls[2], log2_ratio))
# Build a Circos karyotype file from the FASTA index (.fai); fields are:
# chromosome name, id, start (0), length, label.
# NOTE(review): strip("chr") removes any of the characters c/h/r from BOTH
# ends of the name, not just a leading "chr" prefix -- verify chromosome
# naming (e.g. "chrc" would strip to "").
with open("./genome.fa.fai") as fai:
    with open("./output/karyotype_circos.txt", "w+") as ochr:
        for line in fai.readlines():
            ls = line.split()
            ochr.write("chr - {}\t{}\t0\t{}\t{}\n".format(ls[0], ls[0].strip("chr").lower(), ls[1], ls[0]))
| import math
import sys
# Ploidy of the sample, given as the first command-line argument.
ploidy = int(sys.argv[1])

# Convert copy-number ratios (BedGraph column 4) to log2(ratio / ploidy) for
# plotting with Circos.  FIX: open outputs in "w" (truncate) mode -- the
# previous "a" (append) mode accumulated duplicate records on every rerun.
with open("./output/sample.bam_ratio.BedGraph") as bed:
    with open("./output/sample.bam_ratio_log2_circos.txt", "w") as olog2r:
        for line in bed:
            ls = line.split()
            # Skip the 'track' header and non-positive ratios (log2 undefined).
            if ls[0] != "track" and float(ls[3]) > 0:
                log2_ratio = math.log2(float(ls[3]) / ploidy)
                olog2r.write("{}\t{}\t{}\t{}\n".format(ls[0], ls[1], ls[2], log2_ratio))

# Build a Circos karyotype file from the FASTA index (.fai); fields are:
# chromosome name, id, start (0), length, label.
with open("./genome.fa.fai") as fai:
    with open("./output/karyotype_circos.txt", "w") as ochr:
        for line in fai:
            ls = line.split()
            ochr.write("chr - {}\t{}\t0\t{}\t{}\n".format(ls[0], ls[0].strip("chr").lower(), ls[1], ls[0]))
| mit | Python |
53aa9e61087b909a55d6ce2d7b16381e14c8304f | Add functions coloring | BrickText/BrickText | redactor/coloring/Coloring.py | redactor/coloring/Coloring.py | import re
from coloring.config_tags import config_tags
class Coloring:
    """Incremental syntax highlighter for a Tk ``Text`` widget.

    Words found in the widget are tagged with per-keyword colour tags
    configured by ``config_tags``; everything else falls back to 'blank'.
    """
    def __init__(self, text_editor, language):
        # text_editor is a project wrapper exposing the Tk root and Text panel.
        self.root = text_editor.get_root()
        self.text_widget = text_editor.get_text_panel()
        # Mapping of keyword -> colour tag registered on the widget.
        self.keywords = config_tags(self.text_widget, language)
        # Matches either a word immediately followed by '(' (a call) or a
        # bare word; the alternation order makes calls win.
        self.pattern = r"\w+\(|\w+"
    def coloring(self, indices):
        """Apply colour tags to every (start, end) index pair from findall()."""
        for f, l in indices:
            word = self.text_widget.get(f, l)
            if word in self.keywords.keys():
                # Known keyword: drop the fallback tag, apply the keyword tag.
                self.text_widget.tag_remove('blank', f, l)
                self.text_widget.tag_add(word, f, l)
            else:
                # No longer a keyword: clear every keyword tag on the span.
                for k, _ in self.keywords.items():
                    self.text_widget.tag_remove(k, f, l)
                pos = word.find('(')
                if pos > 0:
                    # Match was 'name(' -- highlight only the name part with
                    # the 'functions' tag.  Tk indices are 'line.column', so
                    # the end index is the start column plus the name length.
                    fs = f.split('.')
                    self.text_widget.tag_remove('blank', f, l)
                    self.text_widget.tag_add('functions', f,
                                             '{}.{}'.format(fs[0],
                                                            int(fs[1]) + pos))
                else:
                    self.text_widget.tag_add('blank', f, l)
    def findall(self, start="1.0", end="end"):
        """Scan the widget text in [start, end) and re-colour every match."""
        start = self.text_widget.index(start)
        end = self.text_widget.index(end)
        string = self.text_widget.get(start, end)
        indices = []
        if string:
            matches = re.finditer(self.pattern, string)
            for match in matches:
                # Translate character offsets within the scanned string back
                # to Tk 'line.column' indices relative to `start`.
                match_start = self.text_widget.index("%s+%dc" %
                                                     (start, match.start()))
                match_end = self.text_widget.index("%s+%dc" %
                                                   (start, match.end()))
                indices.append((match_start, match_end))
            self.coloring(indices)
| import re
from coloring.config_tags import config_tags
class Coloring:
def __init__(self, text_editor, language):
self.root = text_editor.get_root()
self.text_widget = text_editor.get_text_panel()
self.keywords = config_tags(self.text_widget, language)
self.pattern = r"\w+"
def coloring(self, indices):
for f, l in indices:
word = self.text_widget.get(f, l)
if word in self.keywords.keys():
self.text_widget.tag_remove('blank', f, l)
self.text_widget.tag_add(word, f, l)
else:
for k, _ in self.keywords.items():
self.text_widget.tag_remove(k, f, l)
self.text_widget.tag_add('blank', f, l)
def findall(self, start="1.0", end="end"):
start = self.text_widget.index(start)
end = self.text_widget.index(end)
string = self.text_widget.get(start, end)
indices = []
if string:
matches = re.finditer(self.pattern, string)
for match in matches:
match_start = self.text_widget.index("%s+%dc" %
(start, match.start()))
match_end = self.text_widget.index("%s+%dc" %
(start, match.end()))
indices.append((match_start, match_end))
self.coloring(indices)
| mit | Python |
369f8f0b323f8d326f6a4b66af427364f805ffb5 | fix a bug in scale | pfnet/chainercv,yuyu2172/chainercv,chainer/chainercv,yuyu2172/chainercv,chainer/chainercv | chainercv/transforms/image/scale.py | chainercv/transforms/image/scale.py | from chainercv.transforms import resize
def scale(img, size, fit_short=True):
    """Rescale *img* so that one edge has length :obj:`size`.

    When :obj:`fit_short` is :obj:`True` the shorter edge is scaled to
    :obj:`size`, otherwise the longer edge is.  The aspect ratio is kept,
    and the input is returned unchanged when the targeted edge already has
    the requested length.

    Args:
        img (~numpy.ndarray): An image array to be scaled, in CHW format.
        size (int): The target length of the chosen edge.
        fit_short (bool): Match the shorter edge (:obj:`True`) or the
            longer edge (:obj:`False`) to :obj:`size`.

    Returns:
        ~numpy.ndarray: A scaled image in CHW format.
    """
    _, height, width = img.shape

    # No work needed when the targeted edge already has the requested length.
    target_edge = min(height, width) if fit_short else max(height, width)
    if target_edge == size:
        return img

    # Which edge gets pinned to `size` depends on both the orientation of
    # the image and which edge we were asked to fit.
    if fit_short == (height < width):
        out_size = (size, int(size * width / height))
    else:
        out_size = (int(size * height / width), size)
    return resize(img, out_size)
| from chainercv.transforms import resize
def scale(img, size, fit_short=True):
    """Rescales the input image to the given "size".

    When :obj:`fit_short == True`, the input image will be resized so that
    the shorter edge will be scaled to length :obj:`size` after resizing.
    Otherwise, the longer edge is scaled to length :obj:`size`.

    Args:
        img (~numpy.ndarray): An image array to be scaled. This is in
            CHW format.
        size (int): The length of the smaller edge.
        fit_short (bool): Determines whether to match the length
            of the shorter edge or the longer edge to :obj:`size`.

    Returns:
        ~numpy.ndarray: A scaled image in CHW format.
    """
    _, H, W = img.shape
    # If resizing is not necessary, return the input as is.
    # FIX: the grouping parentheses are essential.  Without them `and` bound
    # tighter than `or`, so e.g. with fit_short=False the function returned
    # early (skipping the resize) whenever the *short* edge happened to
    # equal `size`.
    if fit_short and ((H <= W and H == size) or (W <= H and W == size)):
        return img
    if not fit_short and ((H >= W and H == size) or (W >= H and W == size)):
        return img
    if fit_short:
        if H < W:
            out_size = (size, int(size * W / H))
        else:
            out_size = (int(size * H / W), size)
    else:
        if H < W:
            out_size = (int(size * H / W), size)
        else:
            out_size = (size, int(size * W / H))
    return resize(img, out_size)
| mit | Python |
19dd9465efa0cde5efcd41f895a3ecd9e2e60174 | comment autodoc typehints | diyclassics/cltk,kylepjohnson/cltk,D-K-E/cltk,cltk/cltk | docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "The Classical Language Toolkit"
# NOTE(review): the copyright/author values embed literal double quotes and
# an e-mail address; these quotes will appear verbatim in rendered output --
# confirm they are intentional.
copyright = '2019, "Kyle P. Johnson <kyle@kyle-p-johnson.com>"'
author = '"Kyle P. Johnson <kyle@kyle-p-johnson.com>"'
# The full version, including alpha/beta/rc tags
release = "1.0.0a1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#
# TODO: Decide which of these are necessary
# TODO: Re-enable "sphinx_autodoc_typehints", which fails on RTD builds
#
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.napoleon",
    # "sphinx_autodoc_typehints", # Must come *after* sphinx.ext.napoleon.
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.extlinks",
    "sphinx.ext.ifconfig",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "The Classical Language Toolkit"
copyright = '2019, "Kyle P. Johnson <kyle@kyle-p-johnson.com>"'
author = '"Kyle P. Johnson <kyle@kyle-p-johnson.com>"'
# The full version, including alpha/beta/rc tags
release = "1.0.0a1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#
# TODO: Decide which of these are necessary
#
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints", # Must come *after* sphinx.ext.napoleon.
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.extlinks",
"sphinx.ext.ifconfig",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| mit | Python |
e98ed15ba3a94d5a39609a6f78918982ca0db1ea | add columns width check | faneshion/MatchZoo,faneshion/MatchZoo | matchzoo/datapack.py | matchzoo/datapack.py | """Matchzoo DataPack, pair-wise tuple (feature) and context as input."""
import typing
import pandas as pd
class DataPack(pd.DataFrame):
    """
    Matchzoo DataPack data structure, store dataframe and context.

    Example:
        >>> # features, context generate by processors.
        >>> features = [([1,3], [2,3]), ([3,0], [1,6])]
        >>> context = {'vocab_size': 2000}
        >>> dp = DataPack(data=features,
        ...               context=context)
        >>> dp.context
        {'vocab_size': 2000}
        >>> # sample without replacement for generation.
        >>> type(dp.sample(1))
        <class 'matchzoo.datapack.DataPack'>
        >>> dp.size
        2
        >>> features, context = dp.unpack()
    """

    # Attributes pandas propagates to frames derived from this one.
    _metadata = ['context']

    def __init__(self,
                 data: list,
                 context: dict = None,
                 index: list = None,
                 columns: list = None,
                 dtype: object = None,
                 copy: bool = True):
        """Initialize a pair-wise DataPack.

        :param data: rows of (text_left, text_right) pairs.
        :param context: shared metadata (e.g. vocabulary size).  A fresh
            empty dict is used when omitted -- a ``{}`` default argument
            would be shared (and mutable) across all instances.
        :param index: optional row index, forwarded to ``pd.DataFrame``.
        :param columns: column labels; defaults to
            ``['text_left', 'text_right']``.
        :param dtype: forwarded to ``pd.DataFrame``.
        :param copy: forwarded to ``pd.DataFrame``.
        :raises ValueError: if the resulting frame is not two columns wide.
        """
        if columns is None:
            columns = ['text_left', 'text_right']
        super().__init__(data=data,
                         index=index,
                         columns=columns,
                         dtype=dtype,
                         copy=copy)
        if self.shape[1] != 2:
            raise ValueError("Pair-wise input expected.")
        self.context = {} if context is None else context

    @property
    def _constructor(self) -> callable:
        """Subclass pd.DataFrame."""
        return DataPack._internal_ctor

    @classmethod
    def _internal_ctor(cls, *args, **kwargs):
        """Create subclass inputs to store context.

        Derived frames (slices, samples, ...) start without a context of
        their own; ``__init__`` turns the ``None`` into a fresh dict.
        """
        kwargs['context'] = None
        return cls(*args, **kwargs)

    @property
    def size(self) -> int:
        """Get size of the data pack (number of pairs)."""
        return self.shape[0]

    def unpack(self) -> typing.Tuple['DataPack', dict]:
        """Unpack DataPack into its frame and its context dict."""
        return self, self.context
| """Matchzoo DataPack, pair-wise tuple (feature) and context as input."""
import typing
import pandas as pd
class DataPack(pd.DataFrame):
    """
    Matchzoo DataPack data structure, store dataframe and context.

    Example:
        >>> # features, context generate by processors.
        >>> features = [([1,3], [2,3]), ([3,0], [1,6])]
        >>> context = {'vocab_size': 2000}
        >>> dp = DataPack(data=features,
        ...               context=context)
        >>> dp.context
        {'vocab_size': 2000}
        >>> # sample without replacement for generation.
        >>> type(dp.sample(1))
        <class 'matchzoo.datapack.DataPack'>
        >>> dp.size
        2
        >>> features, context = dp.unpack()
    """

    # Attributes pandas propagates to frames derived from this one.
    _metadata = ['context']

    def __init__(self,
                 data: list,
                 context: dict = None,
                 index: list = None,
                 columns: list = None,
                 dtype: object = None,
                 copy: bool = True):
        """Initialize a pair-wise DataPack.

        :param data: rows of (text_left, text_right) pairs.
        :param context: shared metadata (e.g. vocabulary size).  A fresh
            empty dict is used when omitted -- a ``{}`` default argument
            would be shared (and mutable) across all instances.
        :param index: optional row index, forwarded to ``pd.DataFrame``.
        :param columns: column labels; defaults to
            ``['text_left', 'text_right']``.
        :param dtype: forwarded to ``pd.DataFrame``.
        :param copy: forwarded to ``pd.DataFrame``.
        :raises ValueError: if the resulting frame is not two columns wide
            (added validation: this structure is pair-wise by contract).
        """
        if columns is None:
            columns = ['text_left', 'text_right']
        super().__init__(data=data,
                         index=index,
                         columns=columns,
                         dtype=dtype,
                         copy=copy)
        if self.shape[1] != 2:
            raise ValueError("Pair-wise input expected.")
        self.context = {} if context is None else context

    @property
    def _constructor(self) -> callable:
        """Subclass pd.DataFrame."""
        return DataPack._internal_ctor

    @classmethod
    def _internal_ctor(cls, *args, **kwargs):
        """Create subclass inputs to store context.

        Derived frames start without a context of their own; ``__init__``
        turns the ``None`` into a fresh dict.
        """
        kwargs['context'] = None
        return cls(*args, **kwargs)

    @property
    def size(self) -> int:
        """Get size of the data pack (number of pairs)."""
        return self.shape[0]

    def unpack(self) -> typing.Tuple['DataPack', dict]:
        """Unpack DataPack into its frame and its context dict."""
        return self, self.context
| apache-2.0 | Python |
a2b664c89e60985a0c67db5d1f18bc20b34ad5c6 | remove unused command | rr-/dotfiles,rr-/dotfiles,rr-/dotfiles | cfg/bubblesub/scripts/cc.py | cfg/bubblesub/scripts/cc.py | import re
from PyQt5 import QtWidgets
from bubblesub.api.cmd import BaseCommand
from bubblesub.cfg.menu import MenuCommand
from bubblesub.fmt.ass.event import AssEvent
from bubblesub.ui.util import load_dialog
try:
import pysubs2
except ImportError as ex:
raise CommandUnavailable(f"{ex.name} is not installed")
class LoadClosedCaptionsCommand(BaseCommand):
    """Import subtitle events from an external file as closed captions."""
    names = ["load-cc"]
    help_text = "Loads closed captions from a file."
    async def run(self):
        # Defer to the GUI executor so the file dialog runs on the UI thread.
        await self.api.gui.exec(self._run)
    async def _run(self, main_window: QtWidgets.QMainWindow) -> None:
        # Ask the user for a subtitle file; bail out silently on cancel.
        path = load_dialog(
            main_window, "Subtitles (*.ass *.srt);;All files (*.*)"
        )
        if not path:
            return
        source = pysubs2.load(str(path))
        # Append every caption as a new event inside a single undo step.
        # The caption text is stored in the event's `note` field rather than
        # its visible text -- presumably so captions serve as reference
        # material next to the working subtitles (TODO confirm intent).
        with self.api.undo.capture():
            for line in source:
                self.api.subs.events.append(
                    AssEvent(
                        start=line.start,
                        end=line.end,
                        note=line.text,
                        style=self.api.subs.default_style_name,
                    )
                )
COMMANDS = [LoadClosedCaptionsCommand]
MENU = [MenuCommand("&Load closed captions", "load-cc")]
| import re
from PyQt5 import QtWidgets
from bubblesub.api.cmd import BaseCommand
from bubblesub.cfg.menu import MenuCommand
from bubblesub.fmt.ass.event import AssEvent
from bubblesub.ui.util import load_dialog
try:
import pysubs2
except ImportError as ex:
raise CommandUnavailable(f"{ex.name} is not installed")
class LoadClosedCaptionsCommand(BaseCommand):
names = ["load-cc"]
help_text = "Loads closed captions from a file."
async def run(self):
await self.api.gui.exec(self._run)
async def _run(self, main_window: QtWidgets.QMainWindow) -> None:
path = load_dialog(
main_window, "Subtitles (*.ass *.srt);;All files (*.*)"
)
if not path:
return
source = pysubs2.load(str(path))
with self.api.undo.capture():
for line in source:
self.api.subs.events.append(
AssEvent(
start=line.start,
end=line.end,
note=line.text,
style=self.api.subs.default_style_name,
)
)
class CleanClosedCaptionsCommand(BaseCommand):
    """Strip common Japanese closed-caption punctuation from selected events.

    All substitutions operate on the event's `note` field, in one undo step.
    """
    names = ["clean-cc"]
    help_text = (
        "Cleans common closed caption punctuation from the selected events."
    )
    async def run(self):
        with self.api.undo.capture():
            for line in self.api.subs.selected_events:
                note = line.note
                # Turn ASS hard line breaks (\N) into real newlines first.
                note = re.sub(r"\\N", "\n", note)
                note = re.sub(r"\(\(\)\)", "", note)  # retrospection
                note = re.sub(r"\([^\(\)]*\)", "", note)  # actors
                note = re.sub(r"\[[^\[\]]*\]", "", note)  # actors
                note = re.sub("[➡→]", "", note)  # line continuation
                note = re.sub("≪", "", note)  # distant dialogues
                note = re.sub("[<>《》]", "", note)
                # NOTE(review): pattern and replacement render identically
                # here; the comment suggests this was a half-width '｡' ->
                # full-width '。' conversion mangled in transit -- verify the
                # original bytes before trusting this substitution.
                note = re.sub("。", "。", note)  # half-width period
                note = re.sub("([…!?])。", r"\1", note)  # unneeded periods
                note = note.rstrip("・")
                # NOTE(review): this may originally have targeted the
                # ideographic space (U+3000); confirm encoding.
                note = re.sub(" ", "", note)  # Japanese doesn't need spaces
                note = note.strip()
                line.note = note
COMMANDS = [LoadClosedCaptionsCommand, CleanClosedCaptionsCommand]
MENU = [
MenuCommand("&Load closed captions", "load-cc"),
MenuCommand("&Clean closed captions", "clean-cc"),
]
| mit | Python |
725f019451f827e7d565d721001ef31d40254b2e | Exclude README documents in _themes folder | crsmithdev/arrow | docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# Make the package root importable so autodoc can import `arrow` itself.
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'arrow'
copyright = '2019, Chris Smith'
author = 'Chris Smith'
# The full version, including alpha/beta/rc tags
release = '0.13.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# The _themes README files are excluded so Sphinx does not treat them as
# stray documents outside any toctree.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_themes/README.rst', '_themes/f6/README.rst']
master_doc = 'index'
source_suffix = '.rst'
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# 'f6' is a custom theme shipped locally under _themes (see html_theme_path).
html_theme = 'f6'
html_theme_path = ['_themes']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'arrow'
copyright = '2019, Chris Smith'
author = 'Chris Smith'
# The full version, including alpha/beta/rc tags
release = '0.13.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
source_suffix = '.rst'
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'f6'
html_theme_path = ['_themes']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| apache-2.0 | Python |
001ba693dda4f11a3b7bff4634a0336b8905728f | Support list in conversion | OTL/jps | jps/utils.py | jps/utils.py | import json
from .publisher import Publisher
from .common import DEFAULT_PUB_PORT
from .common import DEFAULT_HOST
from .env import get_master_host
class JsonMultiplePublisher(object):
    '''publish multiple topics by one json message

    Example:
    >>> p = JsonMultiplePublisher()
    >>> p.publish('{"topic1": 1.0, "topic2": {"x": 0.1}}')
    '''
    # NOTE(review): the default ``host=get_master_host()`` is evaluated once
    # at class-definition time, not per call -- later environment changes
    # are not picked up.  Confirm this is intended.
    def __init__(self, host=get_master_host(), pub_port=DEFAULT_PUB_PORT):
        # One wildcard publisher is reused for every topic.
        self._pub = Publisher('*', host=host, pub_port=pub_port)
    def publish(self, json_msg):
        '''Publish each top-level key of *json_msg* as its own topic.

        json_msg = '{"topic1": 1.0, "topic2": {"x": 0.1}}'
        Every entry is sent on the wire as '<topic> <json-encoded value>'.
        '''
        pyobj = json.loads(json_msg)
        for topic, value in pyobj.items():
            msg = '{topic} {data}'.format(topic=topic, data=json.dumps(value))
            self._pub.publish(msg)
class MultiplePublisher(object):
    """Publish to ``<base_topic_name><suffix>`` topics.

    A Publisher is created lazily on the first use of each suffix and
    cached for reuse.
    """
    def __init__(self, base_topic_name):
        # suffix -> Publisher cache
        self._publishers = {}
        self._base_topic_name = base_topic_name
    def publish(self, msg, topic_suffix=''):
        if topic_suffix not in self._publishers:
            self._publishers[topic_suffix] = Publisher(self._base_topic_name + topic_suffix)
        self._publishers[topic_suffix].publish(msg)
def to_obj(msg):
    """Deserialize a JSON string into attribute-access objects.

    Dicts become objects whose keys are attributes (recursively); a
    top-level JSON array yields a list, with non-dict elements passed
    through unchanged.

    :param msg: JSON text.
    :return: an object, or a list for a top-level JSON array.
    """
    class _obj(object):
        def __init__(self, d):
            # FIX: dict.iteritems() is Python-2-only (AttributeError on
            # Python 3); items() works on both.
            for a, b in d.items():
                if isinstance(b, (list, tuple)):
                    setattr(self, a, [_obj(x) if isinstance(x, dict) else x
                                      for x in b])
                else:
                    setattr(self, a, _obj(b) if isinstance(b, dict) else b)
    json_obj = json.loads(msg)
    if isinstance(json_obj, list):
        # FIX: previously non-dict elements crashed _obj(); keep them as-is,
        # mirroring the nested-list handling above.
        return [_obj(x) if isinstance(x, dict) else x for x in json_obj]
    return _obj(json_obj)
| import json
from .publisher import Publisher
from .common import DEFAULT_PUB_PORT
from .common import DEFAULT_HOST
from .env import get_master_host
class JsonMultiplePublisher(object):
'''publish multiple topics by one json message
Example:
>>> p = JsonMultiplePublisher()
>>> p.publish('{"topic1": 1.0, "topic2": {"x": 0.1}}')
'''
def __init__(self, host=get_master_host(), pub_port=DEFAULT_PUB_PORT):
self._pub = Publisher('*', host=host, pub_port=pub_port)
def publish(self, json_msg):
'''
json_msg = '{"topic1": 1.0, "topic2": {"x": 0.1}}'
'''
pyobj = json.loads(json_msg)
for topic, value in pyobj.items():
msg = '{topic} {data}'.format(topic=topic, data=json.dumps(value))
self._pub.publish(msg)
class MultiplePublisher(object):
def __init__(self, base_topic_name):
self._publishers = {}
self._base_topic_name = base_topic_name
def publish(self, msg, topic_suffix=''):
if topic_suffix not in self._publishers:
self._publishers[topic_suffix] = Publisher(self._base_topic_name + topic_suffix)
self._publishers[topic_suffix].publish(msg)
def to_obj(msg):
    """Deserialize a JSON object string into an attribute-access object.

    Nested dicts recurse; lists/tuples convert their dict elements.  The
    top-level JSON value must be an object.

    :param msg: JSON text encoding an object.
    """
    class _obj(object):
        def __init__(self, d):
            # FIX: dict.iteritems() is Python-2-only (AttributeError on
            # Python 3); items() works on both.
            for a, b in d.items():
                if isinstance(b, (list, tuple)):
                    setattr(self, a, [_obj(x) if isinstance(x, dict) else x
                                      for x in b])
                else:
                    setattr(self, a, _obj(b) if isinstance(b, dict) else b)
    json_obj = json.loads(msg)
    return _obj(json_obj)
| apache-2.0 | Python |
d73bc854c5cf34f57916ebfd4b1d51c21701d48a | Update paths for TravisCI | JamesPHoughton/pysd | tests/unit_test_xmile2py.py | tests/unit_test_xmile2py.py | import os
import unittest
import tempfile
from io import StringIO
from pysd.py_backend.xmile.xmile2py import translate_xmile
TARGET_STMX_FILE = 'test-models/tests/game/test_game.stmx'
class TestXmileConversion(unittest.TestCase):
    """End-to-end checks of xmile (.stmx) -> Python translation.

    NOTE(review): these use bare ``assert`` statements rather than
    ``self.assert*``; they are stripped under ``python -O`` -- confirm the
    suite always runs unoptimized (e.g. under pytest).
    """
    def test_python_file_creation(self):
        """Translation emits a distinct .py file next to the model copy."""
        with open(TARGET_STMX_FILE, 'r') as stmx:
            contents = stmx.read()
        # Write out contents to temporary file.  delete=False keeps the file
        # on disk after the with-block so translate_xmile can reopen it by
        # name.
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
            temp_file.write(contents)
        # Convert file (should not raise error)
        generated_file = translate_xmile(temp_file.name)
        # Check if both source file and python file exists
        try:
            assert generated_file != temp_file.name, "Accidental replacement of original model file!"
            assert generated_file.endswith('.py'), 'File created without python extension'
            assert os.path.exists(temp_file.name) and os.path.exists(generated_file), 'Expected files are missing'
        finally:
            # Always clean the temp copy; the generated file may legitimately
            # be missing if translation failed before writing it.
            os.remove(temp_file.name)
            try:
                os.remove(generated_file)
            except FileNotFoundError:
                # Okay if python file is missing
                pass
    def test_multiline_equation(self):
        """An equation split across lines still translates to one expression."""
        with open(TARGET_STMX_FILE, 'r') as stmx:
            contents = stmx.read()
        # Insert line break in equation definition
        contents = contents.replace('<eqn>(Stock+Constant)</eqn>', '<eqn>(Stock+\nConstant)</eqn>')
        # Write out contents to temporary file
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
            temp_file.write(contents)
        # Convert file (should not raise error)
        generated_file = translate_xmile(temp_file.name)
        with open(generated_file, 'r') as fp:
            contents = fp.read()
        idx = contents.find('stock() + constant()')
        try:
            assert idx > 0, 'Correct, generated, equation not found'
        finally:
            os.remove(temp_file.name)
            os.remove(generated_file)
| import os
import unittest
import tempfile
from io import StringIO
from pysd.py_backend.xmile.xmile2py import translate_xmile
class TestXmileConversion(unittest.TestCase):
def test_python_file_creation(self):
with open('tests/test-models/tests/game/test_game.stmx', 'r') as stmx:
contents = stmx.read()
# Write out contents to temporary file
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(contents)
# Convert file (should not raise error)
generated_file = translate_xmile(temp_file.name)
# Check if both source file and python file exists
try:
assert generated_file != temp_file.name, "Accidental replacement of original model file!"
assert generated_file.endswith('.py'), 'File created without python extension'
assert os.path.exists(temp_file.name) and os.path.exists(generated_file), 'Expected files are missing'
finally:
os.remove(temp_file.name)
try:
os.remove(generated_file)
except FileNotFoundError:
# Okay if python file is missing
pass
def test_multiline_equation(self):
with open('tests/test-models/tests/game/test_game.stmx', 'r') as stmx:
contents = stmx.read()
# Insert line break in equation definition
contents = contents.replace('<eqn>(Stock+Constant)</eqn>', '<eqn>(Stock+\nConstant)</eqn>')
# Write out contents to temporary file
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(contents)
# Convert file (should not raise error)
generated_file = translate_xmile(temp_file.name)
with open(generated_file, 'r') as fp:
contents = fp.read()
idx = contents.find('stock() + constant()')
try:
assert idx > 0, 'Correct, generated, equation not found'
finally:
os.remove(temp_file.name)
os.remove(generated_file)
| mit | Python |
47053a42b9053755aad052159dac845b34195297 | Use common testHarness in derived projects | CauldronDevelopmentLLC/cbang,CauldronDevelopmentLLC/cbang,CauldronDevelopmentLLC/cbang,CauldronDevelopmentLLC/cbang | config/test/__init__.py | config/test/__init__.py | from SCons.Script import *
import inspect
def run_tests(env):
import shlex
import subprocess
import sys
cmd = shlex.split(env.get('TEST_COMMAND'))
print('Executing:', cmd)
sys.exit(subprocess.call(cmd))
def generate(env):
import os
import distutils.spawn
python = distutils.spawn.find_executable('python3')
if not python: python = distutils.spawn.find_executable('python')
if not python: python = distutils.spawn.find_executable('python2')
if not python: python = 'python'
if env['PLATFORM'] == 'win32': python = python.replace('\\', '\\\\')
path = inspect.getfile(inspect.currentframe())
home = os.path.dirname(os.path.abspath(path)) + '/../..'
cmd = python + ' ' + home + '/tests/testHarness -C tests --diff-failed ' \
'--view-failed --view-unfiltered --save-failed --build'
if 'DOCKBOT_MASTER_PORT' in os.environ: cmd += ' --no-color'
env.CBAddVariables(('TEST_COMMAND', '`test` target command line', cmd))
if 'test' in COMMAND_LINE_TARGETS: env.CBAddConfigureCB(run_tests)
def exists(): return 1
| from SCons.Script import *
def run_tests(env):
import shlex
import subprocess
import sys
cmd = shlex.split(env.get('TEST_COMMAND'))
print('Executing:', cmd)
sys.exit(subprocess.call(cmd))
def generate(env):
import os
import distutils.spawn
python = distutils.spawn.find_executable('python3')
if not python: python = distutils.spawn.find_executable('python')
if not python: python = distutils.spawn.find_executable('python2')
if not python: python = 'python'
if env['PLATFORM'] == 'win32': python = python.replace('\\', '\\\\')
cmd = python + ' tests/testHarness -C tests --diff-failed --view-failed ' \
'--view-unfiltered --save-failed --build'
if 'DOCKBOT_MASTER_PORT' in os.environ: cmd += ' --no-color'
env.CBAddVariables(('TEST_COMMAND', '`test` target command line', cmd))
if 'test' in COMMAND_LINE_TARGETS: env.CBAddConfigureCB(run_tests)
def exists(): return 1
| lgpl-2.1 | Python |
5075fc57126ccec2ae7e993705e323fc5c75e965 | fix module babymonitoringcontroller | k-team/KHome,k-team/KHome,k-team/KHome | modules/baby_monitoring_controller/local_module.py | modules/baby_monitoring_controller/local_module.py | import module
from module import use_module
import fields
import logging
class BabyMonitoringController(module.Base):
update_rate = 10
sound_sensor = use_module('SoundSensor')
alarm_actuator = use_module('AlarmActuator')
class controller(fields.Base):
def __init__(self):
self.decibel_value = 97 #seuil du cri d'un nourisson
super(BabyMonitoringController.controller, self).__init__()
def always(self):
print "testons"
try:
sound_now = self.module.sound_sensor.sound()[1]
print 'sound_now= %s' % sound_now
print 'decibel_limit = %s' % self.decibel_value
except TypeError as e:
logger = logging.getLogger()
logger.exception(e)
else:
if sound_now > self.decibel_value:
self.module.alarm_actuator.alarm(True)
print 'Alert the baby is crying'
else:
self.module.alarm_actuator.alarm(False)
print 'Nothing to worry the baby is ok'
| import module
from module import use_module
import fields
class BabyMonitoringController(module.Base):
update_rate = 10
sound_sensor = use_module('SoundSensor')
alarm_actuator = use_module('AlarmActuator')
class controller(fields.Base):
def __init__(self):
self.decibel_value = 97 #seuil du cri d'un nourisson
super(BabyMonitoringController.controller, self).__init__()
def always(self):
#print 'sound = %s' % self.module.sound_sensor.sound()[1]
#print 'decibel_limit = %s' % self.decibel_value
if self.module.sound_sensor.sound()[1] > self.decibel_value:
self.module.alarm_actuator.alarm(True)
#print 'alerte'
else:
self.module.alarm_actuator.alarm(False)
#print 'pas alerte'
| mit | Python |
343c840835f688ae109093ea61357302cd78d9bf | Use relative import | synw/django-chartflo,synw/django-chartflo,synw/django-chartflo | chartflo/views.py | chartflo/views.py | # -*- coding: utf-8 -*-
from django.views.generic import TemplateView
from .factory import ChartDataPack
class ChartsView(TemplateView):
template_name = 'chartflo/charts.html'
chart_type = "pie"
title = ""
engine = "amcharts"
def get_data(self):
return {}
def get_context_data(self, **kwargs):
context = super(ChartsView, self).get_context_data(**kwargs)
# get data
P = ChartDataPack()
dataset = self.get_data()
# package the data
datapack = P.package("chart_id", self.title, dataset)
# options
datapack['legend'] = True
datapack['export'] = False
context['datapack'] = datapack
context["title"] = context["label"] = self.title
context["chart_url"] = self._get_template_url()
return context
def _get_template_url(self):
url = "chartflo/" + self.engine + "/" + self.chart_type + ".html"
return url
| # -*- coding: utf-8 -*-
from django.views.generic import TemplateView
from chartflo.factory import ChartDataPack
class ChartsView(TemplateView):
template_name = 'chartflo/charts.html'
chart_type = "pie"
title = ""
engine = "amcharts"
def get_data(self):
return {}
def get_context_data(self, **kwargs):
context = super(ChartsView, self).get_context_data(**kwargs)
# get data
P = ChartDataPack()
dataset = self.get_data()
# package the data
datapack = P.package("chart_id", self.title, dataset)
# options
datapack['legend'] = True
datapack['export'] = False
context['datapack'] = datapack
context["title"] = context["label"] = self.title
context["chart_url"] = self._get_template_url()
return context
def _get_template_url(self):
url = "chartflo/" + self.engine + "/" + self.chart_type + ".html"
return url
| mit | Python |
37831f549eddc014ab89cc7dba3616a133c774a2 | Add create_app method to __init__.py | patlub/BucketListAPI,patlub/BucketListAPI | api/BucketListAPI.py | api/BucketListAPI.py | from flask import Flask, jsonify
from modals.modals import User, Bucket, Item
from api.__init__ import create_app, db
app = create_app('DevelopmentEnv')
@app.errorhandler(404)
def page_not_found(e):
response = jsonify({'error': 'The request can not be completed'})
response.status_code = 404
return response
if __name__ == '__main__':
app.run()
| from flask import Flask, jsonify
from modals.modals import User, Bucket, Item
from api.__init__ import app, db
@app.errorhandler(404)
def page_not_found(e):
response = jsonify({'error': 'The request can not be completed'})
response.status_code = 404
return response
if __name__ == '__main__':
app.run()
| mit | Python |
061ebe64e96966149d66ee2e61792980dccbfe92 | test set and get | martinkirch/tofbot,soulaklabs/tofbot,soulaklabs/tofbot,p0nce/tofbot,p0nce/tofbot,martinkirch/tofbot,chmduquesne/tofbot,tofbot/tofbot,tofbot/tofbot | testbot.py | testbot.py | from bot import Tofbot
import unittest
from collections import namedtuple
def print_resp(msg):
print (" -> %s" % msg)
class TestTofbot(Tofbot):
def __init__(self, nick, name, chan, origin):
chans = [chan]
Tofbot.__init__(self, nick, name, chans, debug=False)
self.chan = chan
self.origin = origin
self.cb = None
self.joined = True
def msg(self, chan, msg):
if self.cb:
self.cb(msg)
else:
print_resp(msg)
def send(self, msg, cb=None):
print ("<- %s" % msg)
self.dispatch(self.origin, [msg, 'PRIVMSG', self.chan])
class BotInput:
def __init__(self, bot, msg):
self.bot = bot
self.msg = msg
def __enter__(self):
msgs = []
def capture_out(msg):
msgs.append(msg)
self.bot.cb = capture_out
self.bot.send(self.msg)
return msgs[0]
def __exit__(self, *args):
pass
class TestCase(unittest.TestCase):
def setUp(self):
nick = "testbot"
name = "Test Bot"
chan = "#chan"
Origin = namedtuple('Origin', ['sender', 'nick'])
origin = Origin('sender', 'nick')
self.bot = TestTofbot(nick, name, chan, origin)
def test_set_allowed(self):
msg = "!set autoTofadeThreshold 9000"
self.bot.send(msg)
with BotInput(self.bot, "!get autoTofadeThreshold") as l:
self.assertEqual(l, "autoTofadeThreshold = 9000")
| from bot import Tofbot
import unittest
def print_resp(msg):
print (" -> %s" % msg)
class TestTofbot(Tofbot):
def __init__(self, nick, name, chan, origin):
chans = [chan]
Tofbot.__init__(self, nick, name, chans, debug=False)
self.chan = chan
self.origin = origin
self.cb = print_resp
def msg(self, chan, msg):
if self.cb:
self.cb(msg)
else:
print_resp(msg)
def send(self, msg, cb=None):
print ("<- %s" % msg)
saved_cb = self.cb
self.cb = cb
self.dispatch(self.origin, [msg, 'PRIVMSG', self.chan])
self.cb = saved_cb
class TestCase(unittest.TestCase):
def setUp(self):
nick = "testbot"
name = "Test Bot"
chan = "#chan"
password = ""
self.bot = TestTofbot(nick, name, chan, password)
| bsd-2-clause | Python |
6b36bdff8393834cfb9890b88fa03ab5ba3acb6e | Support JSON and arrays in JavaScript action in Mistral | StackStorm/mistral,openstack/mistral,StackStorm/mistral,openstack/mistral | mistral/utils/javascript.py | mistral/utils/javascript.py | # Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
from oslo_utils import importutils
from mistral import exceptions as exc
_PYV8 = importutils.try_import('PyV8')
class JSEvaluator(object):
@classmethod
@abc.abstractmethod
def evaluate(cls, script, context):
"""Executes given JavaScript.
"""
pass
class V8Evaluator(JSEvaluator):
@classmethod
def evaluate(cls, script, context):
if not _PYV8:
raise exc.MistralException(
"PyV8 module is not available. Please install PyV8."
)
with _PYV8.JSContext() as ctx:
# Prepare data context and way for interaction with it.
ctx.eval('$ = %s' % json.dumps(context))
result = ctx.eval(script)
return _PYV8.convert(result)
# TODO(nmakhotkin) Make it configurable.
EVALUATOR = V8Evaluator
def evaluate(script, context):
return EVALUATOR.evaluate(script, context)
| # Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
from oslo_utils import importutils
from mistral import exceptions as exc
_PYV8 = importutils.try_import('PyV8')
class JSEvaluator(object):
@classmethod
@abc.abstractmethod
def evaluate(cls, script, context):
"""Executes given JavaScript.
"""
pass
class V8Evaluator(JSEvaluator):
@classmethod
def evaluate(cls, script, context):
if not _PYV8:
raise exc.MistralException(
"PyV8 module is not available. Please install PyV8."
)
with _PYV8.JSContext() as ctx:
# Prepare data context and way for interaction with it.
ctx.eval('$ = %s' % json.dumps(context))
return ctx.eval(script)
# TODO(nmakhotkin) Make it configurable.
EVALUATOR = V8Evaluator
def evaluate(script, context):
return EVALUATOR.evaluate(script, context)
| apache-2.0 | Python |
353643c612414e4292d419f373c551029af93b90 | add tests for default serve behavior for new static application | kezabelle/clastic,kezabelle/clastic | clastic/tests/test_serve.py | clastic/tests/test_serve.py | from __future__ import unicode_literals
from nose.tools import ok_, eq_
import os
from werkzeug.test import Client
from clastic import Application, default_response, Response
from clastic.session import CookieSessionMiddleware
from common import session_hello_world
_CUR_DIR = os.path.dirname(os.path.abspath(__file__))
def test_serve():
cookie_session = CookieSessionMiddleware()
app = Application([('/', session_hello_world, default_response),
('/<name>/', session_hello_world, default_response)],
middlewares=[cookie_session])
yield ok_, app.serve(_jk_just_testing=True, static_path=_CUR_DIR)
cl = Client(app, Response)
yield eq_, cl.get('/').status_code, 200
yield eq_, cl.get('/_meta/').status_code, 200
yield eq_, cl.get('/static/test_serve.py').status_code, 200
| from __future__ import unicode_literals
from nose.tools import ok_
from clastic import Application, default_response
from clastic.session import CookieSessionMiddleware
from common import session_hello_world
def test_serve():
cookie_session = CookieSessionMiddleware()
app = Application([('/', session_hello_world, default_response),
('/<name>/', session_hello_world, default_response)],
middlewares=[cookie_session])
yield ok_, app.serve(_jk_just_testing=True)
| bsd-3-clause | Python |
ffa8f79fe15621081acbb220a2a4dfd3d4d6d500 | Correct env_key input argument default value | Neurita/galton | galton/utils/logger.py | galton/utils/logger.py | # -*- coding: utf-8 -*-
import os
import os.path as op
import yaml
import logging.config
from .text_files import read
from ..config import LOG_LEVEL
MODULE_NAME = __name__.split('.')[0]
def setup_logging(log_config_file=op.join(op.dirname(__file__), 'logger.yml'),
log_default_level=LOG_LEVEL,
env_key=MODULE_NAME.upper() + '_LOG_CFG'):
"""Setup logging configuration."""
path = log_config_file
value = os.getenv(env_key, None)
if value:
path = value
if op.exists(path):
log_cfg = yaml.load(read(path).format(MODULE_NAME))
logging.config.dictConfig(log_cfg)
#print('Started logging using config file {0}.'.format(path))
else:
logging.basicConfig(level=log_default_level)
#print('Started default logging. Could not find config file '
# 'in {0}.'.format(path))
log = logging.getLogger(__name__)
log.debug('Start logging.')
| # -*- coding: utf-8 -*-
import os
import os.path as op
import yaml
import logging.config
from .text_files import read
from ..config import LOG_LEVEL
MODULE_NAME = __name__.split('.')[0]
def setup_logging(log_config_file=op.join(op.dirname(__file__), 'logger.yml'),
log_default_level=LOG_LEVEL,
env_key=MODULE_NAME + '_LOG_CFG'):
"""Setup logging configuration."""
path = log_config_file
value = os.getenv(env_key, None)
if value:
path = value
if op.exists(path):
log_cfg = yaml.load(read(path).format(MODULE_NAME))
logging.config.dictConfig(log_cfg)
#print('Started logging using config file {0}.'.format(path))
else:
logging.basicConfig(level=log_default_level)
#print('Started default logging. Could not find config file '
# 'in {0}.'.format(path))
log = logging.getLogger(__name__)
log.debug('Start logging.')
| bsd-3-clause | Python |
64b4e175d59f3f7c06ef009c0e0ff2cbf07e8e1e | fix flake8 | he7d3r/revscoring,wiki-ai/revscoring | revscoring/datasources/meta/tests/test_vectorizers.py | revscoring/datasources/meta/tests/test_vectorizers.py |
from .. import vectorizers
from revscoring.datasources import revision_oriented as ro
from revscoring.dependencies import solve
from revscoring.features import wikitext
test_vectors = {'a': [1] * 200,
'b': [1] * 200,
'c': [1] * 200}
def test_vectorize():
wv = vectorizers.word2vec(wikitext.revision.datasources.tokens,
test_vectors, dim=200, name='word vectors')
vector = solve(wv, cache={ro.revision.text: 'a bv c d'})
assert len(vector) == 7
assert len(vector[0]) == 200
| import numpy as np
from .. import vectorizers
from revscoring.datasources import revision_oriented as ro
from revscoring.dependencies import solve
from revscoring.features import wikitext
test_vectors = {'a': [1]*200,
'b': [1]*200,
'c': [1]*200}
def test_vectorize():
wv = vectorizers.word2vec(wikitext.revision.datasources.tokens,
test_vectors, dim=200, name='word vectors')
vector = solve(wv, cache={ro.revision.text: 'a bv c d'})
assert len(vector) == 7
assert len(vector[0]) == 200
| mit | Python |
1eb809e15347b89d61077a4f4be9a3d0d005bc77 | 修改BUG:创建的用户都是超级用户 | ZhangXiaoyu-Chief/sandwich,ZhangXiaoyu-Chief/sandwich,ZhangXiaoyu-Chief/sandwich,ZhangXiaoyu-Chief/sandwich,ZhangXiaoyu-Chief/sandwich | api/views/account.py | api/views/account.py | from django.shortcuts import render, HttpResponse
from api.libs.base import CoreView
from account.models import UserProfile
from django.contrib.auth.models import User
# Create your views here.
class Account(CoreView):
"""
用户相关接口
"""
def get_list(self):
"""
获取用户列表接口
:return:
"""
user_list = []
user_objs = UserProfile.objects.all()
for user_obj in user_objs:
user_list.append(user_obj.get_info())
self.response_data['data'] = user_list
def post_create(self):
"""
创建用户接口
:return:
"""
username = self.parameters('username')
password = self.parameters('password')
email = self.parameters('email')
is_active = True if self.parameters('active') == 'true' else False
is_superuser = True if self.parameters('is_superuser') == 'true' else False
nickname = self.parameters('nickname')
avatar = self.parameters('avatar')
user_obj = User.objects.create(username=username, password=password, email=email,
is_superuser=is_superuser, is_active=is_active)
user_profile_obj = UserProfile.objects.create(user=user_obj, nickname=nickname, avatar=avatar)
self.response_data['data'] = user_profile_obj.get_info()
| from django.shortcuts import render, HttpResponse
from api.libs.base import CoreView
from account.models import UserProfile
from django.contrib.auth.models import User
# Create your views here.
class Account(CoreView):
"""
用户相关接口
"""
def get_list(self):
"""
获取用户列表接口
:return:
"""
user_list = []
user_objs = UserProfile.objects.all()
for user_obj in user_objs:
user_list.append(user_obj.get_info())
self.response_data['data'] = user_list
def post_create(self):
"""
创建用户接口
:return:
"""
username = self.parameters('username')
password = self.parameters('password')
email = self.parameters('email')
is_active = True if self.parameters('active') == 'true' else False
is_superuser = True if self.parameters('is_superuser') else False
nickname = self.parameters('nickname')
avatar = self.parameters('avatar')
user_obj = User.objects.create(username=username, password=password, email=email,
is_superuser=is_superuser, is_active=is_active)
user_profile_obj = UserProfile.objects.create(user=user_obj, nickname=nickname, avatar=avatar)
self.response_data['data'] = user_profile_obj.get_info()
| apache-2.0 | Python |
b3cbc33e9cd43e22500a484599010146fbb7f012 | add missing description field to PolicyForm | dpausp/arguments,dpausp/arguments,dpausp/arguments,dpausp/arguments | src/ekklesia_portal/concepts/policy/policy_contracts.py | src/ekklesia_portal/concepts/policy/policy_contracts.py | from colander import Length
from deform.widget import SelectWidget, TextAreaWidget
from ekklesia_portal.enums import Majority, VotingSystem
from ekklesia_portal.helper.contract import Schema, Form, decimal_property, int_property, string_property, enum_property
from ekklesia_portal.helper.translation import _
class PolicySchema(Schema):
name = string_property(title=_('name'), validator=Length(max=64))
description = string_property(title=_('description'), validator=Length(max=4000), missing='')
majority = enum_property(Majority, title=_('majority'))
proposition_expiration = int_property(title=_('proposition_expiration'))
qualification_minimum = int_property(title=_('qualification_minimum'))
qualification_quorum = decimal_property(title=_('qualification_quorum'))
range_max = int_property(title=_('range_max'))
range_small_max = int_property(title=_('range_small_max'))
range_small_options = int_property(title=_('range_small_options'))
secret_minimum = int_property(title=_('secret_minimum'))
secret_quorum = decimal_property(title=_('secret_quorum'))
submitter_minimum = int_property(title=_('submitter_minimum'))
voting_duration = int_property(title=_('voting_duration'))
voting_system = enum_property(VotingSystem, title=_('voting_system'))
class PolicyForm(Form):
def __init__(self, request, action):
super().__init__(PolicySchema(), request, action, buttons=("submit", ))
def prepare_for_render(self, items_for_selects):
widgets = {
'description': TextAreaWidget(rows=8),
'majority': SelectWidget(values=items_for_selects['majority']),
'voting_system': SelectWidget(values=items_for_selects['voting_system']),
}
self.set_widgets(widgets)
| from colander import Length
from deform.widget import SelectWidget
from ekklesia_portal.enums import Majority, VotingSystem
from ekklesia_portal.helper.contract import Schema, Form, decimal_property, int_property, string_property, enum_property
from ekklesia_portal.helper.translation import _
class PolicySchema(Schema):
name = string_property(title=_('name'), validator=Length(max=64))
majority = enum_property(Majority, title=_('majority'))
proposition_expiration = int_property(title=_('proposition_expiration'))
qualification_minimum = int_property(title=_('qualification_minimum'))
qualification_quorum = decimal_property(title=_('qualification_quorum'))
range_max = int_property(title=_('range_max'))
range_small_max = int_property(title=_('range_small_max'))
range_small_options = int_property(title=_('range_small_options'))
secret_minimum = int_property(title=_('secret_minimum'))
secret_quorum = decimal_property(title=_('secret_quorum'))
submitter_minimum = int_property(title=_('submitter_minimum'))
voting_duration = int_property(title=_('voting_duration'))
voting_system = enum_property(VotingSystem, title=_('voting_system'))
class PolicyForm(Form):
def __init__(self, request, action):
super().__init__(PolicySchema(), request, action, buttons=("submit", ))
def prepare_for_render(self, items_for_selects):
widgets = {
'majority': SelectWidget(values=items_for_selects['majority']),
'voting_system': SelectWidget(values=items_for_selects['voting_system']),
}
self.set_widgets(widgets)
| agpl-3.0 | Python |
618ad9ef667dfe269fd4d9bf926e4b7e22913d18 | Update russianroulette.py | kallerdaller/Cogs-Yorkfield | russianroulette/russianroulette.py | russianroulette/russianroulette.py | import discord
from discord.ext import commands
import os
from .utils.dataIO import dataIO
import time
import asyncio
class Russianroulette:
"""Russian Roulette"""
def __init__(self, bot):
self.bot = bot
self.file_path = "data/russianroulette/russianroulette.json"
self.json_data = dataIO.load_json(self.file_path)
@commands.command(pass_context=True, aliases=["rr", "russian"])
async def russianroulette(self, ctx, type):
"""Type = start or join"""
#Your code will go here
user = ctx.message.author
bank = self.bot.get_cog("Economy").bank
if type.lower() == "start":
if self.json_data["System"]["Status"] == "Stopped":
await self.bot.say("Bet")
betAmount()
else:
await self.bot.say("Start")
elif type.lower() == "join":
await self.bot.say("Join")
else:
await self.bot.say(user.mention + " This command only accepts 'start' or 'join'")
def betAmount():
bet = await self.bot.wait_for_message(timeout=60, author=user, check=check)
def check_folders():
if not os.path.exists("data/russianroulette"):
print("Creating data/russianroulette floder...")
os.makedirs("data/russianroulette")
def check_files():
system = {"System": {"Pot": 0,
"Active": False,
"Bet": 0,
"Roulette Initial": False,
"Status": "Stopped",
"Player Count": 0},
"Players": {},
"Config": {"Min Bet": 10}}
f = "data/russianroulette/russianroulette.json"
if not dataIO.is_valid_json(f):
print("Creating defualt russianroulette.json...")
dataIO.save_json(f, system)
def setup(bot):
check_folders()
check_files()
n = Russianroulette(bot)
bot.add_cog(n)
| import discord
from discord.ext import commands
import os
from .utils.dataIO import dataIO
import time
import asyncio
class Russianroulette:
"""Russian Roulette"""
def __init__(self, bot):
self.bot = bot
self.file_path = "data/russianroulette/russianroulette.json"
self.json_data = dataIO.load_json(self.file_path)
def betAmount():
bet = await self.bot.wait_for_message(timeout=60, author=user, check=check)
@commands.command(pass_context=True, aliases=["rr", "russian"])
async def russianroulette(self, ctx, type):
"""Type = start or join"""
#Your code will go here
user = ctx.message.author
bank = self.bot.get_cog("Economy").bank
if type.lower() == "start":
if self.json_data["System"]["Status"] == "Stopped":
await self.bot.say("Bet")
betAmount()
else:
await self.bot.say("Start")
elif type.lower() == "join":
await self.bot.say("Join")
else:
await self.bot.say(user.mention + " This command only accepts 'start' or 'join'")
def check_folders():
if not os.path.exists("data/russianroulette"):
print("Creating data/russianroulette floder...")
os.makedirs("data/russianroulette")
def check_files():
system = {"System": {"Pot": 0,
"Active": False,
"Bet": 0,
"Roulette Initial": False,
"Status": "Stopped",
"Player Count": 0},
"Players": {},
"Config": {"Min Bet": 10}}
f = "data/russianroulette/russianroulette.json"
if not dataIO.is_valid_json(f):
print("Creating defualt russianroulette.json...")
dataIO.save_json(f, system)
def setup(bot):
check_folders()
check_files()
n = Russianroulette(bot)
bot.add_cog(n)
| mit | Python |
86b3e67c507cbc0360a93776b61b177dd201e48e | Read JSOn data attempt 2 | kallerdaller/Cogs-Yorkfield | russianroulette/russianroulette.py | russianroulette/russianroulette.py | import discord
from discord.ext import commands
import os
from .utils.dataIO import dataIO
import time
import asyncio
class Russianroulette:
"""Russian Roulette"""
def __init__(self, bot):
self.bot = bot
self.file_path = "data/russianroulette/russianroulette.json"
self.json_data = dataIO.load_json(self.file_path)
@commands.command(pass_context=True, aliases=["rr", "russian"])
async def russianroulette(self, ctx, type):
"""Type = start or join"""
#Your code will go here
user = ctx.message.author
bank = self.bot.get_cog("Economy").bank
if type.lower() == "start":
if self.json_data.status == "Stopped":
await self.bot.say("Bet")
else:
await self.bot.say("Start")
elif type.lower() == "join":
await self.bot.say("Join")
else:
await self.bot.say(user.mention + " This command only accepts 'start' or 'join'")
def check_folders():
if not os.path.exists("data/russianroulette"):
print("Creating data/russianroulette floder...")
os.makedirs("data/russianroulette")
def check_files():
system = {"System": {"Pot": 0,
"Active": False,
"Start Bet": 0,
"Roulette Initial": False,
"Player Count": 0},
"Players": {},
"Status": "Stopped",
"Config": {"Min Bet": 10}}
f = "data/russianroulette/russianroulette.json"
if not dataIO.is_valid_json(f):
print("Creating defualt russianroulette.json...")
dataIO.save_json(f, system)
def setup(bot):
check_folders()
check_files()
n = Russianroulette(bot)
bot.add_cog(n)
| import discord
from discord.ext import commands
import os
from .utils.dataIO import dataIO
import time
import asyncio
class Russianroulette:
"""Russian Roulette"""
def __init__(self, bot):
self.bot = bot
self.file_path = "data/russianroulette/russianroulette.json"
self.json_data = dataIO.load_json(self.file_path)
@commands.command(pass_context=True, aliases=["rr", "russian"])
async def russianroulette(self, ctx, type):
"""Type = start or join"""
#Your code will go here
user = ctx.message.author
bank = self.bot.get_cog("Economy").bank
if type.lower() == "start":
if json_data.status == "Stopped":
await self.bot.say("Bet")
else:
await self.bot.say("Start")
elif type.lower() == "join":
await self.bot.say("Join")
else:
await self.bot.say(user.mention + " This command only accepts 'start' or 'join'")
def check_folders():
if not os.path.exists("data/russianroulette"):
print("Creating data/russianroulette floder...")
os.makedirs("data/russianroulette")
def check_files():
system = {"System": {"Pot": 0,
"Active": False,
"Start Bet": 0,
"Roulette Initial": False,
"Player Count": 0},
"Players": {},
"Status": "Stopped",
"Config": {"Min Bet": 10}}
f = "data/russianroulette/russianroulette.json"
if not dataIO.is_valid_json(f):
print("Creating defualt russianroulette.json...")
dataIO.save_json(f, system)
def setup(bot):
check_folders()
check_files()
n = Russianroulette(bot)
bot.add_cog(n)
| mit | Python |
7df447bf561ca5148c9397d1621f81f4de6da5ad | Remove hello command | novafloss/populous | populous/cli.py | populous/cli.py | import click
@click.group()
@click.version_option()
def cli():
pass
| import click
@click.group()
@click.version_option()
def cli():
pass
@cli.command()
def hello():
click.echo("Hello World!")
| mit | Python |
b28e67a541e39b687a6484ea71a40efdbb4ebbf3 | Bump to 0.9.5 | czpython/aldryn-newsblog,czpython/aldryn-newsblog,czpython/aldryn-newsblog,czpython/aldryn-newsblog | aldryn_newsblog/__init__.py | aldryn_newsblog/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.9.5'
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.9.4.1'
| bsd-3-clause | Python |
848530a53838a6ce47b0023670444e6773bf3e27 | Make the transaction admin a bit more usable | gregplaysguitar/django-dps,takeflight/django-dps | dps/admin.py | dps/admin.py | from dps.models import Transaction
from django.contrib import admin
from django.contrib.contenttypes import generic
class TransactionAdmin(admin.ModelAdmin):
list_display = ('amount', 'status', 'transaction_type', 'content_object',
'created',)
search_fields = ('secret', )
class TransactionInlineAdmin(generic.GenericTabularInline):
model = Transaction
def has_add_permission(self, request):
return False
admin.site.register(Transaction, TransactionAdmin)
| from dps.models import Transaction
from django.contrib import admin
from django.contrib.contenttypes import generic
class TransactionAdmin(admin.ModelAdmin):
pass
class TransactionInlineAdmin(generic.GenericTabularInline):
model = Transaction
def has_add_permission(self, request):
return False
admin.site.register(Transaction, TransactionAdmin)
| bsd-2-clause | Python |
bab5a9c6e6d1ceead96e33b9a48618c29c14ed3e | Remove yocto forward in status | amm0nite/unicornclient,amm0nite/unicornclient | unicornclient/routines/status.py | unicornclient/routines/status.py | from unicornclient import spy
from unicornclient import routine
from unicornclient import message
class Routine(routine.Routine):
def __init__(self):
routine.Routine.__init__(self)
def process(self, data):
self.send_status()
def send_status(self):
status = self.get_status()
payload = {
'type':'status',
'status': status
}
self.manager.send(message.Message(payload))
def get_status(self):
status = {
'serial' : spy.get_serial(),
'machine_id': spy.get_machine_id(),
'hostname': spy.get_hostname(),
'kernel': spy.get_kernel(),
'uptime': spy.get_uptime(),
'local_ip': spy.get_local_ip(),
'addresses': spy.get_macs(),
'temp': spy.get_temp(),
'ssid': spy.get_ssid(),
'signal_level': spy.get_signal_level(),
'written_kbytes': spy.get_written_kbytes()
}
return status
| from unicornclient import spy
from unicornclient import routine
from unicornclient import message
class Routine(routine.Routine):
def __init__(self):
routine.Routine.__init__(self)
def process(self, data):
self.send_status()
def send_status(self):
status = self.get_status()
payload = {
'type':'status',
'status': status
}
self.manager.forward('yocto', {})
self.manager.send(message.Message(payload))
def get_status(self):
status = {
'serial' : spy.get_serial(),
'machine_id': spy.get_machine_id(),
'hostname': spy.get_hostname(),
'kernel': spy.get_kernel(),
'uptime': spy.get_uptime(),
'local_ip': spy.get_local_ip(),
'addresses': spy.get_macs(),
'temp': spy.get_temp(),
'ssid': spy.get_ssid(),
'signal_level': spy.get_signal_level(),
'written_kbytes': spy.get_written_kbytes()
}
return status
| mit | Python |
b3ccb1d6a8525f2fe699c07e1d7c20bf10974522 | Remove unused var | DemocracyClub/EveryElection,DemocracyClub/EveryElection,DemocracyClub/EveryElection | every_election/apps/elections/query_helpers.py | every_election/apps/elections/query_helpers.py | import requests
from django.contrib.gis.geos import Point
class PostcodeError(Exception):
pass
class BasePostcodeLookup:
def __init__(self, postcode):
self.postcode = postcode.replace(' ', '')
@property
def point(self):
raise NotImplementedError
class MaPitPostcodeLookup(BasePostcodeLookup):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fetch_from_mapit()
def fetch_from_mapit(self):
if hasattr(self, 'mapit_data'):
return self.mapit_data
req = requests.get("https://mapit.mysociety.org/postcode/{}".format(
self.postcode
))
self.mapit_data = req.json()
return self.mapit_data
@property
def point(self):
return Point(
self.mapit_data['wgs84_lon'],
self.mapit_data['wgs84_lat']
)
class ONSPDStaticJsonLookup(BasePostcodeLookup):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.fetch()
except:
raise PostcodeError()
def fetch(self):
if hasattr(self, 'data'):
return self.data
url_fmt = "https://s3-eu-west-1.amazonaws.com/onspd-static-json/{}"
req = requests.get(url_fmt.format(self.postcode))
if req.status_code != 200:
raise PostcodeError
self.data = req.json()
return self.data
@property
def point(self):
return Point(
self.data['wgs84_lon'],
self.data['wgs84_lat']
)
def get_point_from_postcode(postcode):
methods = [
ONSPDStaticJsonLookup,
MaPitPostcodeLookup,
]
for method in methods:
try:
return method(postcode).point
except PostcodeError:
continue
raise PostcodeError
| import requests
from django.contrib.gis.geos import Point
class PostcodeError(Exception):
pass
class BasePostcodeLookup:
def __init__(self, postcode):
self.postcode = postcode.replace(' ', '')
@property
def point(self):
raise NotImplementedError
class MaPitPostcodeLookup(BasePostcodeLookup):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fetch_from_mapit()
def fetch_from_mapit(self):
if hasattr(self, 'mapit_data'):
return self.mapit_data
req = requests.get("https://mapit.mysociety.org/postcode/{}".format(
self.postcode
))
self.mapit_data = req.json()
return self.mapit_data
@property
def point(self):
return Point(
self.mapit_data['wgs84_lon'],
self.mapit_data['wgs84_lat']
)
class ONSPDStaticJsonLookup(BasePostcodeLookup):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.fetch()
except:
raise PostcodeError()
def fetch(self):
if hasattr(self, 'data'):
return self.data
url_fmt = "https://s3-eu-west-1.amazonaws.com/onspd-static-json/{}"
req = requests.get(url_fmt.format(self.postcode))
if req.status_code != 200:
raise PostcodeError
self.data = req.json()
return self.data
@property
def point(self):
return Point(
self.data['wgs84_lon'],
self.data['wgs84_lat']
)
def get_point_from_postcode(postcode):
point = None
methods = [
ONSPDStaticJsonLookup,
MaPitPostcodeLookup,
]
for method in methods:
try:
return method(postcode).point
except PostcodeError:
continue
raise PostcodeError
| bsd-3-clause | Python |
69102f560eced438838f0377fe351adc7b542c1e | Change version. | Kami/python-yubico-client | yubico/__init__.py | yubico/__init__.py | __version__ = (1, 6, 3)
| __version__ = (1, 6, 3, 'dev')
| bsd-3-clause | Python |
9de9c50ee2c166efd42f88a730973c7ebd9b108b | add csv | RaphaelLapierre/INF4215 | TP3/RiskAI/Main.py | TP3/RiskAI/Main.py | import signal
from Controller import *
from RandomAI import *
from CarreRougeAi import CarreRougeAI
import csv
finish = False
def signal_handler(signal, frame):
global finish
print "Arret demande"
finish = True
signal.signal(signal.SIGINT, signal_handler)
ai1 = RandomAI() # agent adverse sans apprentissage machine
ai2 = CarreRougeAI() # agent adverse aleatoire
nbWinAI2 = 0
winRate = []
lastTenWin = 0
for i in xrange(10000):
if finish:
break
controller = Controller("Americas", "Normal", "Random", ai1, ai2)
winningPlayerIndex = controller.play()
if winningPlayerIndex == 1:
nbWinAI2 += 1
lastTenWin += 1
if i%10 == 0:
winRate.append(lastTenWin/10.0)
ai2.save()
writer = csv.writer(open("data.csv", 'wb'), delimiter=';')
writer.writerow(winRate)
print "Nb win of ai2 : ", nbWinAI2
| import signal
from Controller import *
from RandomAI import *
from CarreRougeAi import CarreRougeAI
finish = False
def signal_handler(signal, frame):
global finish
print "Arret demande"
finish = True
signal.signal(signal.SIGINT, signal_handler)
ai1 = RandomAI() # agent adverse sans apprentissage machine
ai2 = CarreRougeAI() # agent adverse aleatoire
nbWinAI2 = 0
for i in xrange(100):
if finish:
break
controller = Controller("Americas", "Normal", "Random", ai1, ai2)
winningPlayerIndex = controller.play()
if winningPlayerIndex == 1:
nbWinAI2 += 1
ai2.save()
print "Nb win of ai2 : ", nbWinAI2
| mit | Python |
47671cfcf7900d66fc320c24772f73888b777a95 | update to remove old whitenoise setup | bcgov/gwells,SethGreylyn/gwells,rstens/gwells,SethGreylyn/gwells,rstens/gwells,bcgov/gwells,rstens/gwells,SethGreylyn/gwells,rstens/gwells,bcgov/gwells,SethGreylyn/gwells,bcgov/gwells | project/wsgi.py | project/wsgi.py | """
WSGI config for project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
application = get_wsgi_application()
| """
WSGI config for project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(get_wsgi_application())
| apache-2.0 | Python |
554572f327e4b9c920f65b416bfc6a3a5b549846 | Reset the num threads to the env variable, not the default | numba/numba,stonebig/numba,cpcloud/numba,gmarkall/numba,sklam/numba,stuartarchibald/numba,seibert/numba,seibert/numba,cpcloud/numba,numba/numba,sklam/numba,IntelLabs/numba,stuartarchibald/numba,IntelLabs/numba,IntelLabs/numba,gmarkall/numba,cpcloud/numba,seibert/numba,stuartarchibald/numba,numba/numba,sklam/numba,seibert/numba,gmarkall/numba,seibert/numba,stuartarchibald/numba,cpcloud/numba,numba/numba,gmarkall/numba,cpcloud/numba,IntelLabs/numba,numba/numba,stonebig/numba,stonebig/numba,stuartarchibald/numba,stonebig/numba,IntelLabs/numba,sklam/numba,stonebig/numba,gmarkall/numba,sklam/numba | numba/tests/npyufunc/test_parallel_env_variable.py | numba/tests/npyufunc/test_parallel_env_variable.py | from numba.np.ufunc.parallel import get_thread_count
from os import environ as env
from numba.core import config
import unittest
class TestParallelEnvVariable(unittest.TestCase):
"""
Tests environment variables related to the underlying "parallel"
functions for npyufuncs.
"""
_numba_parallel_test_ = False
def test_num_threads_variable(self):
"""
Tests the NUMBA_NUM_THREADS env variable behaves as expected.
"""
key = 'NUMBA_NUM_THREADS'
current = str(getattr(env, key, config.NUMBA_NUM_THREADS))
threads = "3154"
env[key] = threads
try:
config.reload_config()
except RuntimeError as e:
# This test should fail if threads have already been launched
self.assertIn("Cannot set NUMBA_NUM_THREADS", e.args[0])
else:
self.assertEqual(threads, str(get_thread_count()))
self.assertEqual(threads, str(config.NUMBA_NUM_THREADS))
finally:
# reset the env variable/set to default. Should not fail even if
# threads are launched because the value is the same.
env[key] = current
config.reload_config()
if __name__ == '__main__':
unittest.main()
| from numba.np.ufunc.parallel import get_thread_count
from os import environ as env
from numba.core import config
import unittest
class TestParallelEnvVariable(unittest.TestCase):
"""
Tests environment variables related to the underlying "parallel"
functions for npyufuncs.
"""
_numba_parallel_test_ = False
def test_num_threads_variable(self):
"""
Tests the NUMBA_NUM_THREADS env variable behaves as expected.
"""
key = 'NUMBA_NUM_THREADS'
current = str(getattr(env, key, config.NUMBA_DEFAULT_NUM_THREADS))
threads = "3154"
env[key] = threads
try:
config.reload_config()
except RuntimeError as e:
# This test should fail if threads have already been launched
self.assertIn("Cannot set NUMBA_NUM_THREADS", e.args[0])
else:
self.assertEqual(threads, str(get_thread_count()))
self.assertEqual(threads, str(config.NUMBA_NUM_THREADS))
finally:
# reset the env variable/set to default. Should not fail even if
# threads are launched because the value is the same.
env[key] = current
config.reload_config()
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python |
4ebc539f77f7b0dc0d32b79edb34c651cf5e5b97 | Update index path | WesleyyC/DT-In-The-House | generate-from-model.py | generate-from-model.py | import sys
import numpy
import pickle
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from scipy.misc import logsumexp
# create mapping of unique chars to integers, and a reverse mapping
char_to_int = pickle.load(open("index/char_to_int.json", "r"))
int_to_char = pickle.load(open("index/int_to_char.json", "r"))
seq_length = 20
# define the LSTM model
model = Sequential()
model.add(LSTM(512, input_shape=(seq_length, 1), return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(512))
model.add(Dropout(0.5))
model.add(Dense(len(char_to_int), activation='softmax'))
# load the network weights
filename = "model-DT.hdf5"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
# helper softmax
def log_softmax(vec):
return vec - logsumexp(vec)
def softmax(vec):
return numpy.exp(log_softmax(vec))
# helper for sampling
def sample_i(a, temp=1.0):
a = numpy.log(a) / temp
a = softmax(a)
a /= (1 + 1e-5)
return numpy.argmax(numpy.random.multinomial(1,a,1))
# pick a random seed
seed_text = "i have the best temperament"
seed_text = seed_text[0:seq_length]
pattern = [char_to_int[char] for char in seed_text]
result = ""
# generate characters
for i in range(5000):
x = numpy.reshape(pattern, (1, len(pattern), 1))
x = x / float(len(char_to_int))
prediction = model.predict(x, verbose=0)[0]
index = sample_i(prediction, 0.5)
result += int_to_char[index]
pattern.append(index)
pattern = pattern[1:len(pattern)]
print result[result.find('.')+1:result.rfind('.')+1]
| import sys
import numpy
import pickle
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from scipy.misc import logsumexp
# create mapping of unique chars to integers, and a reverse mapping
char_to_int = pickle.load(open("char_indic.json", "r"))
int_to_char = pickle.load(open("indic_char.json", "r"))
seq_length = 20
# define the LSTM model
model = Sequential()
model.add(LSTM(512, input_shape=(seq_length, 1), return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(512))
model.add(Dropout(0.5))
model.add(Dense(len(char_to_int), activation='softmax'))
# load the network weights
filename = "model-DT.hdf5"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
# helper softmax
def log_softmax(vec):
return vec - logsumexp(vec)
def softmax(vec):
return numpy.exp(log_softmax(vec))
# helper for sampling
def sample_i(a, temp=1.0):
a = numpy.log(a) / temp
a = softmax(a)
a /= (1 + 1e-5)
return numpy.argmax(numpy.random.multinomial(1,a,1))
# pick a random seed
seed_text = "i have the best temperament"
seed_text = seed_text[0:seq_length]
pattern = [char_to_int[char] for char in seed_text]
result = ""
# generate characters
for i in range(5000):
x = numpy.reshape(pattern, (1, len(pattern), 1))
x = x / float(len(char_to_int))
prediction = model.predict(x, verbose=0)[0]
index = sample_i(prediction, 0.5)
result += int_to_char[index]
pattern.append(index)
pattern = pattern[1:len(pattern)]
print result[result.find('.')+1:result.rfind('.')+1]
| mit | Python |
a425898865166813c8d64311ec10b23ac8625882 | add validation for validating than file is ignored file | faycheng/tpl,faycheng/tpl | tpl/tpl.py | tpl/tpl.py | # -*- coding:utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import jinja2
from tpl import path
from tpl import errors
class Template(object):
IGNORE_FILES = [
'construct.sh',
'construct.py'
]
def __init__(self, tpl_dir):
self.tpl_dir = tpl_dir
def is_ignored_file(self, file):
file_name = file.split('/')[-1]
if file_name in self.IGNORE_FILES:
return True
return False
def render_file(self, file):
pass
def render_dir(self, dir):
pass
def render(self, context):
pass
| # -*- coding:utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import jinja2
from tpl import path
from tpl import errors
class Template(object):
IGNORE_FILES = [
'construct.sh',
'construct.py'
]
def __init__(self, tpl_dir):
self.tpl_dir = tpl_dir
def is_ignored_file(self, file):
pass
def render_file(self, file):
pass
def render_dir(self, dir):
pass
def render(self, context):
pass
| mit | Python |
cb52303c74a76bfaf8f3017b9be78f3620b00483 | Add pragma: no cover to migration data unload | OpenVolunteeringPlatform/django-ovp-core,OpenVolunteeringPlatform/django-ovp-core | ovp_core/migrations/0004_load_skills_and_causes.py | ovp_core/migrations/0004_load_skills_and_causes.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-25 01:14
from __future__ import unicode_literals
from django.db import migrations
skills = ['Arts/Handcrafting', 'Communication', 'Dance/Music', 'Law', 'Education', 'Sports', 'Cooking', 'Management', 'Idioms', 'Computers/Technology', 'Health', 'Others']
causes = ['Professional Training', 'Fight Poverty', 'Conscious consumption', 'Culture, Sport and Art', 'Human Rights', 'Education', 'Youth', 'Elders', 'Environment', 'Citizen Participation', 'Animal Protection', 'Health', 'People with disabilities']
def load_data(apps, schema_editor):
Skill = apps.get_model("ovp_core", "Skill")
Cause = apps.get_model("ovp_core", "Cause")
for skill in skills:
s = Skill(name=skill)
s.save()
for cause in causes:
c = Cause(name=cause)
c.save()
def unload_data(apps, schema_editor): #pragma: no cover
Skill = apps.get_model("ovp_core", "Skill")
Cause = apps.get_model("ovp_core", "Cause")
for skill in skills:
s = Skill.objects.filter(name=skill)
s.delete()
for cause in causes:
c = Cause.objects.filter(name=cause)
c.delete()
class Migration(migrations.Migration):
dependencies = [
('ovp_core', '0003_cause_skill'),
]
operations = [
migrations.RunPython(load_data, reverse_code=unload_data)
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-25 01:14
from __future__ import unicode_literals
from django.db import migrations
skills = ['Arts/Handcrafting', 'Communication', 'Dance/Music', 'Law', 'Education', 'Sports', 'Cooking', 'Management', 'Idioms', 'Computers/Technology', 'Health', 'Others']
causes = ['Professional Training', 'Fight Poverty', 'Conscious consumption', 'Culture, Sport and Art', 'Human Rights', 'Education', 'Youth', 'Elders', 'Environment', 'Citizen Participation', 'Animal Protection', 'Health', 'People with disabilities']
def load_data(apps, schema_editor):
Skill = apps.get_model("ovp_core", "Skill")
Cause = apps.get_model("ovp_core", "Cause")
for skill in skills:
s = Skill(name=skill)
s.save()
for cause in causes:
c = Cause(name=cause)
c.save()
def unload_data(apps, schema_editor):
Skill = apps.get_model("ovp_core", "Skill")
Cause = apps.get_model("ovp_core", "Cause")
for skill in skills:
s = Skill.objects.filter(name=skill)
s.delete()
for cause in causes:
c = Cause.objects.filter(name=cause)
c.delete()
class Migration(migrations.Migration):
dependencies = [
('ovp_core', '0003_cause_skill'),
]
operations = [
migrations.RunPython(load_data, reverse_code=unload_data)
]
| agpl-3.0 | Python |
35a0a42b4311828ded6d4d614cc50d4da6a60b8c | Use context with file | lozadaOmr/ansible-admin,lozadaOmr/ansible-admin,lozadaOmr/ansible-admin | src/utils/playbook.py | src/utils/playbook.py | from django.conf import settings
from ansible.models import Playbook
import os
def content_loader(pk, slug):
playbook = Playbook.query_set.get(pk=pk)
playbook_dir = playbook.directory
# TODO: for now assume without validation
playbook_file = os.path.join(playbook_dir, slug + '.yml')
with open(playbook_file, 'r') as f:
content = f.read()
return content
| from django.conf import settings
from ansible.models import Playbook
import os
def content_loader(pk, slug):
playbook = Playbook.query_set.get(pk=pk)
playbook_dir = playbook.directory
# TODO: for now assume without validation
playbook_file = os.path.join(playbook_dir, slug + '.yml')
return playbook_file
| bsd-3-clause | Python |
3ae6787d1e5fdfc746f1ec92409a75b397d702e9 | Bump version to 2.1.1-dev | pferreir/indico,ThiefMaster/indico,indico/indico,mic4ael/indico,mvidalgarcia/indico,mic4ael/indico,mvidalgarcia/indico,mic4ael/indico,pferreir/indico,DirkHoffmann/indico,pferreir/indico,DirkHoffmann/indico,OmeGak/indico,OmeGak/indico,DirkHoffmann/indico,mvidalgarcia/indico,ThiefMaster/indico,indico/indico,mvidalgarcia/indico,ThiefMaster/indico,pferreir/indico,mic4ael/indico,indico/indico,DirkHoffmann/indico,OmeGak/indico,ThiefMaster/indico,OmeGak/indico,indico/indico | indico/__init__.py | indico/__init__.py | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
try:
_patch_wtforms_sqlalchemy()
except ImportError as exc:
# pip seems to run this sometimes while uninstalling an old sqlalchemy version
print 'Could not monkeypatch wtforms', exc
finally:
del _patch_wtforms_sqlalchemy
| # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
try:
_patch_wtforms_sqlalchemy()
except ImportError as exc:
# pip seems to run this sometimes while uninstalling an old sqlalchemy version
print 'Could not monkeypatch wtforms', exc
finally:
del _patch_wtforms_sqlalchemy
| mit | Python |
f57fc2abd861e7eb9f1ce698c05b87b66cd0e408 | Update UploadedTo.py | pyblub/pyload,vuolter/pyload,vuolter/pyload,pyblub/pyload,vuolter/pyload | module/plugins/accounts/UploadedTo.py | module/plugins/accounts/UploadedTo.py | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
from module.plugins.Account import Account
import re
from time import time
class UploadedTo(Account):
__name__ = "UploadedTo"
__version__ = "0.24"
__type__ = "account"
__description__ = """ul.net account plugin"""
__author_name__ = ("mkaay")
__author_mail__ = ("mkaay@mkaay.de")
def loadAccountInfo(self, user, req):
req.load("http://uploaded.net/language/en")
html = req.load("http://uploaded.net/me")
premium = '<a href="register"><em>Premium</em>' in html or '<em>Premium</em></th>' in html
if premium:
raw_traffic = re.search(r'<th colspan="2"><b class="cB">([^<]+)', html).group(1)
raw_valid = re.search(r"<td>Duration:</td>\s*<th>([^<]+)", html, re.MULTILINE).group(1).strip()
traffic = int(self.parseTraffic(raw_traffic))
if raw_valid == "unlimited":
validuntil = -1
else:
raw_valid = re.findall(r"(\d+) (Week|weeks|days|hours)", raw_valid)
validuntil = time()
for n, u in raw_valid:
validuntil += 3600 * int(n) * {"Week": 168, "weeks": 168, "days": 24, "hours": 1}[u]
return {"validuntil":validuntil, "trafficleft":traffic, "maxtraffic":50*1024*1024}
else:
return {"premium" : False, "validuntil" : -1}
def login(self, user, data, req):
req.load("http://uploaded.net/language/en")
req.cj.setCookie("uploaded.net", "lang", "en")
page = req.load("http://uploaded.net/io/login", post={ "id" : user, "pw" : data["password"], "_" : ""})
if "User and password do not match!" in page:
self.wrongPassword()
| # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
from module.plugins.Account import Account
import re
from time import time
class UploadedTo(Account):
__name__ = "UploadedTo"
__version__ = "0.23"
__type__ = "account"
__description__ = """ul.net account plugin"""
__author_name__ = ("mkaay")
__author_mail__ = ("mkaay@mkaay.de")
def loadAccountInfo(self, user, req):
req.load("http://uploaded.net/language/en")
html = req.load("http://uploaded.net/me")
premium = '<a href="register"><em>Premium</em>' in html or '<em>Premium</em></th>' in html
if premium:
raw_traffic = re.search(r'<th colspan="2"><b class="cB">([^<]+)', html).group(1)
raw_valid = re.search(r"<td>Duration:</td>\s*<th>([^<]+)", html, re.MULTILINE).group(1).strip()
traffic = int(self.parseTraffic(raw_traffic))
if raw_valid == "unlimited":
validuntil = -1
else:
raw_valid = re.findall(r"(\d+) (weeks|days|hours)", raw_valid)
validuntil = time()
for n, u in raw_valid:
validuntil += 3600 * int(n) * {"weeks": 168, "days": 24, "hours": 1}[u]
return {"validuntil":validuntil, "trafficleft":traffic, "maxtraffic":50*1024*1024}
else:
return {"premium" : False, "validuntil" : -1}
def login(self, user, data, req):
req.load("http://uploaded.net/language/en")
req.cj.setCookie("uploaded.net", "lang", "en")
page = req.load("http://uploaded.net/io/login", post={ "id" : user, "pw" : data["password"], "_" : ""})
if "User and password do not match!" in page:
self.wrongPassword()
| agpl-3.0 | Python |
4b5dd61607c9692bb330f89545d5f76d7a1ed221 | Fix linkage in the RSS feed | jomoore/threepins,jomoore/threepins,jomoore/threepins | puzzle/feeds.py | puzzle/feeds.py | """
Generate an RSS feed of published crosswords from staff users.
Uses the built-in feed framework. There's no attempt to send the actual
crossword, it's just a message indicating that a new one is available.
"""
from django.contrib.syndication.views import Feed
from django.urls import reverse
from django.utils import timezone
from puzzle.models import Puzzle
class PuzzleFeed(Feed):
"""RSS feed of new puzzles from the staff."""
#pylint: disable=no-self-use,missing-docstring
title = 'Three Pins'
link = 'http://www.threepins.org'
description = 'A cryptic crossword outlet.'
def items(self):
return Puzzle.objects.filter(user__is_staff=True,
pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
def item_title(self, item):
return 'Crossword #' + str(item.number)
def item_description(self, item):
return 'Crossword #' + str(item.number) + ' is now available.'
def item_link(self, item):
return reverse('puzzle', kwargs={'author': item.user.username, 'number': item.number})
def item_pubdate(self, item):
return item.pub_date
| """
Generate an RSS feed of published crosswords from staff users.
Uses the built-in feed framework. There's no attempt to send the actual
crossword, it's just a message indicating that a new one is available.
"""
from django.contrib.syndication.views import Feed
from django.urls import reverse
from django.utils import timezone
from puzzle.models import Puzzle
class PuzzleFeed(Feed):
"""RSS feed of new puzzles from the staff."""
#pylint: disable=no-self-use,missing-docstring
title = 'Three Pins'
link = 'http://www.threepins.org'
description = 'A cryptic crossword outlet.'
def items(self):
return Puzzle.objects.filter(user__is_staff=True,
pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
def item_title(self, item):
return 'Crossword #' + str(item.number)
def item_description(self, item):
return 'Crossword #' + str(item.number) + ' is now available.'
def item_link(self, item):
return reverse('puzzle', args=[item.number])
def item_pubdate(self, item):
return item.pub_date
| mit | Python |
8f3b57fade88a596aa5fec95342e074c00367997 | remove test code | Willyham/tchannel-python,Willyham/tchannel-python,uber/tchannel-python,uber/tchannel-python | tchannel/messages/call_request.py | tchannel/messages/call_request.py | from __future__ import absolute_import
from .base import BaseMessage
from .types import Types
from .. import rw
from . import common
class CallRequestMessage(BaseMessage):
"""Initiate an RPC call."""
message_type = Types.CALL_REQ
__slots__ = (
'flags',
'ttl',
'tracing',
'service',
'headers',
'checksum',
'arg_1',
'arg_2',
'arg_3',
)
def __init__(
self,
flags=0,
ttl=10,
tracing=None,
service=None,
headers=None,
checksum=None,
arg_1=None,
arg_2=None,
arg_3=None,
):
self.flags = flags
self.ttl = ttl
self.tracing = tracing or common.Tracing(0, 0, 0, 0)
self.service = service or ''
self.headers = dict(headers) if headers else {}
if checksum is not None:
checksum = common.ChecksumType.standardize(checksum)
self.checksum = checksum or \
(common.ChecksumType.none, None)
self.arg_1 = arg_1 or ''
self.arg_2 = arg_2 or ''
self.arg_3 = arg_3 or ''
call_req_rw = rw.instance(
CallRequestMessage,
("flags", rw.number(1)), # flags:1
("ttl", rw.number(4)), # ttl:4
("tracing", common.tracing_rw), # tracing:24
# traceflags: 1
("service", rw.len_prefixed_string(rw.number(1))), # service~1
("headers", rw.headers( # nh:1 (hk~1 hv~1){nh}
rw.number(1),
rw.len_prefixed_string(rw.number(1))
)),
("checksum", common.checksum_rw), # csumtype:1 (csum:4){0, 1}
("arg_1", rw.len_prefixed_string(rw.number(2), is_binary=True)), # arg1~2
("arg_2", rw.len_prefixed_string(rw.number(2), is_binary=True)), # arg2~2
("arg_3", rw.len_prefixed_string(rw.number(2), is_binary=True)), # arg3~2
)
| from __future__ import absolute_import
from .base import BaseMessage
from .types import Types
from .. import rw
from . import common
class CallRequestMessage(BaseMessage):
    """Initiate an RPC call."""

    message_type = Types.CALL_REQ

    __slots__ = (
        'flags',
        'ttl',
        'tracing',
        'service',
        'headers',
        'checksum',
        'arg_1',
        'arg_2',
        'arg_3',
    )

    def __init__(
        self,
        flags=0,
        ttl=10,
        tracing=None,
        service=None,
        headers=None,
        checksum=None,
        arg_1=None,
        arg_2=None,
        arg_3=None,
    ):
        self.flags = flags
        self.ttl = ttl
        self.tracing = tracing or common.Tracing(0, 0, 0, 0)
        self.service = service or ''
        # Fixed: the default used to be {"as": "http"} -- leftover test code
        # that silently injected an "as" transport header into every request
        # built without explicit headers. Default is now an empty dict.
        self.headers = dict(headers) if headers else {}
        if checksum is not None:
            checksum = common.ChecksumType.standardize(checksum)
        self.checksum = checksum or \
            (common.ChecksumType.none, None)
        self.arg_1 = arg_1 or ''
        self.arg_2 = arg_2 or ''
        self.arg_3 = arg_3 or ''
# Serializer spec for CALL_REQ frames. Field order matches the wire format;
# the trailing comments give each field's on-the-wire size in bytes.
call_req_rw = rw.instance(
    CallRequestMessage,
    ("flags", rw.number(1)),  # flags:1
    ("ttl", rw.number(4)),  # ttl:4
    ("tracing", common.tracing_rw),  # tracing:24
                                     # traceflags: 1
    ("service", rw.len_prefixed_string(rw.number(1))),  # service~1
    ("headers", rw.headers(  # nh:1 (hk~1 hv~1){nh}
        rw.number(1),
        rw.len_prefixed_string(rw.number(1))
    )),
    ("checksum", common.checksum_rw),  # csumtype:1 (csum:4){0, 1}
    ("arg_1", rw.len_prefixed_string(rw.number(2), is_binary=True)),  # arg1~2
    ("arg_2", rw.len_prefixed_string(rw.number(2), is_binary=True)),  # arg2~2
    ("arg_3", rw.len_prefixed_string(rw.number(2), is_binary=True)),  # arg3~2
)
| mit | Python |
be08bbd5249e31345dc42140558a3a3f4720e71d | add coverage sanitizer option (#6171) | skia-dev/oss-fuzz,robertswiecki/oss-fuzz,skia-dev/oss-fuzz,googlefonts/oss-fuzz,googlefonts/oss-fuzz,google/oss-fuzz,googlefonts/oss-fuzz,robertswiecki/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,google/oss-fuzz,robertswiecki/oss-fuzz,skia-dev/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,skia-dev/oss-fuzz,skia-dev/oss-fuzz,robertswiecki/oss-fuzz,robertswiecki/oss-fuzz,googlefonts/oss-fuzz,google/oss-fuzz,google/oss-fuzz,google/oss-fuzz,google/oss-fuzz,googlefonts/oss-fuzz,robertswiecki/oss-fuzz,robertswiecki/oss-fuzz,google/oss-fuzz,googlefonts/oss-fuzz,robertswiecki/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,skia-dev/oss-fuzz,robertswiecki/oss-fuzz,googlefonts/oss-fuzz,robertswiecki/oss-fuzz,skia-dev/oss-fuzz,googlefonts/oss-fuzz,googlefonts/oss-fuzz,robertswiecki/oss-fuzz,googlefonts/oss-fuzz | infra/constants.py | infra/constants.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Constants for OSS-Fuzz."""
# Defaults applied when a project/config does not specify a value.
DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH = '.clusterfuzzlite'
DEFAULT_LANGUAGE = 'c++'
DEFAULT_SANITIZER = 'address'
DEFAULT_ARCHITECTURE = 'x86_64'
DEFAULT_ENGINE = 'libfuzzer'

# Languages fuzz targets can be built for.
LANGUAGES = [
    'c',
    'c++',
    'go',
    'jvm',
    'python',
    'rust',
    'swift',
]

# Subset of LANGUAGES with coverage-report support.
LANGUAGES_WITH_COVERAGE_SUPPORT = ['c', 'c++', 'go', 'jvm', 'rust']

# Supported sanitizer modes; 'coverage' builds coverage-instrumented
# binaries rather than a bug-detecting sanitizer.
SANITIZERS = [
    'address', 'none', 'memory', 'undefined', 'dataflow', 'thread', 'coverage'
]

# Supported CPU architectures and fuzzing engines.
ARCHITECTURES = ['i386', 'x86_64']
ENGINES = ['libfuzzer', 'afl', 'honggfuzz', 'dataflow', 'none']
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Constants for OSS-Fuzz."""
# Defaults applied when a project/config does not specify a value.
DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH = '.clusterfuzzlite'
DEFAULT_LANGUAGE = 'c++'
DEFAULT_SANITIZER = 'address'
DEFAULT_ARCHITECTURE = 'x86_64'
DEFAULT_ENGINE = 'libfuzzer'

# Languages fuzz targets can be built for.
LANGUAGES = [
    'c',
    'c++',
    'go',
    'jvm',
    'python',
    'rust',
    'swift',
]

# Subset of LANGUAGES with coverage-report support.
LANGUAGES_WITH_COVERAGE_SUPPORT = ['c', 'c++', 'go', 'jvm', 'rust']

# Supported sanitizer modes. Added 'coverage' so coverage-instrumented
# builds can be requested like any other sanitizer (coverage support
# already exists per LANGUAGES_WITH_COVERAGE_SUPPORT above).
SANITIZERS = [
    'address', 'none', 'memory', 'undefined', 'dataflow', 'thread', 'coverage'
]

# Supported CPU architectures and fuzzing engines.
ARCHITECTURES = ['i386', 'x86_64']
ENGINES = ['libfuzzer', 'afl', 'honggfuzz', 'dataflow', 'none']
| apache-2.0 | Python |
852349276fcd1aa79ba1a31ab77637db64d9ddae | Mark safe statics | djangoer/django-selectize,djangoer/django-selectize | selectize/templatetags/selectize_tags.py | selectize/templatetags/selectize_tags.py | from django import template
# from django.templatetags.static import static
# see stackoverflow :http://stackoverflow.com/questions/11721818
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def selectize_tags_media(media_type='css', name=''):
    """
    Usage:
    ------
    To include css media:
        selectize_tags_media 'css' <theme>
    To include Selectize Scripts:
        selectize_tags_media 'js'
    To include Selectize Scripts and Jquery:
        selectize_tags_media 'js' 'jquery'
    """
    if media_type == 'js':
        # <script> template; jQuery (when requested) must precede selectize.
        str_script = '<script src="{url}"></script>\n'
        html = str_script.format(url=static('selectize/selectize.min.js'))
        if name == 'jquery':
            html = str_script.format(url=static('selectize/jquery.min.js')) + html
        return mark_safe(html)
    # CSS branch: an optional theme name maps to "selectize.<theme>.css".
    if name:
        name += '.'
    fpath = 'selectize/css/selectize.{name}css'.format(name=name)
return mark_safe('<link rel="stylesheet" href="{url}">'.format(url=static(fpath))) | from django import template
# from django.templatetags.static import static
# see stackoverflow :http://stackoverflow.com/questions/11721818
from django.contrib.staticfiles.templatetags.staticfiles import static
register = template.Library()
@register.simple_tag
def selectize_tags_media(media_type='css', name=''):
    """
    Usage:
    ------
    To include css media:
        selectize_tags_media 'css' <theme>
    To include Selectize Scripts:
        selectize_tags_media 'js'
    To include Selectize Scripts and Jquery:
        selectize_tags_media 'js' 'jquery'
    """
    # NOTE(review): simple_tag output is autoescaped by Django, so the raw
    # HTML strings returned below will render as escaped text. They should
    # presumably be wrapped in django.utils.safestring.mark_safe -- verify
    # against the templates that use this tag.
    if media_type == 'js':
        # <script> template; jQuery (when requested) must precede selectize.
        str_script = '<script src="{url}"></script>\n'
        html = str_script.format(url=static('selectize/selectize.min.js'))
        if name == 'jquery':
            html = str_script.format(url=static('selectize/jquery.min.js')) + html
        return html
    # CSS branch: an optional theme name maps to "selectize.<theme>.css".
    if name:
        name += '.'
    fpath = 'selectize/css/selectize.{name}css'.format(name=name)
return '<link rel="stylesheet" href="{url}">'.format(url=static(fpath)) | apache-2.0 | Python |
ee214c1be54af5a1e7ae0892dba8f2569a79828e | Fix logging.conf loading | hammerlab/isovar,hammerlab/isovar | script/isovar-variant-sequences.py | script/isovar-variant-sequences.py | #!/usr/bin/env python
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import logging.config
import pkg_resources
from isovar.cli.variant_sequences import (
make_variant_sequences_arg_parser,
variant_sequences_dataframe_from_args
)
# Configure logging from the logging.conf shipped inside the isovar.cli
# package (resolved through pkg_resources so it works from an installed
# distribution, not just a source checkout).
logging.config.fileConfig(pkg_resources.resource_filename('isovar.cli', 'logging.conf'))
logger = logging.getLogger(__name__)

# CLI: the shared isovar variant-sequences parser plus an output-path option.
parser = make_variant_sequences_arg_parser(add_sequence_length_arg=True)
parser.add_argument(
    "--output",
    default="isovar-variant-sequences-results.csv",
    help="Name of CSV file which contains predicted sequences")

if __name__ == "__main__":
    args = parser.parse_args()
    logger.info(args)
    # Build the variant-sequences dataframe and persist it as CSV.
    df = variant_sequences_dataframe_from_args(args)
    logger.info(df)
    df.to_csv(args.output)
| #!/usr/bin/env python
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import logging.config
import pkg_resources
from isovar.cli.variant_sequences import (
make_variant_sequences_arg_parser,
variant_sequences_dataframe_from_args
)
# Configure logging from the logging.conf shipped inside the isovar.cli
# package. Fixed: this previously used resource_filename(__name__, ...),
# but a script executes with __name__ == '__main__', so the lookup searched
# the wrong module and could not find the packaged logging.conf.
logging.config.fileConfig(pkg_resources.resource_filename('isovar.cli', 'logging.conf'))
logger = logging.getLogger(__name__)

# CLI: the shared isovar variant-sequences parser plus an output-path option.
parser = make_variant_sequences_arg_parser(add_sequence_length_arg=True)
parser.add_argument(
    "--output",
    default="isovar-variant-sequences-results.csv",
    help="Name of CSV file which contains predicted sequences")

if __name__ == "__main__":
    args = parser.parse_args()
    logger.info(args)
    # Build the variant-sequences dataframe and persist it as CSV.
    df = variant_sequences_dataframe_from_args(args)
    logger.info(df)
    df.to_csv(args.output)
| apache-2.0 | Python |
37a85a8fdac4a08400ff4859a52647a8513dd591 | Allow no collections... | OpenTreeOfLife/peyotl,OpenTreeOfLife/peyotl,mtholder/peyotl,mtholder/peyotl | scripts/concatenate_collections.py | scripts/concatenate_collections.py | #!/usr/bin/env python
from peyotl import concatenate_collections, read_as_json, write_as_json
if __name__ == '__main__':
    import argparse
    import sys
    import os

    # Build the CLI: a required output path plus zero-or-more input
    # collection files.
    description = 'Takes a list of collections and writes a collection that is a concatenation of their decisions'
    arg_parser = argparse.ArgumentParser(prog='collection_export.py',
                                         description=description)
    arg_parser.add_argument('--output',
                            type=str,
                            required=True,
                            help='output filepath for collection json')
    arg_parser.add_argument('collection',
                            default=None,
                            type=str,
                            nargs="*",
                            help='filepath for the collections JSON')
    parsed = arg_parser.parse_args(sys.argv[1:])
    # Load every input collection, merge their decisions, write the result.
    merged = concatenate_collections([read_as_json(fp) for fp in parsed.collection])
    write_as_json(merged, parsed.output)
| #!/usr/bin/env python
from peyotl import concatenate_collections, read_as_json, write_as_json
if __name__ == '__main__':
    import argparse
    import sys
    import os

    # Build the CLI: a required output path plus input collection files.
    description = 'Takes a list of collections and writes a collection that is a concatenation of their decisions'
    parser = argparse.ArgumentParser(prog='collection_export.py', description=description)
    parser.add_argument('--output',
                        type=str,
                        required=True,
                        help='output filepath for collection json')
    parser.add_argument('collection',
                        default=None,
                        type=str,
                        # Fixed: was nargs="+", which made argparse error out
                        # when no collection was given; "*" allows an empty
                        # list so concatenating zero collections is possible.
                        nargs="*",
                        help='filepath for the collections JSON')
    args = parser.parse_args(sys.argv[1:])
    inp = [read_as_json(i) for i in args.collection]
    out = concatenate_collections(inp)
    write_as_json(out, args.output)
| bsd-2-clause | Python |
0a7eeffd5632032630ae1c1ed99c105f7700fde4 | fix TypeError url = '\r\n'.join([url]) | zatuper/pywebstepic,ztp99/pyweb,ztp99/pyweb,ztp99/pyweb,zatuper/pywebstepic,zatuper/pywebstepic | etc/hello.py | etc/hello.py |
# Deployment configuration for this WSGI app.
# NOTE(review): the keys mirror gunicorn-style CLI flags and nothing in this
# file reads CONFIG, so it is presumably consumed by an external process
# manager -- confirm before relying on it.
CONFIG = {
    'mode': 'wsgi',
    'working_dir': '/path/to/my/app',
    'python': '/usr/bin/python',
    'args': (
        '--bind=127.0.0.1:8080',
        '--workers=16',
        '--timeout=60',
        'app.module',
    ),
}
def application(env, start_response):
    """WSGI app: echo the request's query parameters, one per line.

    A "z=0" parameter is always appended so the output is never empty
    (test aid for requests with no query string). Returns an iterable of
    bytes as PEP 3333 requires.
    """
    start_response('200 OK', [('Content-Type', 'text/plain')])
    query = env['QUERY_STRING'] + "&z=0"  # z=0 just for test when no url query
    params = query.split('&')
    # Fixed: the previous code joined the undefined name `data` (NameError)
    # and then returned the raw params list; build the body from `params`.
    body = '\n'.join(params)
    # PEP 3333: the returned iterable must yield bytes, not str.
    return [body.encode('utf-8')]
|
# Deployment configuration for this WSGI app.
# NOTE(review): the keys mirror gunicorn-style CLI flags and nothing in this
# file reads CONFIG, so it is presumably consumed by an external process
# manager -- confirm before relying on it.
CONFIG = {
    'mode': 'wsgi',
    'working_dir': '/path/to/my/app',
    'python': '/usr/bin/python',
    'args': (
        '--bind=127.0.0.1:8080',
        '--workers=16',
        '--timeout=60',
        'app.module',
    ),
}
def application(env, start_response):
    """WSGI app: echo the request's query parameters, CRLF-separated.

    Fixes two defects: the old code stringified the whole params list
    before joining, so '\\r\\n'.join([url]) was a no-op on a one-element
    list and the body was "['a=1', ...]"; and it returned a plain str,
    which violates PEP 3333 (the TypeError this commit targets) -- WSGI
    bodies must be iterables of bytes.
    """
    start_response('200 OK', [('Content-Type', 'text/plain')])
    url = env['QUERY_STRING']
    url += "&z=0"  # z=0 just for test when no url query
    params = url.split('&')
    body = '\r\n'.join(params)  # join the items, not a stringified list
    return [body.encode('utf-8')]
| apache-2.0 | Python |
86263496d5e541876f4b1f10525b4318a4fa6798 | Remove unnecessary code from treeview | almeidapaulopt/frappe,adityahase/frappe,StrellaGroup/frappe,yashodhank/frappe,StrellaGroup/frappe,almeidapaulopt/frappe,saurabh6790/frappe,frappe/frappe,mhbu50/frappe,yashodhank/frappe,saurabh6790/frappe,mhbu50/frappe,saurabh6790/frappe,mhbu50/frappe,frappe/frappe,frappe/frappe,adityahase/frappe,yashodhank/frappe,almeidapaulopt/frappe,StrellaGroup/frappe,adityahase/frappe,yashodhank/frappe,mhbu50/frappe,adityahase/frappe,saurabh6790/frappe,almeidapaulopt/frappe | frappe/desk/treeview.py | frappe/desk/treeview.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
@frappe.whitelist()
def get_all_nodes(doctype, parent, tree_method, **filters):
    '''Recursively gets all data from tree nodes'''
    # Strip request plumbing that must not reach the tree method.
    if 'cmd' in filters:
        del filters['cmd']
    filters.pop('data', None)

    # tree_method is request-supplied: resolve the dotted path and refuse
    # anything that is not an explicitly whitelisted callable.
    tree_method = frappe.get_attr(tree_method)

    if not tree_method in frappe.whitelisted:
        frappe.throw(_("Not Permitted"), frappe.PermissionError)

    data = tree_method(doctype, parent, **filters)
    out = [dict(parent=parent, data=data)]

    # is_root only applies to the first call; descendants pass is_root=False.
    if 'is_root' in filters:
        del filters['is_root']

    # Stack-based (LIFO) expansion: keep fetching children of expandable
    # nodes until the whole subtree is flattened into (parent, data) pairs.
    to_check = [d.get('value') for d in data if d.get('expandable')]

    while to_check:
        parent = to_check.pop()
        data = tree_method(doctype, parent, is_root=False, **filters)
        out.append(dict(parent=parent, data=data))
        for d in data:
            if d.get('expandable'):
                to_check.append(d.get('value'))

    return out
@frappe.whitelist()
def get_children(doctype, parent='', **filters):
    # Tree doctypes link to their parent via "parent_<doctype snake_case>".
    parent_field = 'parent_' + doctype.lower().replace(' ', '_')
    # NOTE(review): parent_field is interpolated into the filter fieldname
    # from the request-supplied doctype; presumably frappe.get_list
    # validates doctype/fieldnames before building SQL -- verify.
    filters = [['ifnull(`{0}`,"")'.format(parent_field), '=', parent],
               ['docstatus', '<', '2']]

    doctype_meta = frappe.get_meta(doctype)
    # Display the doctype's configured title field, falling back to name.
    data = frappe.get_list(doctype, fields=[
        'name as value',
        '{0} as title'.format(doctype_meta.get('title_field') or 'name'),
        'is_group as expandable'],
        filters=filters,
        order_by='name')

    return data
@frappe.whitelist()
def add_node():
    """Create and persist a tree node from the submitted form values."""
    node_args = make_tree_args(**frappe.form_dict)
    frappe.get_doc(node_args).save()
def make_tree_args(**kwarg):
    """Normalize raw form values into document args for a tree doctype."""
    del kwarg['cmd']

    slug = kwarg['doctype'].lower().replace(' ', '_')
    parent_field = 'parent_' + slug
    name_field = kwarg.get('name_field', slug + '_name')

    # The client submits booleans as the strings 'true'/'false'.
    kwarg['is_root'] = {'false': False, 'true': True}.get(
        kwarg['is_root'], kwarg['is_root'])

    extra = {
        name_field: kwarg[name_field],
        parent_field: kwarg.get("parent") or kwarg.get(parent_field),
    }
    kwarg.update(extra)

    return frappe._dict(kwarg)
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
@frappe.whitelist()
def get_all_nodes(doctype, parent, tree_method, **filters):
    '''Recursively gets all data from tree nodes'''
    # Strip request plumbing that must not reach the tree method.
    if 'cmd' in filters:
        del filters['cmd']
    filters.pop('data', None)

    # tree_method is request-supplied: resolve the dotted path and refuse
    # anything that is not an explicitly whitelisted callable.
    tree_method = frappe.get_attr(tree_method)

    if not tree_method in frappe.whitelisted:
        frappe.throw(_("Not Permitted"), frappe.PermissionError)

    data = tree_method(doctype, parent, **filters)
    out = [dict(parent=parent, data=data)]

    # is_root only applies to the first call; descendants pass is_root=False.
    if 'is_root' in filters:
        del filters['is_root']

    # Stack-based (LIFO) expansion: keep fetching children of expandable
    # nodes until the whole subtree is flattened into (parent, data) pairs.
    to_check = [d.get('value') for d in data if d.get('expandable')]

    while to_check:
        parent = to_check.pop()
        data = tree_method(doctype, parent, is_root=False, **filters)
        out.append(dict(parent=parent, data=data))
        for d in data:
            if d.get('expandable'):
                to_check.append(d.get('value'))

    return out
@frappe.whitelist()
def get_children(doctype, parent='', **filters):
    # Tree doctypes link to their parent via "parent_<doctype snake_case>".
    parent_field = 'parent_' + doctype.lower().replace(' ', '_')
    # NOTE(review): parent_field is interpolated into the filter fieldname
    # from the request-supplied doctype; presumably frappe.get_list
    # validates doctype/fieldnames before building SQL -- verify.
    filters = [['ifnull(`{0}`,"")'.format(parent_field), '=', parent],
               ['docstatus', '<', '2']]

    doctype_meta = frappe.get_meta(doctype)
    # Display the doctype's configured title field, falling back to name.
    data = frappe.get_list(doctype, fields=[
        'name as value',
        '{0} as title'.format(doctype_meta.get('title_field') or 'name'),
        'is_group as expandable'],
        filters=filters,
        order_by='name')

    return data
@frappe.whitelist()
def add_node():
    """Create and persist a tree node from the submitted form values."""
    args = make_tree_args(**frappe.form_dict)
    doc = frappe.get_doc(args)
    # Removed the "Sales Person" special case that copied `employee` from
    # frappe.form_dict here: doctype-specific handling does not belong in
    # this generic tree endpoint, and args already carry every submitted
    # form field through make_tree_args/get_doc.
    doc.save()
def make_tree_args(**kwarg):
    """Normalize raw form values into document args for a tree doctype."""
    del kwarg['cmd']

    slug = kwarg['doctype'].lower().replace(' ', '_')
    parent_field = 'parent_' + slug
    name_field = kwarg.get('name_field', slug + '_name')

    # The client submits booleans as the strings 'true'/'false'.
    kwarg['is_root'] = {'false': False, 'true': True}.get(
        kwarg['is_root'], kwarg['is_root'])

    extra = {
        name_field: kwarg[name_field],
        parent_field: kwarg.get("parent") or kwarg.get(parent_field),
    }
    kwarg.update(extra)

    return frappe._dict(kwarg)
| mit | Python |
0c1e2b8261415227bf4c0ffa61e8e1fa62317620 | Update __init__.py | tensorflow/similarity | tensorflow_similarity/__init__.py | tensorflow_similarity/__init__.py | """Tensorflow Similarity

TensorFlow Similarity, as visible in the diagram above, offers the following
components to help research, train, evaluate and serve metric models:
- **`SimilarityModel()`**: This class subclasses the `tf.keras.model` class and extends it with additional properties that are useful for metric learning. For example it adds the methods:
1. `index()`: Enables indexing of the embedding
2. `lookup()`: Takes samples, calls predict(), and searches for neighbors within the index.
3. `calibrate()`: Calibrates the model's index search thresholds using a calibration metric and a test dataset.
- **`MetricLoss()`**: This virtual class, that extends the `tf.keras.Loss` class, is the base class from which Metric losses are derived. This sub-classing ensures proper error checking; that is, it ensures the user is using a loss metric to train the models, performs better static analysis, and enforces additional constraints such as having a distance function that is supported by the index. Additionally, Metric losses make use of the fully tested and highly optimized pairwise distances functions provided by TensorFlow Similarity that are available under the `Distances.*` classes.
- **`Samplers()`**: Samplers are meant to ensure that each batch has at least n (with n >=2) examples of each class, as losses such as TripletLoss can’t work properly if this condition is not met. TensorFlow Similarity offers an in-memory sampler for small dataset and a `tf.data.TFRecordDataset` for large scales one.
- **`Indexer()`**: The Indexer and its sub-components are meant to index known embeddings alongside their metadata. The embedding metadata is stored within `Table()`, while the `Matcher()` is used to perform [fast approximate neighbor searches](https://en.wikipedia.org/wiki/Nearest_neighbor_search) that are meant to quickly retrieve the indexed elements that are the closest to the embeddings supplied in the `lookup()` and `single_lookup()` function.
The default `Index()` sub-components run in-memory and are optimized to be used in interactive settings such as Jupyter notebooks, Colab, and metric computation during training (e.g. using the `EvalCallback()` provided). Indexes are serialized as part of `model.save()` so you can reload them via `model.index_load()` for serving purposes or further training / evaluation.
The default implementation can scale up to medium deployment (1M-10M+ points) easily, provided the computers have enough memory. For very large scale deployments you will need to subclass the components to match your own architecture. See FIXME colab to see how to deploy TensorFlow Similarity in production.
"""
# Single source of truth for the package version (bumped on release).
__version__ = '0.13.12'
| """Tensorflow Similarity

TensorFlow Similarity, as visible in the diagram above, offers the following
components to help research, train, evaluate and serve metric models:
- **`SimilarityModel()`**: This class subclasses the `tf.keras.model` class and extends it with additional properties that are useful for metric learning. For example it adds the methods:
1. `index()`: Enables indexing of the embedding
2. `lookup()`: Takes samples, calls predict(), and searches for neighbors within the index.
3. `calibrate()`: Calibrates the model's index search thresholds using a calibration metric and a test dataset.
- **`MetricLoss()`**: This virtual class, that extends the `tf.keras.Loss` class, is the base class from which Metric losses are derived. This sub-classing ensures proper error checking; that is, it ensures the user is using a loss metric to train the models, performs better static analysis, and enforces additional constraints such as having a distance function that is supported by the index. Additionally, Metric losses make use of the fully tested and highly optimized pairwise distances functions provided by TensorFlow Similarity that are available under the `Distances.*` classes.
- **`Samplers()`**: Samplers are meant to ensure that each batch has at least n (with n >=2) examples of each class, as losses such as TripletLoss can’t work properly if this condition is not met. TensorFlow Similarity offers an in-memory sampler for small dataset and a `tf.data.TFRecordDataset` for large scales one.
- **`Indexer()`**: The Indexer and its sub-components are meant to index known embeddings alongside their metadata. The embedding metadata is stored within `Table()`, while the `Matcher()` is used to perform [fast approximate neighbor searches](https://en.wikipedia.org/wiki/Nearest_neighbor_search) that are meant to quickly retrieve the indexed elements that are the closest to the embeddings supplied in the `lookup()` and `single_lookup()` function.
The default `Index()` sub-components run in-memory and are optimized to be used in interactive settings such as Jupyter notebooks, Colab, and metric computation during training (e.g. using the `EvalCallback()` provided). Indexes are serialized as part of `model.save()` so you can reload them via `model.index_load()` for serving purposes or further training / evaluation.
The default implementation can scale up to medium deployment (1M-10M+ points) easily, provided the computers have enough memory. For very large scale deployments you will need to subclass the components to match your own architecture. See FIXME colab to see how to deploy TensorFlow Similarity in production.
"""
# Single source of truth for the package version (bumped on release).
__version__ = '0.13.11'
| apache-2.0 | Python |
f1a3aeb97486c1e02906d1e96b758ed58d1bb252 | fix typo in write_cache | mvyskocil/pyckle | pyckle/cache.py | pyckle/cache.py | # caching support for pyckle
#
# ... based on pickle
#
import errno
import imp
import os
import pickle
import tokenize
from py_compile import wr_long
from . import load
MAGIC=b'pyckle\x00\x00'
def cache_path(filename):
    """Return the cache-file path for *filename*.

    Uses the PEP 3147 __pycache__ location (with a "kle.cache" suffix
    appended to the .pyc name) when imp.cache_from_source exists,
    otherwise falls back to "<filename>.cache".
    """
    if hasattr(imp, "cache_from_source"):
        return imp.cache_from_source(filename) + "kle.cache"
    else:
        # Fixed: was `fullname`, an undefined name, so this branch always
        # raised NameError instead of returning the fallback path.
        return filename + ".cache"
def write_cache(file, cfile=None, _obj=None):
    """Write cache of pyckle file

    :param file: The source file name.
    :param cfile: Target cache-file, default to PEP 3147 location
    :param _obj: The object to write, default None
    :return: Path to resulting cache file
    """
    with tokenize.open(file) as f:
        # Record the source's mtime and (truncated) size so a stale cache
        # can be detected against the source file later.
        try:
            st = os.fstat(f.fileno())
        except AttributeError:
            st = os.stat(file)
        timestamp = int(st.st_mtime)
        size = st.st_size & 0xFFFFFFFF
        if _obj is None:
            _obj = load(f)
        if cfile is None:
            cfile = cache_path(file)
        try:
            dirname = os.path.dirname(cfile)
            if dirname:
                os.makedirs(dirname)
        except OSError as error:
            # An already-existing directory is fine; any other failure
            # silently aborts the (best-effort) cache write.
            if error.errno != errno.EEXIST:
                return None
        with open(cfile, 'wb') as fp:
            # Reserve 8 placeholder bytes for MAGIC, write the
            # timestamp/size header and the pickled payload, then seek
            # back and write MAGIC last -- a partially written cache can
            # therefore never carry a valid magic prefix.
            fp.write(b'\0\0\0\0\0\0\0\0')
            wr_long(fp, timestamp)
            wr_long(fp, size)
            pickle.dump(_obj, fp, protocol=pickle.HIGHEST_PROTOCOL)
            fp.flush()
            fp.seek(0, 0)
            fp.write(MAGIC)
            return cfile
        # NOTE(review): unreachable -- the with-block above always returns
        # cfile or propagates an exception.
        return None
| # caching support for pyckle
#
# ... based on pickle
#
import errno
import imp
import os
import pickle
import tokenize
from py_compile import wr_long
from . import load
MAGIC=b'pyckle\x00\x00'
def cache_path(filename):
    """Return the cache-file path for *filename*.

    Uses the PEP 3147 __pycache__ location (with a "kle.cache" suffix
    appended to the .pyc name) when imp.cache_from_source exists,
    otherwise falls back to "<filename>.cache".
    """
    if hasattr(imp, "cache_from_source"):
        return imp.cache_from_source(filename) + "kle.cache"
    else:
        # Fixed: was `fullname`, an undefined name, so this branch always
        # raised NameError instead of returning the fallback path.
        return filename + ".cache"
def write_cache(file, cfile=None, _obj=None):
    """Write cache of pyckle file

    :param file: The source file name.
    :param cfile: Target cache-file, default to PEP 3147 location
    :param _obj: The object to write, default None
    :return: Path to resulting cache file
    """
    with tokenize.open(file) as f:
        # Record the source's mtime and (truncated) size so a stale cache
        # can be detected against the source file later.
        try:
            st = os.fstat(f.fileno())
        except AttributeError:
            st = os.stat(file)
        timestamp = int(st.st_mtime)
        size = st.st_size & 0xFFFFFFFF
        if _obj is None:
            _obj = load(f)
        if cfile is None:
            # Fixed: was cache_path(filename) -- `filename` is undefined in
            # this scope (the parameter is `file`), so every call that
            # relied on the default cfile raised NameError.
            cfile = cache_path(file)
        try:
            dirname = os.path.dirname(cfile)
            if dirname:
                os.makedirs(dirname)
        except OSError as error:
            # An already-existing directory is fine; any other failure
            # silently aborts the (best-effort) cache write.
            if error.errno != errno.EEXIST:
                return None
        with open(cfile, 'wb') as fp:
            # Reserve 8 placeholder bytes for MAGIC, write the
            # timestamp/size header and the pickled payload, then seek
            # back and write MAGIC last -- a partially written cache can
            # therefore never carry a valid magic prefix.
            fp.write(b'\0\0\0\0\0\0\0\0')
            wr_long(fp, timestamp)
            wr_long(fp, size)
            pickle.dump(_obj, fp, protocol=pickle.HIGHEST_PROTOCOL)
            fp.flush()
            fp.seek(0, 0)
            fp.write(MAGIC)
            return cfile
        return None
| mit | Python |
95d3c1151d84a6b8c4770881e75ebbd7dfe111dd | correct issue with version. | tutorcruncher/pydf,tutorcruncher/pydf,samuelcolvin/pydf | pydf/version.py | pydf/version.py | from distutils.version import StrictVersion
# Package version; StrictVersion enforces the strict X.Y[.Z] format.
# NOTE(review): distutils is deprecated (PEP 632) -- consider migrating.
VERSION = StrictVersion('0.30.0')
| from distutils.version import StrictVersion
# Package version; StrictVersion enforces the strict X.Y[.Z] format.
# NOTE(review): distutils is deprecated (PEP 632) -- consider migrating.
VERSION = StrictVersion('0.3.0')
| mit | Python |
a0ceb84519d1bf735979b3afdfdb8b17621d308b | Fix overwriting resolution with empty text | stefanw/froide,stefanw/froide,stefanw/froide,fin/froide,fin/froide,stefanw/froide,stefanw/froide,fin/froide,fin/froide | froide/problem/admin.py | froide/problem/admin.py | from django.contrib import admin
from django.utils.html import format_html
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from froide.helper.admin_utils import make_nullfilter
from .models import ProblemReport
class ProblemReportAdmin(admin.ModelAdmin):
    """Admin for user-submitted problem reports on FOI messages."""

    date_hierarchy = 'timestamp'
    raw_id_fields = ('message', 'user', 'moderator')
    list_filter = (
        'auto_submitted', 'resolved', 'kind',
        make_nullfilter('claimed', _('Claimed')),
        make_nullfilter('escalated', _('Escalated')),
    )
    list_display = (
        'kind', 'timestamp', 'admin_link_message',
        'auto_submitted', 'resolved',
    )

    def get_queryset(self, request):
        # Prefetch the related message so admin_link_message does not issue
        # one extra query per list row.
        qs = super().get_queryset(request)
        qs = qs.select_related('message')
        return qs

    def admin_link_message(self, obj):
        # Clickable link to the related FoiMessage admin change page.
        return format_html('<a href="{}">{}</a>',
            reverse('admin:foirequest_foimessage_change',
                args=(obj.message_id,)), str(obj.message))

    def save_model(self, request, obj, form, change):
        super().save_model(request, obj, form, change)
        # When an admin flips a report to resolved, trigger resolution,
        # forwarding the resolution text entered on this form.
        if 'resolved' in form.changed_data and obj.resolved:
            sent = obj.resolve(request.user, resolution=obj.resolution)
            if sent:
                self.message_user(
                    request, _('User will be notified of resolution')
                )


admin.site.register(ProblemReport, ProblemReportAdmin)
| from django.contrib import admin
from django.utils.html import format_html
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from froide.helper.admin_utils import make_nullfilter
from .models import ProblemReport
class ProblemReportAdmin(admin.ModelAdmin):
    """Admin for user-submitted problem reports on FOI messages."""

    date_hierarchy = 'timestamp'
    raw_id_fields = ('message', 'user', 'moderator')
    list_filter = (
        'auto_submitted', 'resolved', 'kind',
        make_nullfilter('claimed', _('Claimed')),
        make_nullfilter('escalated', _('Escalated')),
    )
    list_display = (
        'kind', 'timestamp', 'admin_link_message',
        'auto_submitted', 'resolved',
    )

    def get_queryset(self, request):
        # Prefetch the related message so admin_link_message does not issue
        # one extra query per list row.
        qs = super().get_queryset(request)
        qs = qs.select_related('message')
        return qs

    def admin_link_message(self, obj):
        # Clickable link to the related FoiMessage admin change page.
        return format_html('<a href="{}">{}</a>',
            reverse('admin:foirequest_foimessage_change',
                args=(obj.message_id,)), str(obj.message))

    def save_model(self, request, obj, form, change):
        super().save_model(request, obj, form, change)
        # When an admin flips a report to resolved, trigger resolution.
        # Fixed: previously called obj.resolve(request.user) without the
        # resolution text, so the message the admin typed into the form was
        # dropped before the user was notified.
        if 'resolved' in form.changed_data and obj.resolved:
            sent = obj.resolve(request.user, resolution=obj.resolution)
            if sent:
                self.message_user(
                    request, _('User will be notified of resolution')
                )


admin.site.register(ProblemReport, ProblemReportAdmin)
| mit | Python |
57dc66d43f6b9afe1729b326ac5bd7587c276a85 | add flag to disable cdep website encoding hack | mgax/mptracker,mgax/mptracker,mgax/mptracker,mgax/mptracker | mptracker/scraper/common.py | mptracker/scraper/common.py | from urllib.parse import urlencode, urlparse, parse_qs
from path import path
import requests
from pyquery import PyQuery as pq
project_root = path(__file__).abspath().parent.parent.parent
class Scraper(object):
    """Thin wrapper over requests + PyQuery used by the scrapers."""

    def __init__(self, session=None, use_cdep_opener=True):
        # use_cdep_opener toggles the cdep.ro charset workaround in
        # fetch_url; leave it True for cdep.ro, pass False for other sites.
        self.session = session or requests.Session()
        self.use_cdep_opener = use_cdep_opener

    def fetch_url(self, url, args=None):
        """Fetch *url* (optionally appending urlencoded *args*) and return
        a PyQuery document with links made absolute."""
        if args:
            # Append the query string, respecting an existing '?'/'&'.
            if '?' not in url:
                url += '?'
            elif url[-1] not in ['?', '&']:
                url += '&'
            url += urlencode(args)

        kwargs = {'parser': 'html'}
        if self.use_cdep_opener:
            def opener(url):
                resp = self.session.get(url)
                # cdep.ro content is treated as iso-8859-2 and re-encoded
                # before parsing; see the comment below for why utf-16.
                text = resp.content.decode('iso-8859-2')
                # we use utf-16 because the parser's autodetect works fine with it
                return text.encode('utf-16')
            kwargs['opener'] = opener

        page = pq(url, **kwargs)
        page.make_links_absolute()
        return page
def get_cached_session(name='page-cache'):
    """Return a requests session that caches responses on disk under _data/."""
    import requests_cache
    return requests_cache.CachedSession(project_root / '_data' / name)
def pqitems(ob, selector=None):
    """Yield each element of *ob* (or of ``ob(selector)`` when a selector
    is given) wrapped in ``type(ob)``, so PyQuery result sets iterate as
    PyQuery objects rather than raw elements."""
    wrap = type(ob)
    matches = ob if selector is None else ob(selector)
    return (wrap(item) for item in matches)
def get_cdep_id(href):
    """Extract a "<legislature>-<NNN>" id from a cdep.ro member URL."""
    params = parse_qs(urlparse(href).query)
    assert params['cam'] == ['2']  # only chamber 2 links are expected here
    leg, idm = params['leg'][0], params['idm'][0]
    return '%s-%03d' % (int(leg), int(idm))
| from urllib.parse import urlencode, urlparse, parse_qs
from path import path
import requests
from pyquery import PyQuery as pq
project_root = path(__file__).abspath().parent.parent.parent
class Scraper(object):
    """Thin wrapper over requests + PyQuery used by the scrapers."""

    def __init__(self, session=None, use_cdep_opener=True):
        """:param use_cdep_opener: apply the cdep.ro charset workaround
        (decode iso-8859-2, re-encode utf-16) when fetching. Previously
        the hack was applied unconditionally, which broke scraping of any
        site that is not iso-8859-2; the default True keeps the old
        behaviour for existing callers."""
        self.session = session or requests.Session()
        self.use_cdep_opener = use_cdep_opener

    def fetch_url(self, url, args=None):
        """Fetch *url* (optionally appending urlencoded *args*) and return
        a PyQuery document with links made absolute."""
        if args:
            # Append the query string, respecting an existing '?'/'&'.
            if '?' not in url:
                url += '?'
            elif url[-1] not in ['?', '&']:
                url += '&'
            url += urlencode(args)

        kwargs = {'parser': 'html'}
        if self.use_cdep_opener:
            def opener(url):
                resp = self.session.get(url)
                text = resp.content.decode('iso-8859-2')
                # we use utf-16 because the parser's autodetect works fine with it
                return text.encode('utf-16')
            kwargs['opener'] = opener

        page = pq(url, **kwargs)
        page.make_links_absolute()
        return page
def get_cached_session(name='page-cache'):
    """Return a requests session that caches responses on disk under _data/."""
    import requests_cache
    return requests_cache.CachedSession(project_root / '_data' / name)
def pqitems(ob, selector=None):
    """Yield each element of *ob* (or of ``ob(selector)`` when a selector
    is given) wrapped in ``type(ob)``, so PyQuery result sets iterate as
    PyQuery objects rather than raw elements."""
    wrap = type(ob)
    matches = ob if selector is None else ob(selector)
    return (wrap(item) for item in matches)
def get_cdep_id(href):
qs = parse_qs(urlparse(href).query)
assert qs['cam'] == ['2']
return '%s-%03d' % (int(qs['leg'][0]), int(qs['idm'][0]))
| mit | Python |
14c11c9a5c34db08cd797a1a13833974d96ac888 | Remove preceedig newlines (unneeded) | ayushin78/coala,NalinG/coala,arush0311/coala,rimacone/testing2,refeed/coala,jayvdb/coala,meetmangukiya/coala,karansingh1559/coala,JohnS-01/coala,Asalle/coala,impmihai/coala,shreyans800755/coala,sagark123/coala,sophiavanvalkenburg/coala,Shade5/coala,karansingh1559/coala,NalinG/coala,coala-analyzer/coala,mr-karan/coala,kartikeys98/coala,Asalle/coala,NalinG/coala,sophiavanvalkenburg/coala,nemaniarjun/coala,JohnS-01/coala,abhiroyg/coala,NalinG/coala,netman92/coala,shreyans800755/coala,impmihai/coala,aptrishu/coala,incorrectusername/coala,sophiavanvalkenburg/coala,tushar-rishav/coala,netman92/coala,tltuan/coala,NalinG/coala,Shade5/coala,sils1297/coala,ayushin78/coala,vinc456/coala,CruiseDevice/coala,kartikeys98/coala,Balaji2198/coala,Balaji2198/coala,MariosPanag/coala,mr-karan/coala,damngamerz/coala,coala/coala,ManjiriBirajdar/coala,refeed/coala,SanketDG/coala,MariosPanag/coala,NalinG/coala,coala/coala,Nosferatul/coala,tltuan/coala,rimacone/testing2,arjunsinghy96/coala,ayushin78/coala,tushar-rishav/coala,incorrectusername/coala,Nosferatul/coala,JohnS-01/coala,jayvdb/coala,shreyans800755/coala,meetmangukiya/coala,kartikeys98/coala,Nosferatul/coala,Asalle/coala,coala-analyzer/coala,Asnelchristian/coala,NiklasMM/coala,ManjiriBirajdar/coala,vinc456/coala,arjunsinghy96/coala,sils1297/coala,aptrishu/coala,yashtrivedi96/coala,Shade5/coala,sagark123/coala,karansingh1559/coala,netman92/coala,d6e/coala,CruiseDevice/coala,arush0311/coala,SanketDG/coala,jayvdb/coala,MariosPanag/coala,coala/coala,tushar-rishav/coala,aptrishu/coala,RJ722/coala,NalinG/coala,tltuan/coala,vinc456/coala,stevemontana1980/coala,scottbelden/coala,yashLadha/coala,scottbelden/coala,NiklasMM/coala,Asnelchristian/coala,mr-karan/coala,scottbelden/coala,RJ722/coala,yashLadha/coala,CruiseDevice/coala,incorrectusername/coala,RJ722/coala,abhiroyg/coala,arjunsinghy96/coala,damngamerz/coala,d6e/coala,arush0311/coala,yashLadha/coala,nemani
arjun/coala,d6e/coala,sagark123/coala,meetmangukiya/coala,nemaniarjun/coala,rimacone/testing2,abhiroyg/coala,SanketDG/coala,Balaji2198/coala,damngamerz/coala,refeed/coala,yashtrivedi96/coala,stevemontana1980/coala,coala-analyzer/coala,Asnelchristian/coala,sils1297/coala,stevemontana1980/coala,impmihai/coala,yashtrivedi96/coala,NiklasMM/coala,ManjiriBirajdar/coala | coalib/misc/Annotations.py | coalib/misc/Annotations.py | def typechain(*args):
"""
Returns function which applies the first transformation it can from args
and returns transformed value, or the value itself if it is in args.
>>> function = typechain(int, 'a', ord, None)
>>> function("10")
10
>>> function("b")
98
>>> function("a")
'a'
>>> function(int)
<class 'int'>
>>> function(None) is None
True
>>> function("str")
Traceback (most recent call last):
...
ValueError: Couldn't convert value 'str' to any specified type or find it \
in specified values.
:raises TypeError: Raises when either no functions are specified for
checking.
"""
if len(args) == 0:
raise TypeError("No arguments were provided.")
def annotation(value):
"""
Returns value either transformed with one of the function in args, or
casted to one of types in args, or the value itself if it is in the
args.
:raises ValueError: Raises when cannot transform value in any one of
specified ways.
"""
for arg in args:
if value == arg:
return value
if isinstance(arg, type) and isinstance(value, arg):
return value
try:
return arg(value)
except (ValueError, TypeError):
pass
raise ValueError(
"Couldn't convert value {value} to any specified type "
"or find it in specified values.".format(
value=repr(value)))
return annotation
|
def typechain(*args):
"""
Returns function which applies the first transformation it can from args
and returns transformed value, or the value itself if it is in args.
>>> function = typechain(int, 'a', ord, None)
>>> function("10")
10
>>> function("b")
98
>>> function("a")
'a'
>>> function(int)
<class 'int'>
>>> function(None) is None
True
>>> function("str")
Traceback (most recent call last):
...
ValueError: Couldn't convert value 'str' to any specified type or find it \
in specified values.
:raises TypeError: Raises when either no functions are specified for
checking.
"""
if len(args) == 0:
raise TypeError("No arguments were provided.")
def annotation(value):
"""
Returns value either transformed with one of the function in args, or
casted to one of types in args, or the value itself if it is in the
args.
:raises ValueError: Raises when cannot transform value in any one of
specified ways.
"""
for arg in args:
if value == arg:
return value
if isinstance(arg, type) and isinstance(value, arg):
return value
try:
return arg(value)
except (ValueError, TypeError):
pass
raise ValueError(
"Couldn't convert value {value} to any specified type "
"or find it in specified values.".format(
value=repr(value)))
return annotation
| agpl-3.0 | Python |
1171ade137c54c778a284ef32fdbdcd9e5c1d828 | Add __repr__() and __str__() to Result | wylee/runcommands,wylee/runcommands | runcommands/runners/result.py | runcommands/runners/result.py | from ..util import cached_property
class Result:
def __init__(self, return_code, stdout_data, stderr_data, encoding):
self.return_code = return_code
self.stdout_data = stdout_data
self.stderr_data = stderr_data
self.encoding = encoding
self.succeeded = self.return_code == 0
self.failed = not self.succeeded
@cached_property
def stdout(self):
if self.stdout_data:
stdout = b''.join(self.stdout_data)
stdout = stdout.decode(self.encoding)
else:
stdout = ''
return stdout
@cached_property
def stderr(self):
if self.stderr_data:
stderr = b''.join(self.stderr_data)
stderr = stderr.decode(self.encoding)
else:
stderr = ''
return stderr
@cached_property
def stdout_lines(self):
return self.stdout.splitlines() if self.stdout else []
@cached_property
def stderr_lines(self):
return self.stderr.splitlines() if self.stderr else []
def __bool__(self):
return self.succeeded
def __str__(self):
return self.stdout
def __repr__(self):
return repr(self.stdout)
| from ..util import cached_property
class Result:
def __init__(self, return_code, stdout_data, stderr_data, encoding):
self.return_code = return_code
self.stdout_data = stdout_data
self.stderr_data = stderr_data
self.encoding = encoding
self.succeeded = self.return_code == 0
self.failed = not self.succeeded
@cached_property
def stdout(self):
if self.stdout_data:
stdout = b''.join(self.stdout_data)
stdout = stdout.decode(self.encoding)
else:
stdout = ''
return stdout
@cached_property
def stderr(self):
if self.stderr_data:
stderr = b''.join(self.stderr_data)
stderr = stderr.decode(self.encoding)
else:
stderr = ''
return stderr
@cached_property
def stdout_lines(self):
return self.stdout.splitlines() if self.stdout else []
@cached_property
def stderr_lines(self):
return self.stderr.splitlines() if self.stderr else []
def __bool__(self):
return self.succeeded
| mit | Python |
34ad33bfcb67dcb972670ea8453b734851528e44 | fix in reconciliation patch | rohitwaghchaure/erpnext-receipher,rohitwaghchaure/digitales_erpnext,Tejal011089/trufil-erpnext,suyashphadtare/gd-erp,geekroot/erpnext,gangadharkadam/saloon_erp,gangadharkadam/v6_erp,gangadharkadam/contributionerp,gangadhar-kadam/verve_erp,gangadharkadam/v6_erp,njmube/erpnext,mbauskar/omnitech-erpnext,gmarke/erpnext,mbauskar/sapphire-erpnext,Tejal011089/paypal_erpnext,indictranstech/Das_Erpnext,rohitwaghchaure/New_Theme_Erp,netfirms/erpnext,SPKian/Testing,gangadharkadam/contributionerp,suyashphadtare/gd-erp,saurabh6790/alert-med-app,suyashphadtare/vestasi-update-erp,gangadhar-kadam/verve_live_erp,aruizramon/alec_erpnext,fuhongliang/erpnext,gangadharkadam/saloon_erp_install,saurabh6790/omn-app,mahabuber/erpnext,Drooids/erpnext,gangadhar-kadam/adb-erp,gangadhar-kadam/latestchurcherp,saurabh6790/test-erp,mbauskar/omnitech-erpnext,mbauskar/helpdesk-erpnext,rohitwaghchaure/digitales_erpnext,gangadhar-kadam/adb-erp,saurabh6790/test-erp,pawaranand/phrerp,saurabh6790/omnisys-app,mbauskar/sapphire-erpnext,susuchina/ERPNEXT,gangadharkadam/office_erp,gangadhar-kadam/helpdesk-erpnext,saurabh6790/medsyn-app1,shitolepriya/test-erp,mbauskar/helpdesk-erpnext,MartinEnder/erpnext-de,mbauskar/alec_frappe5_erpnext,gangadharkadam/v6_erp,indictranstech/internal-erpnext,Tejal011089/digitales_erpnext,Tejal011089/fbd_erpnext,saurabh6790/test-erp,mbauskar/phrerp,gangadharkadam/v5_erp,saurabh6790/trufil_app,indictranstech/buyback-erp,saurabh6790/pow-app,Tejal011089/trufil-erpnext,suyashphadtare/gd-erp,gangadhar-kadam/verve_test_erp,gangadharkadam/vlinkerp,indictranstech/trufil-erpnext,gangadharkadam/vlinkerp,saurabh6790/medsyn-app,gangadharkadam/v6_erp,saurabh6790/med_app_rels,tmimori/erpnext,indictranstech/osmosis-erpnext,saurabh6790/omni-apps,indictranstech/biggift-erpnext,gangadhar-kadam/verve_erp,saurabh6790/alert-med-app,saurabh6790/omni-apps,mbauskar/Das_Erpnext,BhupeshGupta/erpnext,4commerce-technologies-AG/erpnext
,4commerce-technologies-AG/erpnext,SPKian/Testing2,gangadhar-kadam/verve-erp,Aptitudetech/ERPNext,gangadharkadam/sterp,indictranstech/osmosis-erpnext,indictranstech/reciphergroup-erpnext,gangadhar-kadam/latestchurcherp,gangadharkadam/v5_erp,Suninus/erpnext,saurabh6790/aimobilize-app-backup,rohitwaghchaure/erpnext-receipher,indictranstech/buyback-erp,suyashphadtare/vestasi-update-erp,hatwar/buyback-erpnext,gangadhar-kadam/smrterp,saurabh6790/omnisys-app,gangadharkadam/office_erp,SPKian/Testing2,gangadharkadam/vlinkerp,sheafferusa/erpnext,SPKian/Testing2,Tejal011089/Medsyn2_app,suyashphadtare/test,indictranstech/erpnext,rohitwaghchaure/erpnext_smart,gangadhar-kadam/latestchurcherp,gangadhar-kadam/church-erpnext,4commerce-technologies-AG/erpnext,gsnbng/erpnext,rohitwaghchaure/New_Theme_Erp,ShashaQin/erpnext,mbauskar/Das_Erpnext,mbauskar/internal-hr,Tejal011089/osmosis_erpnext,gangadharkadam/sher,indictranstech/erpnext,suyashphadtare/sajil-erp,Tejal011089/digitales_erpnext,saurabh6790/omnitech-apps,gangadhar-kadam/smrterp,saurabh6790/omnitech-apps,saurabh6790/medsynaptic1-app,gangadhar-kadam/sms-erpnext,ThiagoGarciaAlves/erpnext,ThiagoGarciaAlves/erpnext,gangadharkadam/contributionerp,suyashphadtare/vestasi-erp-1,dieface/erpnext,gangadhar-kadam/latestchurcherp,sagar30051991/ozsmart-erp,gangadhar-kadam/helpdesk-erpnext,Yellowen/Owrang,Suninus/erpnext,gangadharkadam/verveerp,gangadharkadam/saloon_erp,saurabh6790/medsyn-app1,saurabh6790/test-erp,gangadharkadam/verveerp,hatwar/Das_erpnext,gangadharkadam/letzerp,pombredanne/erpnext,gangadharkadam/letzerp,Tejal011089/huntercamp_erpnext,gangadhar-kadam/verve_test_erp,pawaranand/phrerp,suyashphadtare/gd-erp,gmarke/erpnext,Tejal011089/paypal_erpnext,mahabuber/erpnext,gmarke/erpnext,geekroot/erpnext,hatwar/buyback-erpnext,Tejal011089/trufil-erpnext,indictranstech/phrerp,hanselke/erpnext-1,dieface/erpnext,gsnbng/erpnext,indictranstech/fbd_erpnext,BhupeshGupta/erpnext,susuchina/ERPNEXT,Tejal011089/osmosis_erpnext,mbauskar/internal-
hr,gangadharkadam/v5_erp,shitolepriya/test-erp,saurabh6790/tru_app_back,gangadharkadam/letzerp,suyashphadtare/test,suyashphadtare/vestasi-erp-final,gangadharkadam/smrterp,indictranstech/internal-erpnext,gangadharkadam/v4_erp,gangadhar-kadam/helpdesk-erpnext,indictranstech/vestasi-erpnext,anandpdoshi/erpnext,hernad/erpnext,aruizramon/alec_erpnext,gangadhar-kadam/mic-erpnext,indictranstech/Das_Erpnext,BhupeshGupta/erpnext,ThiagoGarciaAlves/erpnext,gangadharkadam/v4_erp,saurabh6790/test_final_med_app,gangadhar-kadam/verve_test_erp,gangadharkadam/johnerp,hatwar/Das_erpnext,mbauskar/phrerp,hatwar/Das_erpnext,rohitwaghchaure/New_Theme_Erp,mbauskar/Das_Erpnext,saurabh6790/medsynaptic-app,indictranstech/buyback-erp,hernad/erpnext,saurabh6790/aimobilize,saurabh6790/aimobilize,indictranstech/reciphergroup-erpnext,rohitwaghchaure/digitales_erpnext,gangadhar-kadam/sapphire_app,indictranstech/Das_Erpnext,rohitwaghchaure/GenieManager-erpnext,saurabh6790/medapp,Tejal011089/fbd_erpnext,gangadharkadam/saloon_erp_install,fuhongliang/erpnext,saurabh6790/tru_app_back,tmimori/erpnext,suyashphadtare/sajil-final-erp,gangadharkadam/sher,indictranstech/phrerp,suyashphadtare/vestasi-update-erp,hanselke/erpnext-1,hernad/erpnext,gangadhar-kadam/prjapp,gangadhar-kadam/sms-erpnext,gangadhar-kadam/mic-erpnext,saurabh6790/med_new_app,gangadhar-kadam/powapp,treejames/erpnext,indictranstech/fbd_erpnext,gangadharkadam/contributionerp,saurabh6790/med_new_app,gangadhar-kadam/hrerp,hatwar/buyback-erpnext,pombredanne/erpnext,indictranstech/trufil-erpnext,MartinEnder/erpnext-de,Tejal011089/huntercamp_erpnext,njmube/erpnext,indictranstech/erpnext,shft117/SteckerApp,ShashaQin/erpnext,indictranstech/reciphergroup-erpnext,mbauskar/alec_frappe5_erpnext,fuhongliang/erpnext,saurabh6790/OFF-RISAPP,gangadhar-kadam/verve_erp,saurabh6790/omnit-app,saurabh6790/pow-app,indictranstech/tele-erpnext,saurabh6790/medsynaptic1-app,anandpdoshi/erpnext,gangadhar-kadam/church-erpnext,hatwar/focal-erpnext,shitolepriya/test-erp,
hernad/erpnext,meisterkleister/erpnext,Tejal011089/fbd_erpnext,saurabh6790/omn-app,njmube/erpnext,indictranstech/biggift-erpnext,Tejal011089/trufil-erpnext,ShashaQin/erpnext,indictranstech/vestasi-erpnext,gangadhar-kadam/powapp,Suninus/erpnext,BhupeshGupta/erpnext,suyashphadtare/vestasi-erp-final,mbauskar/omnitech-demo-erpnext,njmube/erpnext,pombredanne/erpnext,gangadhar-kadam/mtn-erpnext,gangadharkadam/smrterp,rohitwaghchaure/GenieManager-erpnext,mahabuber/erpnext,suyashphadtare/vestasi-erp-jan-end,suyashphadtare/vestasi-erp-jan-end,indictranstech/vestasi-erpnext,suyashphadtare/vestasi-erp-final,fuhongliang/erpnext,gangadharkadam/saloon_erp,saurabh6790/med_app_rels,gangadharkadam/saloon_erp_install,indictranstech/erpnext,indictranstech/trufil-erpnext,sheafferusa/erpnext,gangadharkadam/office_erp,indictranstech/phrerp,pombredanne/erpnext,indictranstech/Das_Erpnext,gangadhar-kadam/prjapp,gangadhar-kadam/verve_live_erp,gangadhar-kadam/sapphire_app,pawaranand/phrerp,netfirms/erpnext,gangadhar-kadam/laganerp,Tejal011089/huntercamp_erpnext,hatwar/focal-erpnext,suyashphadtare/vestasi-erp-jan-end,indictranstech/osmosis-erpnext,gangadhar-kadam/powapp,rohitwaghchaure/digitales_erpnext,saurabh6790/medsynaptic-app,indictranstech/internal-erpnext,gangadharkadam/sterp,anandpdoshi/erpnext,Tejal011089/huntercamp_erpnext,meisterkleister/erpnext,gangadharkadam/verveerp,Drooids/erpnext,suyashphadtare/test,gangadharkadam/verveerp,rohitwaghchaure/erpnext_smart,sagar30051991/ozsmart-erp,Tejal011089/paypal_erpnext,mbauskar/phrerp,geekroot/erpnext,Tejal011089/digitales_erpnext,saurabh6790/ON-RISAPP,rohitwaghchaure/erpnext-receipher,mbauskar/omnitech-erpnext,tmimori/erpnext,hatwar/buyback-erpnext,Tejal011089/osmosis_erpnext,gangadhar-kadam/mtn-erpnext,suyashphadtare/sajil-final-erp,Drooids/erpnext,treejames/erpnext,hanselke/erpnext-1,mbauskar/omnitech-demo-erpnext,MartinEnder/erpnext-de,indictranstech/tele-erpnext,indictranstech/reciphergroup-erpnext,suyashphadtare/vestasi-erp-jan-end,gang
adharkadam/v4_erp,indictranstech/internal-erpnext,saurabh6790/OFF-RISAPP,shft117/SteckerApp,Suninus/erpnext,gangadhar-kadam/laganerp,suyashphadtare/vestasi-erp-1,rohitwaghchaure/New_Theme_Erp,treejames/erpnext,indictranstech/fbd_erpnext,gangadhar-kadam/nassimapp,gangadharkadam/saloon_erp,gangadhar-kadam/verve_live_erp,Tejal011089/osmosis_erpnext,rohitwaghchaure/erpnext-receipher,indictranstech/osmosis-erpnext,aruizramon/alec_erpnext,indictranstech/biggift-erpnext,indictranstech/tele-erpnext,meisterkleister/erpnext,suyashphadtare/sajil-final-erp,susuchina/ERPNEXT,saurabh6790/ON-RISAPP,shft117/SteckerApp,gangadharkadam/v5_erp,gangadhar-kadam/verve-erp,mbauskar/helpdesk-erpnext,indictranstech/focal-erpnext,saurabh6790/aimobilize-app-backup,rohitwaghchaure/GenieManager-erpnext,gangadharkadam/v4_erp,shitolepriya/test-erp,saurabh6790/test-med-app,gangadharkadam/saloon_erp_install,indictranstech/focal-erpnext,SPKian/Testing,Tejal011089/paypal_erpnext,gangadhar-kadam/hrerp,sheafferusa/erpnext,Tejal011089/fbd_erpnext,Tejal011089/med2-app,indictranstech/trufil-erpnext,indictranstech/focal-erpnext,indictranstech/vestasi-erpnext,anandpdoshi/erpnext,SPKian/Testing,treejames/erpnext,gmarke/erpnext,mbauskar/Das_Erpnext,ThiagoGarciaAlves/erpnext,suyashphadtare/sajil-erp,indictranstech/phrerp,gangadhar-kadam/sapphire_app,mbauskar/helpdesk-erpnext,gangadhar-kadam/nassimapp,gangadhar-kadam/verve-erp,Tejal011089/med2-app,mbauskar/phrerp,mbauskar/internal-hr,mahabuber/erpnext,pawaranand/phrerp,gangadharkadam/tailorerp,SPKian/Testing2,indictranstech/tele-erpnext,saurabh6790/test-med-app,suyashphadtare/sajil-erp,gangadhar-kadam/helpdesk-erpnext,suyashphadtare/vestasi-erp-1,hanselke/erpnext-1,gangadhar-kadam/verve_live_erp,sagar30051991/ozsmart-erp,netfirms/erpnext,shft117/SteckerApp,mbauskar/omnitech-erpnext,indictranstech/buyback-erp,Drooids/erpnext,indictranstech/fbd_erpnext,saurabh6790/medsyn-app,ShashaQin/erpnext,hatwar/Das_erpnext,netfirms/erpnext,mbauskar/sapphire-erpnext,hatwar/foc
al-erpnext,sheafferusa/erpnext,MartinEnder/erpnext-de,SPKian/Testing,mbauskar/alec_frappe5_erpnext,geekroot/erpnext,gangadharkadam/tailorerp,dieface/erpnext,gsnbng/erpnext,aruizramon/alec_erpnext,sagar30051991/ozsmart-erp,mbauskar/omnitech-demo-erpnext,gsnbng/erpnext,rohitwaghchaure/GenieManager-erpnext,indictranstech/focal-erpnext,meisterkleister/erpnext,gangadharkadam/vlinkerp,saurabh6790/omnit-app,gangadharkadam/letzerp,gangadhar-kadam/verve_erp,mbauskar/alec_frappe5_erpnext,Yellowen/Owrang,saurabh6790/medapp,hatwar/focal-erpnext,gangadhar-kadam/laganerp,susuchina/ERPNEXT,saurabh6790/trufil_app,indictranstech/biggift-erpnext,Tejal011089/Medsyn2_app,gangadhar-kadam/verve_test_erp,dieface/erpnext,mbauskar/omnitech-demo-erpnext,gangadharkadam/johnerp,tmimori/erpnext,rohitwaghchaure/erpnext_smart,saurabh6790/test_final_med_app,mbauskar/sapphire-erpnext,Tejal011089/digitales_erpnext | patches/january_2013/stock_reconciliation_patch.py | patches/january_2013/stock_reconciliation_patch.py | import webnotes
def execute():
webnotes.reload_doc("stock", "doctype", "stock_ledger_entry")
rename_fields()
move_remarks_to_comments()
store_stock_reco_json()
def rename_fields():
args = [["Stock Ledger Entry", "bin_aqat", "qty_after_transaction"],
["Stock Ledger Entry", "fcfs_stack", "stock_queue"]]
for doctype, old_fieldname, new_fieldname in args:
webnotes.conn.sql("""update `tab%s` set `%s`=`%s`""" %
(doctype, new_fieldname, old_fieldname))
def move_remarks_to_comments():
from webnotes.utils import get_fullname
result = webnotes.conn.sql("""select name, remark, modified_by from `tabStock Reconciliation`
where ifnull(remark, '')!=''""")
fullname_map = {}
for reco, remark, modified_by in result:
webnotes.model_wrapper([{
"doctype": "Comment",
"comment": remark,
"comment_by": modified_by,
"comment_by_fullname": fullname_map.setdefault(modified_by, get_fullname(modified_by)),
"comment_doctype": "Stock Reconciliation",
"comment_docname": reco
}]).insert()
def store_stock_reco_json():
import os
import json
from webnotes.utils.datautils import read_csv_content
from webnotes.utils import get_base_path
files_path = os.path.join(get_base_path(), "public", "files")
list_of_files = os.listdir(files_path)
replaced_list_of_files = [f.replace("-", "") for f in list_of_files]
for reco, file_list in webnotes.conn.sql("""select name, file_list
from `tabStock Reconciliation`"""):
if file_list:
file_list = file_list.split("\n")
stock_reco_file = file_list[0].split(",")[1]
stock_reco_file_path = os.path.join(files_path, stock_reco_file)
if not os.path.exists(stock_reco_file_path):
if stock_reco_file in replaced_list_of_files:
stock_reco_file_path = os.path.join(files_path,
list_of_files[replaced_list_of_files.index(stock_reco_file)])
else:
stock_reco_file_path = ""
if stock_reco_file_path:
with open(stock_reco_file_path, "r") as open_reco_file:
content = open_reco_file.read()
content = read_csv_content(content)
reconciliation_json = json.dumps(content, separators=(',', ': '))
webnotes.conn.sql("""update `tabStock Reconciliation`
set reconciliation_json=%s where name=%s""", (reconciliation_json, reco))
| import webnotes
def execute():
webnotes.reload_doc("stock", "doctype", "stock_ledger_entry")
rename_fields()
move_remarks_to_comments()
store_stock_reco_json()
def rename_fields():
args = [["Stock Ledger Entry", "bin_aqat", "qty_after_transaction"],
["Stock Ledger Entry", "fcfs_stack", "stock_queue"]]
for doctype, old_fieldname, new_fieldname in args:
webnotes.conn.sql("""update `tab%s` set `%s`=`%s`""" %
(doctype, new_fieldname, old_fieldname))
def move_remarks_to_comments():
from webnotes.utils import get_fullname
result = webnotes.conn.sql("""select name, remark, modified_by from `tabStock Reconciliation`
where ifnull(remark, '')!=''""")
fullname_map = {}
for reco, remark, modified_by in result:
webnotes.model_wrapper([{
"doctype": "Comment",
"comment": remark,
"comment_by": modified_by,
"comment_by_fullname": fullname_map.setdefault(modified_by, get_fullname(modified_by)),
"comment_doctype": "Stock Reconciliation",
"comment_docname": reco
}]).insert()
def store_stock_reco_json():
import os
import json
from webnotes.utils.datautils import read_csv_content
from webnotes.utils import get_base_path
files_path = os.path.join(get_base_path(), "public", "files")
list_of_files = os.listdir(files_path)
replaced_list_of_files = [f.replace("-", "") for f in list_of_files]
for reco, file_list in webnotes.conn.sql("""select name, file_list
from `tabStock Reconciliation`"""):
if file_list:
file_list = file_list.split("\n")
stock_reco_file = file_list[0].split(",")[1]
stock_reco_file_path = os.path.join(files_path, stock_reco_file)
if not os.path.exists(stock_reco_file_path):
if stock_reco_file in replaced_list_of_files:
stock_reco_file_path = os.path.join(files_path,
list_of_files[replaced_list_of_files.index(stock_reco_file)])
else:
stock_reco_file_path = ""
if stock_reco_file_path:
with open(stock_reco_file_path, "r") as open_reco_file:
content = open_reco_file.read()
content = read_csv_content(content)
reconciliation_json = json.dumps(content, separators=(',', ': '))
webnotes.conn.sql("""update `tabStock Reconciliation`
set reconciliation_json=%s where name=%s""", (reconciliation_json, name))
| agpl-3.0 | Python |
cf44965ab0743e358817255cc4c6714cbb550223 | use asyncio.subprocess in shell cmd | lnmds/jose,Mstrodl/jose,Mstrodl/jose | ext/admin.py | ext/admin.py | import traceback
import asyncio
import logging
from discord.ext import commands
from .common import Cog
log = logging.getLogger(__name__)
class Admin(Cog):
@commands.command(hidden=True)
@commands.is_owner()
async def shutdown(self, ctx):
await ctx.send("dude rip")
#await self.bot.session.close()
await self.bot.logout()
@commands.command(hidden=True)
@commands.is_owner()
async def load(self, ctx, extension_name : str):
"""Loads an extension."""
try:
self.bot.load_extension("ext." + extension_name)
except Exception as e:
await ctx.send(f'```py\n{traceback.format_exc()}\n```')
return
log.info(f'Loaded {extension_name}')
await ctx.send(f':ok_hand: `{extension_name}` loaded.')
@commands.command(hidden=True)
@commands.is_owner()
async def unload(self, ctx, extension_name : str):
"""Unloads an extension."""
self.bot.unload_extension('ext.' + extension_name)
log.info(f'Unloaded {extension_name}')
await ctx.send(f':ok_hand: `{extension_name}` unloaded.')
@commands.command(hidden=True)
@commands.is_owner()
async def reload(self, ctx, extension_name : str):
"""Reloads an extension"""
try:
self.bot.unload_extension('ext.' + extension_name)
self.bot.load_extension('ext.' + extension_name)
except Exception as err:
await ctx.send(f'```{traceback.format_exc()}```')
return
log.info(f'Reloaded {extension_name}')
await ctx.send(f':ok_hand: Reloaded `{extension_name}`')
@commands.command()
@commands.is_owner()
async def shell(self, ctx, *, command: str):
"""Execute shell commands."""
p = await asyncio.create_subprocess_shell(command,
stderr=asyncio.subprocess.PIPE
stdout=asyncio.subprocess.PIPE
)
with ctx.typing:
await p.wait()
result = (await p.stdout.read()).decode("utf-8")
await ctx.send(f"`{command}`: ```{res}```\n")
def setup(bot):
bot.add_cog(Admin(bot))
| import traceback
import subprocess
import logging
from discord.ext import commands
from .common import Cog
log = logging.getLogger(__name__)
class Admin(Cog):
@commands.command(hidden=True)
@commands.is_owner()
async def shutdown(self, ctx):
await ctx.send("dude rip")
#await self.bot.session.close()
await self.bot.logout()
@commands.command(hidden=True)
@commands.is_owner()
async def load(self, ctx, extension_name : str):
"""Loads an extension."""
try:
self.bot.load_extension("ext." + extension_name)
except Exception as e:
await ctx.send(f'```py\n{traceback.format_exc()}\n```')
return
log.info(f'Loaded {extension_name}')
await ctx.send(f':ok_hand: `{extension_name}` loaded.')
@commands.command(hidden=True)
@commands.is_owner()
async def unload(self, ctx, extension_name : str):
"""Unloads an extension."""
self.bot.unload_extension('ext.' + extension_name)
log.info(f'Unloaded {extension_name}')
await ctx.send(f':ok_hand: `{extension_name}` unloaded.')
@commands.command(hidden=True)
@commands.is_owner()
async def reload(self, ctx, extension_name : str):
"""Reloads an extension"""
try:
self.bot.unload_extension('ext.' + extension_name)
self.bot.load_extension('ext.' + extension_name)
except Exception as err:
await ctx.send(f'```{traceback.format_exc()}```')
return
log.info(f'Reloaded {extension_name}')
await ctx.send(f':ok_hand: Reloaded `{extension_name}`')
@commands.command()
@commands.is_owner()
async def shell(self, ctx, *, command: str):
"""Execute shell commands."""
out = subprocess.check_output(command, shell=True, \
stderr=subprocess.STDOUT)
res = out.decode("utf-8")
await ctx.send(f"`{command}`: ```{res}```\n")
def setup(bot):
bot.add_cog(Admin(bot))
| mit | Python |
0dbbb0cb70208a8f0cab62b5e96fd867db880899 | Prepare release number for 18.2.0 | linuxipho/mycroft-core,forslund/mycroft-core,forslund/mycroft-core,MycroftAI/mycroft-core,Dark5ide/mycroft-core,linuxipho/mycroft-core,Dark5ide/mycroft-core,MycroftAI/mycroft-core | mycroft/version/__init__.py | mycroft/version/__init__.py | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from genericpath import exists, isfile
from mycroft.util.log import LOG
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 18
CORE_VERSION_MINOR = 2
CORE_VERSION_BUILD = -1
# END_VERSION_BLOCK
CORE_VERSION_TUPLE = (CORE_VERSION_MAJOR,
CORE_VERSION_MINOR,
CORE_VERSION_BUILD)
CORE_VERSION_STR = '.'.join(map(str, CORE_VERSION_TUPLE))
class VersionManager(object):
__location = "/opt/mycroft/version.json"
@staticmethod
def get():
if (exists(VersionManager.__location) and
isfile(VersionManager.__location)):
try:
with open(VersionManager.__location) as f:
return json.load(f)
except:
LOG.error("Failed to load version from '%s'"
% VersionManager.__location)
return {"coreVersion": None, "enclosureVersion": None}
def check_version(version_string):
"""
Check if current version is equal or higher than the
version string provided to the function
Args:
version_string (string): version string ('Major.Minor.Build')
"""
version_tuple = tuple(map(int, version_string.split('.')))
return CORE_VERSION_TUPLE >= version_tuple
| # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from genericpath import exists, isfile
from mycroft.util.log import LOG
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 9
CORE_VERSION_BUILD = 19
# END_VERSION_BLOCK
CORE_VERSION_TUPLE = (CORE_VERSION_MAJOR,
CORE_VERSION_MINOR,
CORE_VERSION_BUILD)
CORE_VERSION_STR = '.'.join(map(str, CORE_VERSION_TUPLE))
class VersionManager(object):
__location = "/opt/mycroft/version.json"
@staticmethod
def get():
if (exists(VersionManager.__location) and
isfile(VersionManager.__location)):
try:
with open(VersionManager.__location) as f:
return json.load(f)
except:
LOG.error("Failed to load version from '%s'"
% VersionManager.__location)
return {"coreVersion": None, "enclosureVersion": None}
def check_version(version_string):
"""
Check if current version is equal or higher than the
version string provided to the function
Args:
version_string (string): version string ('Major.Minor.Build')
"""
version_tuple = tuple(map(int, version_string.split('.')))
return CORE_VERSION_TUPLE >= version_tuple
| apache-2.0 | Python |
7de3c5999001c1acd60df193f3bd0029a40af963 | Declare dependency on Tensorflow>=2.0.0a0. It is already available on PyPI (2.0.0 is not available yet). | google/riegeli,google/riegeli,google/riegeli,google/riegeli | python/setup.py | python/setup.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PIP package setup for Riegeli."""
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
class FakeNonEmptyList(list):
"""This class is needed in order to create OS specific wheels.
Trick setuptools into thinking that we have an extension module. We do have
extension modules, but they are compiled with bazel, so declaring their
sources in ext_modules would be misleading.
"""
def __len__(self):
return 1
setuptools.setup(
name='riegeli',
version='0.0.1',
description='File format for storing a sequence of records',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/google/riegeli',
author='Google LLC',
author_email='compression-dev@google.com',
license='Apache License, Version 2.0',
install_requires=[
'enum34;python_version<"3.4"',
'protobuf',
],
extras_require={
'tensorflow': ['tensorflow>=2.0.0a0'],
},
packages=setuptools.find_packages(),
include_package_data=True,
package_data={'': ['**/*.so']},
ext_modules=FakeNonEmptyList(),
classifiers=[
'Programming Language :: Python',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PIP package setup for Riegeli."""
import setuptools
from setuptools import dist
with open('README.md', 'r') as fh:
long_description = fh.read()
class BinaryDistribution(dist.Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return True
setuptools.setup(
name='riegeli',
version='0.0.1',
description='File format for storing a sequence of records',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/google/riegeli',
author='Google LLC',
author_email='compression-dev@google.com',
license='Apache License, Version 2.0',
install_requires=[
'enum34;python_version<"3.4"',
'protobuf',
],
extras_require={
'tensorflow': ['tensorflow>=2.0.0'],
},
packages=setuptools.find_packages(),
include_package_data=True,
package_data={'': ['**/*.so']},
distclass=BinaryDistribution,
classifiers=[
'Programming Language :: Python',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| apache-2.0 | Python |
54a47c67172bfaf0f01168d7ffa5d113fe1291d2 | Make executable | Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed | uniname.py | uniname.py | #!/usr/bin/env python3
# Print the repr and official Unicode name of each character in argv[1].
import sys, unicodedata
for ch in sys.argv[1]: print(ascii(ch), unicodedata.name(ch))
| import sys, unicodedata
for ch in sys.argv[1]: print(ascii(ch), unicodedata.name(ch))
| mit | Python |
56611198e486bde21e4eff954b8c16f00713d8fc | add spacing around operators | newville/scikit-image,keflavich/scikit-image,SamHames/scikit-image,Midafi/scikit-image,almarklein/scikit-image,newville/scikit-image,oew1v07/scikit-image,michaelaye/scikit-image,blink1073/scikit-image,youprofit/scikit-image,vighneshbirodkar/scikit-image,emmanuelle/scikits.image,GaZ3ll3/scikit-image,bennlich/scikit-image,SamHames/scikit-image,bsipocz/scikit-image,jwiggins/scikit-image,rjeli/scikit-image,ofgulban/scikit-image,chintak/scikit-image,keflavich/scikit-image,michaelpacer/scikit-image,ofgulban/scikit-image,WarrenWeckesser/scikits-image,ClinicalGraphics/scikit-image,bsipocz/scikit-image,rjeli/scikit-image,emmanuelle/scikits.image,Britefury/scikit-image,chintak/scikit-image,Hiyorimi/scikit-image,SamHames/scikit-image,emmanuelle/scikits.image,paalge/scikit-image,emon10005/scikit-image,robintw/scikit-image,GaZ3ll3/scikit-image,emmanuelle/scikits.image,pratapvardhan/scikit-image,almarklein/scikit-image,jwiggins/scikit-image,SamHames/scikit-image,paalge/scikit-image,michaelpacer/scikit-image,michaelaye/scikit-image,chintak/scikit-image,vighneshbirodkar/scikit-image,bennlich/scikit-image,vighneshbirodkar/scikit-image,ClinicalGraphics/scikit-image,WarrenWeckesser/scikits-image,youprofit/scikit-image,warmspringwinds/scikit-image,Midafi/scikit-image,ofgulban/scikit-image,ajaybhat/scikit-image,ajaybhat/scikit-image,chriscrosscutler/scikit-image,juliusbierk/scikit-image,robintw/scikit-image,Britefury/scikit-image,rjeli/scikit-image,dpshelio/scikit-image,Hiyorimi/scikit-image,paalge/scikit-image,emon10005/scikit-image,chriscrosscutler/scikit-image,juliusbierk/scikit-image,chintak/scikit-image,blink1073/scikit-image,oew1v07/scikit-image,almarklein/scikit-image,dpshelio/scikit-image,pratapvardhan/scikit-image,warmspringwinds/scikit-image,almarklein/scikit-image | skimage/detection/tests/test_template.py | skimage/detection/tests/test_template.py | import numpy as np
from skimage.detection import match_template
from numpy.random import randn
def test_template():
size = 100
image = np.zeros((400, 400), dtype=np.float32)
target = np.tri(size) + np.tri(size)[::-1]
target = target.astype(np.float32)
target_positions = [(50, 50), (200, 200)]
for x, y in target_positions:
image[x:x + size, y:y + size] = target
image += randn(400, 400) * 2
for method in ["norm-corr", "norm-coeff"]:
result = match_template(image, target, method=method)
delta = 5
found_positions = []
# find the targets
for i in range(50):
index = np.argmax(result)
y, x = np.unravel_index(index, result.shape)
if not found_positions:
found_positions.append((x, y))
for position in found_positions:
distance = np.sqrt((x - position[0]) ** 2 +
(y - position[1]) ** 2)
if distance > delta:
found_positions.append((x, y))
result[y, x] = 0
if len(found_positions) == len(target_positions):
break
for x, y in target_positions:
print x, y
found = False
for position in found_positions:
distance = np.sqrt((x - position[0]) ** 2 +
(y - position[1]) ** 2)
if distance < delta:
found = True
assert found
if __name__ == "__main__":
    # Run this module's tests directly through numpy's test runner.
    from numpy import testing
    testing.run_module_suite()
| import numpy as np
from skimage.detection import match_template
from numpy.random import randn
def test_template():
size = 100
image = np.zeros((400, 400), dtype=np.float32)
target = np.tri(size) + np.tri(size)[::-1]
target = target.astype(np.float32)
target_positions = [(50, 50), (200, 200)]
for x, y in target_positions:
image[x:x+size, y:y+size] = target
image += randn(400, 400)*2
for method in ["norm-corr", "norm-coeff"]:
result = match_template(image, target, method=method)
delta = 5
found_positions = []
# find the targets
for i in range(50):
index = np.argmax(result)
y, x = np.unravel_index(index, result.shape)
if not found_positions:
found_positions.append((x, y))
for position in found_positions:
distance = np.sqrt((x - position[0]) ** 2 +
(y - position[1]) ** 2)
if distance > delta:
found_positions.append((x, y))
result[y, x] = 0
if len(found_positions) == len(target_positions):
break
for x, y in target_positions:
print x, y
found = False
for position in found_positions:
distance = np.sqrt((x - position[0]) ** 2 +
(y - position[1]) ** 2)
if distance < delta:
found = True
assert found
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
| bsd-3-clause | Python |
f044a7baec91922ee85de91c0a6fe4d475ea49ae | fix TypeError | wathsalav/xos,cboling/xos,opencord/xos,xmaruto/mcord,zdw/xos,open-cloud/xos,wathsalav/xos,wathsalav/xos,wathsalav/xos,zdw/xos,jermowery/xos,cboling/xos,xmaruto/mcord,xmaruto/mcord,xmaruto/mcord,opencord/xos,cboling/xos,jermowery/xos,zdw/xos,jermowery/xos,jermowery/xos,zdw/xos,open-cloud/xos,opencord/xos,open-cloud/xos,cboling/xos,cboling/xos | planetstack/observer/steps/sync_external_routes.py | planetstack/observer/steps/sync_external_routes.py | import os
import base64
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
class SyncExternalRoutes(OpenStackSyncStep):
    """Sync step that adds the driver's external routes to every subnet
    returned by Quantum."""
    # XXX what does this provide?
    provides=[]
    requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently

    def call(self):
        routes = self.driver.get_external_routes()
        subnets = self.driver.shell.quantum.list_subnets()['subnets']
        for subnet in subnets:
            try:
                self.driver.add_external_route(subnet, routes)
            except:
                # Best-effort: a failure on one subnet does not stop the rest.
                # NOTE(review): `logger` is not imported in this chunk --
                # confirm it is defined at module scope elsewhere.
                logger.log_exc("failed to add external route for subnet %s" % subnet)
import base64
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
class SyncExternalRoutes(OpenStackSyncStep):
# XXX what does this provide?
provides=[]
requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
def __init__(self):
pass
def call(self):
routes = self.driver.get_external_routes()
subnets = self.driver.shell.quantum.list_subnets()['subnets']
for subnet in subnets:
try:
self.driver.add_external_route(subnet, routes)
except:
logger.log_exc("failed to add external route for subnet %s" % subnet)
| apache-2.0 | Python |
c33addf039dab34d1fdb1a8c0c16213f5f7c384e | consolidate does who knows what? | Tiger66639/new-csx-extractor,Tiger66639/new-csx-extractor,SeerLabs/new-csx-extractor,SeerLabs/new-csx-extractor,Tiger66639/new-csx-extractor,SeerLabs/new-csx-extractor,Tiger66639/new-csx-extractor,SeerLabs/new-csx-extractor | extractor.py | extractor.py | from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter
import extraction.utils as utils
import subprocess32 as subprocess
import os
import requests
import re
class GrobidExtractor(Extractor):
    """POSTs the raw document to a local Grobid server and returns its
    full-text response with the first line removed."""

    def extract(self, data, dep_results):
        files = {'input': data}
        vars = {}
        url = 'http://localhost:8080/processFulltextDocument'
        try:
            resp = requests.post(url, files=files, data=vars)
        except requests.exceptions.RequestException as ex:
            raise RunnableError('Request to Grobid server failed')
        if resp.status_code != 200:
            raise RunnableError('Grobid returned status {0}'.format(resp.status_code))
        results = resp.text
        # Drop the first line of the response (presumably the XML
        # declaration -- confirm against Grobid's output format).
        result_str = '\n'.join(results.split('\n')[1:])
        return result_str
class PlainTextExtractor(Extractor):
    """Strips markup tags from the Grobid output, yielding plain text."""

    @staticmethod
    def dependencies():
        # Runs only after GrobidExtractor has produced its result.
        return [GrobidExtractor]

    def extract(self, data, dep_results):
        xml_text = dep_results[GrobidExtractor]
        # Replace every tag (plus any leading whitespace) with a newline.
        remove_tags = re.compile(r'\s*<.*?>', re.DOTALL | re.UNICODE)
        plain_text = remove_tags.sub('\n', xml_text)
        return plain_text
class AcademicPaperFilter(Filter):
    """Heuristic filter: accept a document if its plain text contains a
    common references-section heading."""

    @staticmethod
    def dependencies():
        return [PlainTextExtractor]

    def filter(self, data, dep_results):
        plain_text = dep_results[PlainTextExtractor]
        # Case variants of the two headings typically found in papers.
        return ('REFERENCES' in plain_text or
                'References' in plain_text or
                'Bibliography' in plain_text or
                'BIBLIOGRAPHY' in plain_text
                )
class TableExtractor(Extractor):
    """Placeholder for table extraction; not implemented yet."""

    @staticmethod
    def dependencies():
        # Only run on documents that passed the academic-paper filter.
        return [AcademicPaperFilter]

    def extract(self, data, dep_results):
        # TODO: implement actual table extraction.
        return 'TODO'
if __name__ == '__main__':
    # Pipeline: Grobid XML (not emitted) -> plain text -> paper filter -> tables.
    runner = ExtractionRunner()
    runner.add_runnable(GrobidExtractor, include_in_output=False)
    runner.add_runnable(PlainTextExtractor)
    runner.add_runnable(AcademicPaperFilter)
    runner.add_runnable(TableExtractor)
    file_path = os.path.expanduser('~/testpdfs/012.251.000.pdf')
    print runner.run_from_file(file_path, pretty=True)  # Python 2 print statement
| from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter
import extraction.utils as utils
import subprocess32 as subprocess
import os
import requests
import re
class GrobidExtractor(Extractor):
def extract(self, data, dep_results):
files = {'input': data}
vars = {'consolidate': 1}
url = 'http://localhost:8080/processFulltextDocument'
try:
resp = requests.post(url, files=files, data=vars)
except requests.exceptions.RequestException as ex:
raise RunnableError('Request to Grobid server failed')
if resp.status_code != 200:
raise RunnableError('Grobid returned status {0}'.format(resp.status_code))
results = resp.text
result_str = '\n'.join(results.split('\n')[1:])
return result_str
class PlainTextExtractor(Extractor):
@staticmethod
def dependencies():
return [GrobidExtractor]
def extract(self, data, dep_results):
xml_text = dep_results[GrobidExtractor]
remove_tags = re.compile(r'\s*<.*?>', re.DOTALL | re.UNICODE)
plain_text = remove_tags.sub('\n', xml_text)
return plain_text
class AcademicPaperFilter(Filter):
@staticmethod
def dependencies():
return [PlainTextExtractor]
def filter(self, data, dep_results):
plain_text = dep_results[PlainTextExtractor]
return ('REFERENCES' in plain_text or
'References' in plain_text or
'Bibliography' in plain_text or
'BIBLIOGRAPHY' in plain_text
)
class TableExtractor(Extractor):
@staticmethod
def dependencies():
return [AcademicPaperFilter]
def extract(self, data, dep_results):
return 'TODO'
if __name__ == '__main__':
runner = ExtractionRunner()
runner.add_runnable(GrobidExtractor, include_in_output=False)
runner.add_runnable(PlainTextExtractor)
runner.add_runnable(AcademicPaperFilter)
runner.add_runnable(TableExtractor)
file_path = os.path.expanduser('~/testpdfs/012.251.000.pdf')
print runner.run_from_file(file_path, pretty=True)
| apache-2.0 | Python |
38f01ab7e63e40f3c2f6abe5747e89e9f3d13330 | Add pallette test code | barry-scott/scm-workbench,barry-scott/git-workbench,barry-scott/scm-workbench,barry-scott/scm-workbench,barry-scott/git-workbench | Source/Scm/Experiment/palette.py | Source/Scm/Experiment/palette.py | import sys
from PyQt5 import QtWidgets
from PyQt5 import QtCore
# A QApplication must exist before the palette can be queried.
app = QtWidgets.QApplication(sys.argv)
#app.setStyle('fusion')
palette = app.palette()
# Whether Qt picks up colors/fonts from the desktop environment.
print( 'desktopSettingsAware: %r' % (app.desktopSettingsAware(),) )
def colorAsString( brush ):
    # Render a brush's colour as "r,g,b,a grey lightness", all 0-1 floats
    # to two decimals; grey uses the 0.3/0.59/0.11 luma weights.
    color = brush.color()
    grey = (color.redF() * 0.3) + (color.greenF() * 0.59) + (color.blueF() * 0.11)
    fields = (color.redF(), color.greenF(), color.blueF(), color.alphaF(), grey, color.lightnessF())
    return '%.2f,%.2f,%.2f,%.2f %.2f %.2f' % fields
# Dump selected palette roles; getattr maps each role name to its accessor.
for arg in ('text', 'window', 'windowText', 'base', 'alternateBase', 'highlight', 'highlightedText'):
    print( '%20s: %r' % (arg, colorAsString( getattr( palette, arg )() ), ) )
| import sys
from PyQt5 import QtWidgets
from PyQt5 import QtCore
app = QtWidgets.QApplication(sys.argv)
palette = app.palette()
def colorAsString( brush ):
color = brush.color()
grey = (color.redF() * 0.3) + (color.greenF() * 0.59) + (color.blueF() * 0.11)
return '%.2f,%.2f,%.2f,%.2f %.2f %.2f' % (color.redF(), color.greenF(), color.blueF(), color.alphaF(), grey, color.lightnessF())
for arg in ('text', 'window', 'windowText', 'base', 'alternateBase', 'highlight', 'highlightedText'):
print( '%20s: %r' % (arg, colorAsString( getattr( palette, arg )() ), ) )
| apache-2.0 | Python |
6abcf430d026fec949a08f2059448e357d3491d9 | Increase serialized_project performance | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rest/cache.py | akvo/rest/cache.py | #!/usr/bin/env python3
from akvo.cache import cache_with_key, delete_cache_data
from akvo.rest.serializers import ProjectDirectorySerializer
from akvo.rsr.models.project import Project, project_directory_cache_key
PROJECT_DIRECTORY_CACHE = 'database'
# NOTE: The data doesn't timeout, since we expect the data to be invalidated
# when the data is updated from the project editor. Also, the script to fill the
# cache should be able to clear the cache and create new values.
@cache_with_key(project_directory_cache_key, timeout=None, cache_name=PROJECT_DIRECTORY_CACHE)
def serialized_project(project_id):
    """Return ProjectDirectorySerializer data for the given project id.

    Results are cached without expiry under the project-directory cache
    key; entries must be evicted explicitly when the project changes.
    ``only`` limits the fetched columns and ``select_related`` /
    ``prefetch_related`` preload the relations the serializer touches,
    avoiding per-row queries.
    """
    project = Project.objects.only(
        'id', 'title', 'subtitle',
        'current_image',
        'project_plan_summary',
        'primary_location__id',
        'primary_location__latitude',
        'primary_location__longitude',
        'primary_organisation__id',
        'primary_organisation__name',
        'primary_organisation__long_name'
    ).select_related(
        'primary_location',
        'primary_organisation',
    ).prefetch_related(
        'locations',
        'locations__country',
        'recipient_countries',
        'partners',
    ).get(pk=project_id)
    return ProjectDirectorySerializer(project).data
def delete_project_from_project_directory_cache(project_id):
    """Evict the cached project-directory entry for ``project_id``."""
    delete_cache_data(project_directory_cache_key(project_id), PROJECT_DIRECTORY_CACHE)
| #!/usr/bin/env python3
from akvo.cache import cache_with_key, delete_cache_data
from akvo.rest.serializers import ProjectDirectorySerializer
from akvo.rsr.models.project import Project, project_directory_cache_key
PROJECT_DIRECTORY_CACHE = 'database'
# NOTE: The data doesn't timeout, since we expect the data to be invalidated
# when the data is updated from the project editor. Also, the script to fill the
# cache should be able to clear the cache and create new values.
@cache_with_key(project_directory_cache_key, timeout=None, cache_name=PROJECT_DIRECTORY_CACHE)
def serialized_project(project_id):
project = Project.objects.only(
'id', 'title', 'subtitle',
'primary_location__id',
'primary_organisation__id',
'primary_organisation__name',
'primary_organisation__long_name'
).select_related(
'primary_location',
'primary_organisation',
).prefetch_related(
'locations',
'locations__country',
'recipient_countries',
'partners',
).get(pk=project_id)
return ProjectDirectorySerializer(project).data
def delete_project_from_project_directory_cache(project_id):
delete_cache_data(project_directory_cache_key(project_id), PROJECT_DIRECTORY_CACHE)
| agpl-3.0 | Python |
2bccad88152272af36c13973098695efd52a6bdd | Fix incorrect matcher test | spacy-io/spaCy,spacy-io/spaCy,honnibal/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,recognai/spaCy,aikramer2/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,honnibal/spaCy,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy | spacy/tests/regression/test_issue1450.py | spacy/tests/regression/test_issue1450.py | from __future__ import unicode_literals
import pytest
from ...matcher import Matcher
from ...tokens import Doc
from ...vocab import Vocab
@pytest.mark.parametrize(
    'string,start,end',
    [
        ('a', 0, 1),
        ('a b', 0, 2),
        ('a c', 0, 1),
        ('a b c', 0, 2),
        ('a b b c', 0, 3),
        ('a b b', 0, 3),
    ]
)
def test_issue1450_matcher_end_zero_plus(string, start, end):
    '''Test matcher works when patterns end with * operator.

    Original example (rewritten to avoid model usage)

    nlp = spacy.load('en_core_web_sm')
    matcher = Matcher(nlp.vocab)
    matcher.add(
        "TSTEND",
        on_match_1,
        [
            {TAG: "JJ", LOWER: "new"},
            {TAG: "NN", 'OP': "*"}
        ]
    )
    doc = nlp(u'Could you create a new ticket for me?')
    print([(w.tag_, w.text, w.lower_) for w in doc])
    matches = matcher(doc)
    print(matches)
    assert len(matches) == 1
    assert matches[0][1] == 4
    assert matches[0][2] == 5
    '''
    matcher = Matcher(Vocab())
    # Pattern: literal "a" followed by zero or more "b".
    matcher.add(
        "TSTEND",
        None,
        [
            {'ORTH': "a"},
            {'ORTH': "b", 'OP': "*"}
        ]
    )
    doc = Doc(Vocab(), words=string.split())
    matches = matcher(doc)
    if start is None or end is None:
        assert matches == []
        # Bug fix: without this return the assertions below would index
        # into the empty match list and raise IndexError.
        return
    # The longest (last) match must span [start, end).
    assert matches[-1][1] == start
    assert matches[-1][2] == end
| from __future__ import unicode_literals
import pytest
from ...matcher import Matcher
from ...tokens import Doc
from ...vocab import Vocab
@pytest.mark.parametrize(
'string,start,end',
[
('a', 0, 1),
('a b', 0, 2),
('a c', 0, 1),
('a b c', 0, 2),
('a b b c', 0, 2),
('a b b', 0, 2),
]
)
def test_issue1450_matcher_end_zero_plus(string, start, end):
'''Test matcher works when patterns end with * operator.
Original example (rewritten to avoid model usage)
nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
matcher.add(
"TSTEND",
on_match_1,
[
{TAG: "JJ", LOWER: "new"},
{TAG: "NN", 'OP': "*"}
]
)
doc = nlp(u'Could you create a new ticket for me?')
print([(w.tag_, w.text, w.lower_) for w in doc])
matches = matcher(doc)
print(matches)
assert len(matches) == 1
assert matches[0][1] == 4
assert matches[0][2] == 5
'''
matcher = Matcher(Vocab())
matcher.add(
"TSTEND",
None,
[
{'ORTH': "a"},
{'ORTH': "b", 'OP': "*"}
]
)
doc = Doc(Vocab(), words=string.split())
matches = matcher(doc)
if start is None or end is None:
assert matches == []
assert matches[-1][1] == start
assert matches[-1][2] == end
| mit | Python |
f172df0a255d49a4a0d389089e4a1f7cc16b7a29 | Configure logging of output so that it is tied with the debug flag | jdgwartney/boundary-plugin-shell,jdgwartney/boundary-plugin-shell,boundary/boundary-plugin-shell,boundary/boundary-plugin-shell | exec_proc.py | exec_proc.py | #!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen,PIPE
import shlex
import logging
from string import replace
class ExecProc:
    """Runs a configured shell command and returns its stdout.

    NOTE(review): indentation reconstructed from flattened source --
    confirm which logging calls sit inside each debug guard.
    """

    def __init__(self):
        # Command line to run; must be set via setCommand() before execute().
        self.command = None
        # When True, log the parsed command and hex dumps of its output.
        self.debug = False

    def setDebug(self,debug):
        self.debug = debug

    def setCommand(self,command):
        # Only plain strings are accepted as commands.
        if type(command) != str:
            raise ValueError
        self.command = command

    def execute(self):
        """Run the command and return its stdout; raises ValueError if unset."""
        if self.command == None:
            raise ValueError
        # Split the command respecting shell-style quoting.
        args = shlex.split(self.command)
        if self.debug == True:
            logging.info("command=\"%s\"",args)
        p = Popen(args,stdout=PIPE)
        o,e = p.communicate()
        if self.debug == True:
            logging.info("before: " + ':'.join(x.encode('hex') for x in o))
        # Remove carriage returns from output
        #o = replace(o,"\r","")
        if self.debug == True:
            logging.info("after: " + ':'.join(x.encode('hex') for x in o))
        if self.debug == True:
            logging.info("output=\"%s\"",o)
            logging.info(':'.join(x.encode('hex') for x in o))
        return o
| #!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen,PIPE
import shlex
import logging
from string import replace
class ExecProc:
def __init__(self):
self.command = None
self.debug = False
def setDebug(self,debug):
self.debug = debug
def setCommand(self,command):
if type(command) != str:
raise ValueError
self.command = command
def execute(self):
if self.command == None:
raise ValueError
# Remove Carriage Returns
args = shlex.split(self.command)
if self.debug == True:
logging.info("command=\"%s\"",args)
p = Popen(args,stdout=PIPE)
o,e = p.communicate()
logging.info("before: " + ':'.join(x.encode('hex') for x in o))
o = replace(o,"\r","")
logging.info("after: " + ':'.join(x.encode('hex') for x in o))
if self.debug == True:
logging.info("output=\"%s\"",o)
logging.info(':'.join(x.encode('hex') for x in o))
return o
| apache-2.0 | Python |
91087bd04ae10a01c6042aba6f89982da24c0e9f | add decorator with arguments | helloTC/LearnPython | new_usage/decorator_suppress_error.py | new_usage/decorator_suppress_error.py | #!/usr/bin/env python
# coding=utf-8
import functools
def suppress_errors_raw(func):
    """Decorator that swallows any Exception raised by ``func``.

    When the wrapped call raises, the wrapper returns None instead of
    propagating the exception.
    """
    @functools.wraps(func)
    def silenced(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            return None
    return silenced
def suppress_errors_arguments(func=None, log_func=None):
    """Exception-silencing decorator that can also log the error.

    Works both bare (``@suppress_errors_arguments``) and parameterized
    (``@suppress_errors_arguments(log_func=...)``).  If the wrapped
    function raises, ``str(exception)`` is handed to ``log_func`` when
    one was given, and the wrapper returns None.
    """
    def decorate(target):
        @functools.wraps(target)
        def guarded(*args, **kwargs):
            try:
                return target(*args, **kwargs)
            except Exception as exc:
                if log_func is not None:
                    log_func(str(exc))
                return None
        return guarded
    # Bare usage passes the function directly; parameterized usage gets
    # the decorator back to apply later.
    return decorate if func is None else decorate(func)
def print_logger(message):
    # Minimal log sink: write the message to stdout.
    print(message)

# @suppress_errors_raw
@suppress_errors_arguments(log_func = print_logger)
def divide_zeros():
    # Raises ZeroDivisionError; the decorator logs it and returns None.
    print('Running function...')
    return 1.0/0

if __name__ == '__main__':
    divide_zeros()
| #!/usr/bin/env python
# coding=utf-8
import functools
def suppress_errors(func):
"""
Automatically silence errors that occur within a function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
pass
return wrapper
@suppress_errors
def divide_zeros():
print('Running function...')
return 1.0/0
if __name__ == '__main__':
divide_zeros()
| mit | Python |
91fb8f3825f521c52bed82fbfe1969bda5b22534 | Test Cases | bw8932/inf1340_2015_asst1 | exercise2.py | exercise2.py | #!/usr/bin/env python
""" Assignment 1, Exercise 2, INF1340, Fall, 2015. Name that shape.
This module contains one function name_that_shape(). It prompts the user
to input the number of sides in a shape and outputs the name of the shape.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
def name_that_shape():
    """Ask the user how many sides a regular polygon has and print its name.

    Side counts "3" through "10" map to the usual polygon names; any
    other input (including non-numeric text) prints "Error".
    """
    # user types the side count; kept as a string for the lookup below
    sides = raw_input("How many sides does the polygon have?")
    shape_names = {
        "3": "triangle",
        "4": "quadrilateral",
        "5": "pentagon",
        "6": "hexagon",
        "7": "heptagon",
        "8": "octagon",
        "9": "nonagon",
        "10": "decagon",
    }
    # Unknown side counts fall back to the error message.
    print(shape_names.get(sides, "Error"))
name_that_shape() | #!/usr/bin/env python
""" Assignment 1, Exercise 2, INF1340, Fall, 2015. Name that shape.
This module contains one function name_that_shape(). It prompts the user
to input the number of sides in a shape and outputs the name of the shape.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
def name_that_shape():
"""
For a given number of sides in a regular polygon, returns the shape name
Inputs: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, J
Expected Outputs: Error, Error, Error, Pentagon, Decagon, Error,
Actual Outputs: Error, Pentagon, Decagon, Error, invalid literal
Errors: None, unless user inputs letter instead of number, when it returns "invalid literal"
"""
# user inputs polygon's sides
sides = (raw_input("How many sides does the polygon have?"))
# program prints name of shape
if sides == "3":
print ("triangle")
elif sides == "4":
print ("quadrilateral")
elif sides == "5":
print ("pentagon")
elif sides == "6":
print ("hexagon")
elif sides == "7":
print ("heptagon")
elif sides == "8":
print ("octagon")
elif sides == "9":
print ("nonagon")
elif sides == "10":
print ("decagon")
else:
print ("Error")
# name_that_shape() | mit | Python |
c83688e4081197d8f381535d51af41b79d1aee23 | Fix incorrect reference to function in same module | djc/runa,djc/runa,djc/runa,djc/runa | runac/util.py | runac/util.py | import os, sys
BASE = os.path.dirname(os.path.dirname(__file__))
CORE_DIR = os.path.join(BASE, 'core')
IGNORE = {'pos'}
# Python 2/3 compatibility: expose the lazy dict-iteration methods under
# one set of names (iterkeys/itervalues/iteritems on 2, keys/values/items
# views on 3).
if sys.version_info[0] < 3:
    def keys(d):
        return d.iterkeys()

    def values(d):
        return d.itervalues()

    def items(d):
        return d.iteritems()
else:
    def keys(d):
        return d.keys()

    def values(d):
        return d.values()

    def items(d):
        return d.items()
class AttribRepr(object):
    '''Mix-in supplying a readable __repr__ built from instance attributes.

    Attributes named in IGNORE (e.g. position info) are left out.
    '''
    def __repr__(self):
        pairs = sorted(items(self.__dict__))
        shown = ', '.join('%s=%r' % pair for pair in pairs if pair[0] not in IGNORE)
        return '<%s(%s)>' % (self.__class__.__name__, shown)
def error(fn, msg, pos):
    '''Format a compiler diagnostic, optionally with source context.

    ``pos`` is None or a tuple whose first element is a ``(line, column)``
    pair and whose third element is the source line text.  Tabs count as
    four columns.  Shared by Error and ParseError, hence a free function.
    '''
    if pos is None:
        return '%s: %s\n' % (fn, msg)
    (row, column), source_line = pos[0], pos[2]
    # Column is 1-based in the message, with tabs expanded to 4 spaces.
    expanded = source_line[:column].replace('\t', ' ' * 4)
    header = '%s [%s.%s]: %s' % (fn, row + 1, len(expanded) + 1, msg)
    if not source_line:
        return header + '\n'
    shown = source_line.replace('\t', ' ' * 4).rstrip()
    # Caret indent compensates for the extra width of expanded tabs.
    caret_indent = column + 3 * min(column, source_line.count('\t'))
    return '\n'.join((header, shown, ' ' * caret_indent + '^')) + '\n'
class Error(Exception):
    '''Error class used for throwing user errors from the compiler'''
    def __init__(self, node, msg):
        Exception.__init__(self, msg)
        # AST node the error refers to; its `pos` drives the source snippet.
        self.node = node
        self.msg = msg

    def show(self):
        # Render "<file> [line.col]: msg" plus the offending line and caret.
        fn = os.path.basename(self.node.pos[3])
        return error(fn, self.msg, getattr(self.node, 'pos', None))
class ParseError(Exception):
    '''Parse errors, raised from rply's error handling function'''
    def __init__(self, fn, t, pos):
        self.fn = fn
        # The unexpected token object (exposes .name and .value).
        self.t = t
        self.pos = pos

    def show(self):
        fn = os.path.basename(self.pos[3])
        msg = 'unexpected token %s (%r)' % (self.t.name, self.t.value)
        return error(fn, msg, self.pos)
| import os, sys
BASE = os.path.dirname(os.path.dirname(__file__))
CORE_DIR = os.path.join(BASE, 'core')
IGNORE = {'pos'}
if sys.version_info[0] < 3:
def keys(d):
return d.iterkeys()
def values(d):
return d.itervalues()
def items(d):
return d.iteritems()
else:
def keys(d):
return d.keys()
def values(d):
return d.values()
def items(d):
return d.items()
class AttribRepr(object):
'''Helper class to provide a nice __repr__ for other classes'''
def __repr__(self):
contents = sorted(util.items(self.__dict__))
show = ('%s=%r' % (k, v) for (k, v) in contents if k not in IGNORE)
return '<%s(%s)>' % (self.__class__.__name__, ', '.join(show))
def error(fn, msg, pos):
'''Helper function to print useful error messages.
Tries to mangle location information and message into a layout that's
easy to read and provides good data about the underlying error message.
This is in a separate function because of the differences between Error
and ParseError, which both need this functionality.'''
if pos is None:
return '%s: %s\n' % (fn, msg)
col = len(pos[2][:pos[0][1]].replace('\t', ' ' * 4)) + 1
desc = '%s [%s.%s]: %s' % (fn, pos[0][0] + 1, col, msg)
if not pos[2]:
return desc + '\n'
line = pos[2].replace('\t', ' ' * 4).rstrip()
spaces = pos[0][1] + 3 * min(pos[0][1], pos[2].count('\t'))
return '\n'.join((desc, line, ' ' * spaces + '^')) + '\n'
class Error(Exception):
'''Error class used for throwing user errors from the compiler'''
def __init__(self, node, msg):
Exception.__init__(self, msg)
self.node = node
self.msg = msg
def show(self):
fn = os.path.basename(self.node.pos[3])
return error(fn, self.msg, getattr(self.node, 'pos', None))
class ParseError(Exception):
'''Parse errors, raised from rply's error handling function'''
def __init__(self, fn, t, pos):
self.fn = fn
self.t = t
self.pos = pos
def show(self):
fn = os.path.basename(self.pos[3])
msg = 'unexpected token %s (%r)' % (self.t.name, self.t.value)
return error(fn, msg, self.pos)
| mit | Python |
4f47691b876b74e8763a68981d22ceedf2889f9c | Revise doc string and add time complexity | bowen0701/algorithms_data_structures | alg_bubble_sort.py | alg_bubble_sort.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def bubble_sort(a_list):
    """Sort ``a_list`` in place using bubble sort.

    Each sweep compares adjacent pairs and swaps the out-of-order ones,
    bubbling the largest remaining item to the end of the unsorted part.

    Time complexity: O(n^2).
    """
    for sweep_end in range(len(a_list) - 1, 0, -1):
        for j in range(sweep_end):
            if a_list[j] > a_list[j + 1]:
                a_list[j], a_list[j + 1] = a_list[j + 1], a_list[j]
def bubble_sort_short(a_list):
    """Sort ``a_list`` in place with bubble sort, stopping early.

    If a full sweep performs no swap the list is already sorted and the
    remaining sweeps are skipped.
    """
    limit = len(a_list) - 1
    swapped = True
    while swapped and limit > 0:
        swapped = False
        for idx in range(limit):
            if a_list[idx] > a_list[idx + 1]:
                # Record that this sweep changed something.
                swapped = True
                a_list[idx], a_list[idx + 1] = a_list[idx + 1], a_list[idx]
        limit -= 1
def main():
    """Demo run: sort one sample list with each bubble-sort variant."""
    sample = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    print('a_list: {}'.format(sample))
    print('By bubble sort: ')
    bubble_sort(sample)
    print(sample)
    print('By short_bubble sort: ')
    # The list is already sorted here; this mainly exercises the early stop.
    bubble_sort_short(sample)
    print(sample)


if __name__ == '__main__':
    main()
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def bubble_sort(a_list):
"""Bubble Sort algortihm.
Concept:
- Start from the item at the 1st slot to check
if it is bigger than the next one. If yes, swap these two items.
- Then check the following successive pair and swap them if needed.
- Iterate the procedure over the length of the list.
"""
for pass_num in reversed(range(len(a_list))):
for i in range(pass_num):
if a_list[i] > a_list[i + 1]:
a_list[i + 1], a_list[i] = a_list[i], a_list[i + 1]
def bubble_sort_short(a_list):
"""Bubble Short algorithm with early stop.
After some bubble sort iterations,
if there are no swapped pairs, stop the further iterations.
"""
exchange_bool = True
pass_num = len(a_list) - 1
while pass_num > 0 and exchange_bool:
exchange_bool = False
for i in range(pass_num):
if a_list[i] > a_list[i + 1]:
exchange_bool = True
a_list[i + 1], a_list[i] = a_list[i], a_list[i + 1]
pass_num -= 1
def main():
    """Demo: sort a sample list with both bubble sort variants."""
    a_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    print('a_list: {}'.format(a_list))
    print('By bubble sort: ')
    bubble_sort(a_list)
    print(a_list)
    # NOTE: a_list is already sorted at this point, so the second call
    # mainly exercises bubble_sort_short's early-stop path.
    print('By short_bubble sort: ')
    bubble_sort_short(a_list)
    print(a_list)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
0acdaffa94f47a0e7f03e6bfe599a65858a13b3e | remove enabled from Provider admin | iXioN/django-all-access,iXioN/django-all-access | allaccess/admin.py | allaccess/admin.py | from django.contrib import admin
from .models import Provider, AccountAccess
class ProviderAdmin(admin.ModelAdmin):
    "Admin customization for OAuth providers."
    # Columns shown in the provider change-list page.
    list_display = ('name', 'enabled', 'site',)
    # Sidebar filters; 'enabled' is displayed as a column but not filterable.
    list_filter = ('name', 'site', )
class AccountAccessAdmin(admin.ModelAdmin):
    "Admin customization for accounts."
    # '__str__' shows the model's string representation as the first column.
    list_display = (
        '__str__', 'provider', 'user', 'created', 'modified',)
    list_filter = ('provider', 'created', 'modified', )
admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)
| from django.contrib import admin
from .models import Provider, AccountAccess
class ProviderAdmin(admin.ModelAdmin):
    "Admin customization for OAuth providers."
    # Columns shown in the provider change-list page.
    list_display = ('name', 'enabled', 'site',)
    # Sidebar filters for the change list.
    list_filter = ('name', 'enabled', 'site', )
class AccountAccessAdmin(admin.ModelAdmin):
    "Admin customization for accounts."
    # '__str__' shows the model's string representation as the first column.
    list_display = (
        '__str__', 'provider', 'user', 'created', 'modified',)
    list_filter = ('provider', 'created', 'modified', )
admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)
| bsd-2-clause | Python |
98cd06e56d05c23c69cc6abfda2c90c31f45a328 | remove debug-print | ChristianKniep/docker-terminal,ChristianKniep/docker-terminal | usr/local/bin/qnib-setup.py | usr/local/bin/qnib-setup.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Setup a qnib/terminal container
Usage:
qnib-setup.py [options]
qnib-setup.py (-h | --help)
qnib-setup.py --version
Options:
-h --help Show this screen.
--version Show version.
--loglevel, -L=<str> Loglevel
(ERROR, CRITICAL, WARN, INFO, DEBUG)
--log2stdout, -l Log to stdout, otherwise to logfile.
--logfile, -f=<path> Logfile to log to (default: <scriptname>.log)
--cfg, -c=<path> Configuration file. [default: /etc/qnib-setup.cfg]
"""
from qnibsetup import QnibConfig, QnibSetup
try:
from docopt import docopt
except ImportError:
HAVE_DOCOPT = False
else:
HAVE_DOCOPT = True
__author__ = 'Christian Kniep <christian()qnib.org>'
__copyright__ = 'Copyright 2014 Christian Kniep'
__license__ = """MIT License (http://opensource.org/licenses/MIT)"""
def main():
    """Entry point: parse CLI options (if docopt is available) and run setup."""
    if HAVE_DOCOPT:
        cli_options = docopt(__doc__, version='Test Script 0.1')
    else:
        cli_options = None
    config = QnibConfig(cli_options)
    QnibSetup(config).run()
if __name__ == "__main__":
main()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Setup a qnib/terminal container
Usage:
qnib-setup.py [options]
qnib-setup.py (-h | --help)
qnib-setup.py --version
Options:
-h --help Show this screen.
--version Show version.
--loglevel, -L=<str> Loglevel
(ERROR, CRITICAL, WARN, INFO, DEBUG)
--log2stdout, -l Log to stdout, otherwise to logfile.
--logfile, -f=<path> Logfile to log to (default: <scriptname>.log)
--cfg, -c=<path> Configuration file. [default: /etc/qnib-setup.cfg]
"""
from qnibsetup import QnibConfig, QnibSetup
try:
from docopt import docopt
except ImportError:
HAVE_DOCOPT = False
else:
HAVE_DOCOPT = True
__author__ = 'Christian Kniep <christian()qnib.org>'
__copyright__ = 'Copyright 2014 Christian Kniep'
__license__ = """MIT License (http://opensource.org/licenses/MIT)"""
def main():
    """ main function """
    options = None
    if HAVE_DOCOPT:
        # Parse the module docstring as the CLI specification.
        options = docopt(__doc__, version='Test Script 0.1')
    qcfg = QnibConfig(options)
    # NOTE(review): leftover debug output (Python 2 print statement);
    # consider removing or routing through logging.
    print qcfg
    qs =QnibSetup(qcfg)
    qs.run()
if __name__ == "__main__":
main()
| mit | Python |
2eeae8c7173a158bee3fd26d453728b603e7ba85 | Update location_timestamp in AmbulanceUpdateView. | EMSTrack/WebServerAndClient,EMSTrack/WebServerAndClient,EMSTrack/WebServerAndClient | ambulance/forms.py | ambulance/forms.py | from django import forms
from django.contrib.gis.forms import PointField
from django.utils import timezone
from emstrack.forms import LeafletPointWidget
from .models import Ambulance, Call
class AmbulanceCreateForm(forms.ModelForm):
    """ModelForm for creating an Ambulance; location is picked on a map."""
    # Geographic point edited through an interactive Leaflet map widget.
    location = PointField(
        widget = LeafletPointWidget(attrs={'map_width': 500,
                                           'map_height': 300})
    )
    class Meta:
        model = Ambulance
        fields = [ 'identifier', 'capability', 'status', 'comment', 'location' ]
class AmbulanceUpdateForm(AmbulanceCreateForm):
    """Update form: refreshes location_timestamp when the location changes."""

    def clean_location_timestamp(self):
        """Return the cleaned location_timestamp.

        If the location field was edited, stamp it with the current time
        so the timestamp always reflects when the position was set.
        """
        # if updating location
        if 'location' in self.changed_data:
            # update timestamp as well
            self.cleaned_data['location_timestamp'] = timezone.now()
        # .get() avoids a KeyError when the field is absent from cleaned_data
        # (e.g. it failed earlier validation and location did not change).
        return self.cleaned_data.get('location_timestamp')
# front end team to choose which fields to display?
class CallCreateForm(forms.ModelForm):
    """ModelForm exposing every Call field for creation."""
    class Meta:
        model = Call
        fields = '__all__'
# class AmbulanceStatusCreateForm(forms.ModelForm):
# class Meta:
# model = AmbulanceStatus
# fields = '__all__'
# class AmbulanceStatusUpdateForm(forms.ModelForm):
# class Meta:
# model = AmbulanceStatus
# fields = '__all__'
# class AmbulanceCapabilityCreateForm(forms.ModelForm):
# class Meta:
# model = AmbulanceCapability
# fields = '__all__'
| from django import forms
from django.contrib.gis.forms import PointField
from django.utils import timezone
from emstrack.forms import LeafletPointWidget
from .models import Ambulance, Call
class AmbulanceCreateForm(forms.ModelForm):
    """ModelForm for creating an Ambulance; location is picked on a map."""
    # Geographic point edited through an interactive Leaflet map widget.
    location = PointField(
        widget = LeafletPointWidget(attrs={'map_width': 500,
                                           'map_height': 300})
    )
    class Meta:
        model = Ambulance
        fields = [ 'identifier', 'capability', 'status', 'comment', 'location' ]
class AmbulanceUpdateForm(AmbulanceCreateForm):
    def clean(self):
        """Refresh location_timestamp whenever the location field changed."""
        # get cleaned data
        data = super().clean()
        # if updating location
        if 'location' in self.changed_data:
            # update timestamp as well
            data['location_timestamp'] = timezone.now()
            self.changed_data.append('location_timestamp')
        # return the cleaned data to the form machinery
        return data
# front end team to choose which fields to display?
class CallCreateForm(forms.ModelForm):
    """ModelForm exposing every Call field for creation."""
    class Meta:
        model = Call
        fields = '__all__'
# class AmbulanceStatusCreateForm(forms.ModelForm):
# class Meta:
# model = AmbulanceStatus
# fields = '__all__'
# class AmbulanceStatusUpdateForm(forms.ModelForm):
# class Meta:
# model = AmbulanceStatus
# fields = '__all__'
# class AmbulanceCapabilityCreateForm(forms.ModelForm):
# class Meta:
# model = AmbulanceCapability
# fields = '__all__'
| bsd-3-clause | Python |
ed45c8201977aecde226b2e9b060820a8fd677c3 | Remove test for deprecated createmultsig option | guncoin/guncoin,AkioNak/bitcoin,tjps/bitcoin,myriadteam/myriadcoin,tjps/bitcoin,tecnovert/particl-core,vmp32k/litecoin,jamesob/bitcoin,achow101/bitcoin,jtimon/bitcoin,particl/particl-core,DigitalPandacoin/pandacoin,MarcoFalke/bitcoin,kazcw/bitcoin,cdecker/bitcoin,andreaskern/bitcoin,paveljanik/bitcoin,Kogser/bitcoin,andreaskern/bitcoin,Flowdalic/bitcoin,ericshawlinux/bitcoin,guncoin/guncoin,ElementsProject/elements,jambolo/bitcoin,MeshCollider/bitcoin,DigitalPandacoin/pandacoin,dscotese/bitcoin,yenliangl/bitcoin,BTCGPU/BTCGPU,EthanHeilman/bitcoin,qtumproject/qtum,Kogser/bitcoin,guncoin/guncoin,Xekyo/bitcoin,sstone/bitcoin,nikkitan/bitcoin,domob1812/namecore,pataquets/namecoin-core,fujicoin/fujicoin,thrasher-/litecoin,domob1812/namecore,yenliangl/bitcoin,n1bor/bitcoin,CryptArc/bitcoin,jtimon/bitcoin,midnightmagic/bitcoin,jtimon/bitcoin,kallewoof/bitcoin,r8921039/bitcoin,sstone/bitcoin,DigitalPandacoin/pandacoin,GlobalBoost/GlobalBoost,afk11/bitcoin,joshrabinowitz/bitcoin,ahmedbodi/vertcoin,h4x3rotab/BTCGPU,OmniLayer/omnicore,Kogser/bitcoin,midnightmagic/bitcoin,Christewart/bitcoin,Sjors/bitcoin,lateminer/bitcoin,bitcoin/bitcoin,fujicoin/fujicoin,midnightmagic/bitcoin,prusnak/bitcoin,anditto/bitcoin,Xekyo/bitcoin,jnewbery/bitcoin,litecoin-project/litecoin,RHavar/bitcoin,digibyte/digibyte,cdecker/bitcoin,fanquake/bitcoin,jtimon/bitcoin,TheBlueMatt/bitcoin,untrustbank/litecoin,ElementsProject/elements,domob1812/bitcoin,fanquake/bitcoin,AkioNak/bitcoin,domob1812/bitcoin,JeremyRubin/bitcoin,GlobalBoost/GlobalBoost,fujicoin/fujicoin,mm-s/bitcoin,bitcoinsSG/bitcoin,namecoin/namecore,mitchellcash/bitcoin,rnicoll/bitcoin,nikkitan/bitcoin,EthanHeilman/bitcoin,vertcoin/vertcoin,gjhiggins/vcoincore,lateminer/bitcoin,tjps/bitcoin,GroestlCoin/bitcoin,litecoin-project/litecoin,bitcoinknots/bitcoin,jonasschnelli/bitcoin,untrustbank/litecoin,kallewoof/bitcoin,mitchellcash/bitcoin,afk11/bitcoi
n,jtimon/bitcoin,thrasher-/litecoin,jonasschnelli/bitcoin,Flowdalic/bitcoin,JeremyRubin/bitcoin,h4x3rotab/BTCGPU,prusnak/bitcoin,cdecker/bitcoin,achow101/bitcoin,CryptArc/bitcoin,paveljanik/bitcoin,jnewbery/bitcoin,namecoin/namecoin-core,jlopp/statoshi,jmcorgan/bitcoin,apoelstra/bitcoin,bitcoinknots/bitcoin,MarcoFalke/bitcoin,tecnovert/particl-core,bitcoinsSG/bitcoin,rawodb/bitcoin,bitcoin/bitcoin,peercoin/peercoin,jlopp/statoshi,kallewoof/bitcoin,domob1812/huntercore,ahmedbodi/vertcoin,r8921039/bitcoin,jlopp/statoshi,Bushstar/UFO-Project,DigitalPandacoin/pandacoin,domob1812/huntercore,thrasher-/litecoin,bespike/litecoin,jmcorgan/bitcoin,myriadcoin/myriadcoin,MarcoFalke/bitcoin,Christewart/bitcoin,jnewbery/bitcoin,vmp32k/litecoin,jonasschnelli/bitcoin,kazcw/bitcoin,guncoin/guncoin,paveljanik/bitcoin,midnightmagic/bitcoin,droark/bitcoin,globaltoken/globaltoken,bitcoinsSG/bitcoin,practicalswift/bitcoin,Christewart/bitcoin,jambolo/bitcoin,n1bor/bitcoin,guncoin/guncoin,paveljanik/bitcoin,bitcoin/bitcoin,sstone/bitcoin,GroestlCoin/GroestlCoin,fanquake/bitcoin,mruddy/bitcoin,stamhe/bitcoin,jtimon/bitcoin,RHavar/bitcoin,domob1812/huntercore,h4x3rotab/BTCGPU,kazcw/bitcoin,vmp32k/litecoin,rnicoll/dogecoin,joshrabinowitz/bitcoin,AkioNak/bitcoin,lbryio/lbrycrd,achow101/bitcoin,practicalswift/bitcoin,instagibbs/bitcoin,GroestlCoin/bitcoin,tecnovert/particl-core,sebrandon1/bitcoin,rnicoll/dogecoin,domob1812/bitcoin,nikkitan/bitcoin,CryptArc/bitcoin,OmniLayer/omnicore,bespike/litecoin,mm-s/bitcoin,wellenreiter01/Feathercoin,TheBlueMatt/bitcoin,ahmedbodi/vertcoin,monacoinproject/monacoin,Sjors/bitcoin,OmniLayer/omnicore,FeatherCoin/Feathercoin,Kogser/bitcoin,JeremyRubin/bitcoin,jambolo/bitcoin,domob1812/namecore,domob1812/namecore,monacoinproject/monacoin,donaloconnor/bitcoin,h4x3rotab/BTCGPU,namecoin/namecoin-core,wellenreiter01/Feathercoin,n1bor/bitcoin,lbryio/lbrycrd,pstratem/bitcoin,fujicoin/fujicoin,peercoin/peercoin,BTCGPU/BTCGPU,myriadteam/myriadcoin,n1bor/bitcoin,sipsorcer
y/bitcoin,afk11/bitcoin,TheBlueMatt/bitcoin,myriadcoin/myriadcoin,lateminer/bitcoin,ericshawlinux/bitcoin,bespike/litecoin,gjhiggins/vcoincore,domob1812/bitcoin,qtumproject/qtum,tecnovert/particl-core,namecoin/namecoin-core,vertcoin/vertcoin,bespike/litecoin,lateminer/bitcoin,qtumproject/qtum,myriadteam/myriadcoin,afk11/bitcoin,GroestlCoin/bitcoin,peercoin/peercoin,randy-waterhouse/bitcoin,GlobalBoost/GlobalBoost,pstratem/bitcoin,donaloconnor/bitcoin,paveljanik/bitcoin,mm-s/bitcoin,stamhe/bitcoin,FeatherCoin/Feathercoin,pstratem/bitcoin,ajtowns/bitcoin,EthanHeilman/bitcoin,tjps/bitcoin,digibyte/digibyte,GlobalBoost/GlobalBoost,RHavar/bitcoin,jamesob/bitcoin,midnightmagic/bitcoin,joshrabinowitz/bitcoin,rawodb/bitcoin,mitchellcash/bitcoin,CryptArc/bitcoin,JeremyRubin/bitcoin,OmniLayer/omnicore,lbryio/lbrycrd,yenliangl/bitcoin,jmcorgan/bitcoin,untrustbank/litecoin,dscotese/bitcoin,bitcoinsSG/bitcoin,fujicoin/fujicoin,namecoin/namecore,litecoin-project/litecoin,myriadcoin/myriadcoin,alecalve/bitcoin,untrustbank/litecoin,prusnak/bitcoin,TheBlueMatt/bitcoin,mruddy/bitcoin,myriadteam/myriadcoin,alecalve/bitcoin,dscotese/bitcoin,practicalswift/bitcoin,ryanofsky/bitcoin,FeatherCoin/Feathercoin,lbryio/lbrycrd,OmniLayer/omnicore,GlobalBoost/GlobalBoost,vertcoin/vertcoin,Flowdalic/bitcoin,kallewoof/bitcoin,digibyte/digibyte,MarcoFalke/bitcoin,andreaskern/bitcoin,domob1812/huntercore,tjps/bitcoin,n1bor/bitcoin,sstone/bitcoin,fanquake/bitcoin,bitcoinknots/bitcoin,afk11/bitcoin,digibyte/digibyte,Kogser/bitcoin,pstratem/bitcoin,sebrandon1/bitcoin,domob1812/bitcoin,gjhiggins/vcoincore,thrasher-/litecoin,Bushstar/UFO-Project,ahmedbodi/vertcoin,MarcoFalke/bitcoin,ahmedbodi/vertcoin,ajtowns/bitcoin,rnicoll/bitcoin,Bushstar/UFO-Project,litecoin-project/litecoin,yenliangl/bitcoin,lbryio/lbrycrd,achow101/bitcoin,ajtowns/bitcoin,MeshCollider/bitcoin,Flowdalic/bitcoin,MeshCollider/bitcoin,pataquets/namecoin-core,mitchellcash/bitcoin,nikkitan/bitcoin,qtumproject/qtum,particl/particl-core,tjp
s/bitcoin,vertcoin/vertcoin,Christewart/bitcoin,untrustbank/litecoin,CryptArc/bitcoin,monacoinproject/monacoin,AkioNak/bitcoin,jlopp/statoshi,Sjors/bitcoin,namecoin/namecore,kazcw/bitcoin,droark/bitcoin,bitcoin/bitcoin,Kogser/bitcoin,vmp32k/litecoin,Kogser/bitcoin,thrasher-/litecoin,EthanHeilman/bitcoin,droark/bitcoin,sipsorcery/bitcoin,cdecker/bitcoin,randy-waterhouse/bitcoin,jnewbery/bitcoin,andreaskern/bitcoin,jamesob/bitcoin,andreaskern/bitcoin,ajtowns/bitcoin,randy-waterhouse/bitcoin,rnicoll/bitcoin,GroestlCoin/GroestlCoin,lateminer/bitcoin,jambolo/bitcoin,h4x3rotab/BTCGPU,myriadcoin/myriadcoin,jmcorgan/bitcoin,TheBlueMatt/bitcoin,sipsorcery/bitcoin,particl/particl-core,bespike/litecoin,ryanofsky/bitcoin,achow101/bitcoin,Christewart/bitcoin,EthanHeilman/bitcoin,Kogser/bitcoin,namecoin/namecore,mitchellcash/bitcoin,monacoinproject/monacoin,peercoin/peercoin,globaltoken/globaltoken,particl/particl-core,donaloconnor/bitcoin,namecoin/namecore,GroestlCoin/GroestlCoin,instagibbs/bitcoin,fujicoin/fujicoin,namecoin/namecoin-core,apoelstra/bitcoin,pataquets/namecoin-core,GroestlCoin/bitcoin,namecoin/namecore,donaloconnor/bitcoin,practicalswift/bitcoin,jambolo/bitcoin,sipsorcery/bitcoin,instagibbs/bitcoin,gjhiggins/vcoincore,Christewart/bitcoin,nikkitan/bitcoin,vertcoin/vertcoin,r8921039/bitcoin,globaltoken/globaltoken,apoelstra/bitcoin,myriadcoin/myriadcoin,EthanHeilman/bitcoin,myriadteam/myriadcoin,particl/particl-core,vmp32k/litecoin,stamhe/bitcoin,GroestlCoin/bitcoin,cdecker/bitcoin,jnewbery/bitcoin,ericshawlinux/bitcoin,r8921039/bitcoin,practicalswift/bitcoin,MeshCollider/bitcoin,qtumproject/qtum,ElementsProject/elements,bitcoin/bitcoin,jmcorgan/bitcoin,jamesob/bitcoin,mruddy/bitcoin,DigitalPandacoin/pandacoin,joshrabinowitz/bitcoin,rawodb/bitcoin,OmniLayer/omnicore,vmp32k/litecoin,particl/particl-core,monacoinproject/monacoin,DigitalPandacoin/pandacoin,gjhiggins/vcoincore,randy-waterhouse/bitcoin,RHavar/bitcoin,rawodb/bitcoin,wellenreiter01/Feathercoin,peercoin/pee
rcoin,wellenreiter01/Feathercoin,globaltoken/globaltoken,Xekyo/bitcoin,ryanofsky/bitcoin,andreaskern/bitcoin,peercoin/peercoin,fanquake/bitcoin,rnicoll/dogecoin,bespike/litecoin,namecoin/namecoin-core,r8921039/bitcoin,myriadcoin/myriadcoin,droark/bitcoin,donaloconnor/bitcoin,kazcw/bitcoin,instagibbs/bitcoin,paveljanik/bitcoin,globaltoken/globaltoken,midnightmagic/bitcoin,mitchellcash/bitcoin,Kogser/bitcoin,bitcoin/bitcoin,ahmedbodi/vertcoin,sstone/bitcoin,achow101/bitcoin,randy-waterhouse/bitcoin,TheBlueMatt/bitcoin,rawodb/bitcoin,rnicoll/bitcoin,anditto/bitcoin,Sjors/bitcoin,apoelstra/bitcoin,jonasschnelli/bitcoin,pataquets/namecoin-core,pataquets/namecoin-core,mruddy/bitcoin,untrustbank/litecoin,droark/bitcoin,nikkitan/bitcoin,h4x3rotab/BTCGPU,namecoin/namecoin-core,tecnovert/particl-core,globaltoken/globaltoken,dscotese/bitcoin,ElementsProject/elements,RHavar/bitcoin,lbryio/lbrycrd,Xekyo/bitcoin,Bushstar/UFO-Project,GlobalBoost/GlobalBoost,Bushstar/UFO-Project,practicalswift/bitcoin,kallewoof/bitcoin,prusnak/bitcoin,AkioNak/bitcoin,sebrandon1/bitcoin,gjhiggins/vcoincore,jamesob/bitcoin,jlopp/statoshi,Kogser/bitcoin,rnicoll/dogecoin,GroestlCoin/GroestlCoin,jamesob/bitcoin,rawodb/bitcoin,Kogser/bitcoin,Xekyo/bitcoin,domob1812/namecore,mruddy/bitcoin,joshrabinowitz/bitcoin,domob1812/bitcoin,CryptArc/bitcoin,BTCGPU/BTCGPU,prusnak/bitcoin,ericshawlinux/bitcoin,GroestlCoin/GroestlCoin,anditto/bitcoin,JeremyRubin/bitcoin,BTCGPU/BTCGPU,afk11/bitcoin,pstratem/bitcoin,Flowdalic/bitcoin,wellenreiter01/Feathercoin,yenliangl/bitcoin,alecalve/bitcoin,lateminer/bitcoin,tecnovert/particl-core,dscotese/bitcoin,stamhe/bitcoin,Flowdalic/bitcoin,fanquake/bitcoin,jmcorgan/bitcoin,apoelstra/bitcoin,MarcoFalke/bitcoin,jlopp/statoshi,thrasher-/litecoin,r8921039/bitcoin,pataquets/namecoin-core,wellenreiter01/Feathercoin,monacoinproject/monacoin,qtumproject/qtum,instagibbs/bitcoin,mm-s/bitcoin,sstone/bitcoin,pstratem/bitcoin,GroestlCoin/GroestlCoin,mruddy/bitcoin,stamhe/bitcoin,n1bor/bitc
oin,rnicoll/bitcoin,kazcw/bitcoin,domob1812/namecore,bitcoinknots/bitcoin,JeremyRubin/bitcoin,rnicoll/bitcoin,guncoin/guncoin,litecoin-project/litecoin,FeatherCoin/Feathercoin,Bushstar/UFO-Project,rnicoll/dogecoin,prusnak/bitcoin,Kogser/bitcoin,donaloconnor/bitcoin,domob1812/huntercore,sipsorcery/bitcoin,mm-s/bitcoin,BTCGPU/BTCGPU,digibyte/digibyte,joshrabinowitz/bitcoin,bitcoinknots/bitcoin,FeatherCoin/Feathercoin,myriadteam/myriadcoin,sebrandon1/bitcoin,ElementsProject/elements,sebrandon1/bitcoin,yenliangl/bitcoin,vertcoin/vertcoin,instagibbs/bitcoin,jambolo/bitcoin,GroestlCoin/bitcoin,RHavar/bitcoin,ryanofsky/bitcoin,jonasschnelli/bitcoin,Sjors/bitcoin,alecalve/bitcoin,dscotese/bitcoin,anditto/bitcoin,GlobalBoost/GlobalBoost,AkioNak/bitcoin,ryanofsky/bitcoin,sebrandon1/bitcoin,sipsorcery/bitcoin,alecalve/bitcoin,BTCGPU/BTCGPU,ajtowns/bitcoin,FeatherCoin/Feathercoin,anditto/bitcoin,ericshawlinux/bitcoin,ericshawlinux/bitcoin,litecoin-project/litecoin,Xekyo/bitcoin,digibyte/digibyte,bitcoinsSG/bitcoin,mm-s/bitcoin,ryanofsky/bitcoin,apoelstra/bitcoin,cdecker/bitcoin,anditto/bitcoin,kallewoof/bitcoin,bitcoinsSG/bitcoin,ElementsProject/elements,domob1812/huntercore,droark/bitcoin,MeshCollider/bitcoin,alecalve/bitcoin,qtumproject/qtum,stamhe/bitcoin,ajtowns/bitcoin,lbryio/lbrycrd,randy-waterhouse/bitcoin,MeshCollider/bitcoin | test/functional/rpc_deprecated.py | test/functional/rpc_deprecated.py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import BitcoinTestFramework
class DeprecatedRpcTest(BitcoinTestFramework):
    """Placeholder for tests of the -deprecatedrpc mechanism (currently empty)."""
    def set_test_params(self):
        # Two nodes so one can run with and one without -deprecatedrpc flags.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[], []]
    def run_test(self):
        # This test should be used to verify correct behaviour of deprecated
        # RPC methods with and without the -deprecatedrpc flags. For example:
        #
        # self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
        # assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
        # self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        #
        # There are currently no deprecated RPC methods in master, so this
        # test is currently empty.
        pass
if __name__ == '__main__':
DeprecatedRpcTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(BitcoinTestFramework):
    """Check that a deprecated RPC form is rejected by default but allowed
    when the node runs with the matching -deprecatedrpc flag."""
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Node 1 re-enables the deprecated createmultisig behaviour.
        self.extra_args = [[], ["-deprecatedrpc=createmultisig"]]
    def run_test(self):
        self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
        # Node 0 (default): passing an address instead of a pubkey must fail.
        assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
        # Node 1 (flag set): the address form is still accepted.
        self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
if __name__ == '__main__':
DeprecatedRpcTest().main()
| mit | Python |
488edda7c20f00dbca36e477d1d400655eb37f9d | increment version | RasaHQ/rasa_nlu,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu | rasa/version.py | rasa/version.py | __version__ = "1.3.1a1"
| __version__ = "1.3"
| apache-2.0 | Python |
23719083123110afb864c9d7cbbf33ae5503f667 | Fix ADMX template format for Windows Server 2008 | wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser | tools/grit/grit/format/policy_templates/writer_configuration.py | tools/grit/grit/format/policy_templates/writer_configuration.py | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def GetConfigurationForBuild(defines):
  '''Returns a configuration dictionary for the given build that contains
  build-specific settings and information.

  Args:
    defines: Definitions coming from the build system.

  Raises:
    Exception: If 'defines' contains an unknown build-type.
  '''
  # The prefix of key names in config determines which writer will use their
  # corresponding values:
  #   win: Both ADM and ADMX.
  #   mac: Only plist.
  #   admx: Only ADMX.
  #   none/other: Used by all the writers.
  branded_configs = (
      ('_chromium', {
          'build': 'chromium',
          'app_name': 'Chromium',
          'frame_name': 'Chromium Frame',
          'os_name': 'Chromium OS',
          'win_reg_key_name': 'Software\\Policies\\Chromium',
          'win_category_path': ['chromium'],
          'admx_namespace': 'Chromium.Policies.Chromium',
          'admx_prefix': 'chromium',
      }),
      ('_google_chrome', {
          'build': 'chrome',
          'app_name': 'Google Chrome',
          'frame_name': 'Google Chrome Frame',
          'os_name': 'Google Chrome OS',
          'win_reg_key_name': 'Software\\Policies\\Google\\Chrome',
          'win_category_path': ['google', 'googlechrome'],
          'admx_namespace': 'Google.Policies.Chrome',
          'admx_prefix': 'chrome',
      }),
  )
  config = None
  for define, branded in branded_configs:
    if define in defines:
      config = dict(branded)
      break
  if config is None:
    raise Exception('Unknown build')
  # Settings shared by every build flavor.
  config['win_group_policy_class'] = 'Both'
  config['win_supported_os'] = 'SUPPORTED_WINXPSP2'
  if 'mac_bundle_id' in defines:
    config['mac_bundle_id'] = defines['mac_bundle_id']
  return config
| # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def GetConfigurationForBuild(defines):
  '''Returns a configuration dictionary for the given build that contains
  build-specific settings and information.

  Args:
    defines: Definitions coming from the build system.

  Raises:
    Exception: If 'defines' contains an unknown build-type.
  '''
  # The prefix of key names in config determines which writer will use their
  # corresponding values:
  # win: Both ADM and ADMX.
  # mac: Only plist.
  # admx: Only ADMX.
  # none/other: Used by all the writers.
  if '_chromium' in defines:
    config = {
      'build': 'chromium',
      'app_name': 'Chromium',
      'frame_name': 'Chromium Frame',
      'os_name': 'Chromium OS',
      'win_reg_key_name': 'Software\\Policies\\Chromium',
      'win_category_path': ['chromium'],
      'admx_namespace': 'Chromium.Policies.Chromium',
      'admx_prefix': 'chromium',
    }
  elif '_google_chrome' in defines:
    config = {
      'build': 'chrome',
      'app_name': 'Google Chrome',
      'frame_name': 'Google Chrome Frame',
      'os_name': 'Google Chrome OS',
      'win_reg_key_name': 'Software\\Policies\\Google\\Chrome',
      'win_category_path': ['google', 'googlechrome'],
      'admx_namespace': 'Google.Policies.Chrome',
      'admx_prefix': 'chrome',
    }
  else:
    raise Exception('Unknown build')
  # Settings shared by every build flavor.
  # NOTE(review): ADMX 'class' values are case-sensitive tokens
  # ('Machine'/'User'/'Both'); lowercase 'both' here may be rejected by
  # some Group Policy tooling — confirm against the ADMX schema.
  config['win_group_policy_class'] = 'both'
  config['win_supported_os'] = 'SUPPORTED_WINXPSP2'
  if 'mac_bundle_id' in defines:
    config['mac_bundle_id'] = defines['mac_bundle_id']
  return config
| bsd-3-clause | Python |
2716ae180cb3a3f14678bac5f27e4d1514feb965 | fix version | thestick613/python-thundercache | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'ThunderCache'
year = '2017'
author = u'Tudor Aursulesei'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.1'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/thestick613/python-thundercache/issues/%s', '#'),
'pr': ('https://github.com/thestick613/python-thundercache/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'ThunderCache'
year = '2017'
author = u'Tudor Aursulesei'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/thestick613/python-thundercache/issues/%s', '#'),
'pr': ('https://github.com/thestick613/python-thundercache/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| bsd-2-clause | Python |
f024362b28280353b56e011190ed700190311f43 | Update copyright | jaraco/jaraco.itertools | docs/conf.py | docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import datetime
if 'check_output' not in dir(subprocess):
import subprocess32 as subprocess
extensions = [
'sphinx.ext.autodoc',
'rst.linker',
]
# General information about the project.
root = os.path.join(os.path.dirname(__file__), '..')
setup_script = os.path.join(root, 'setup.py')
fields = ['--name', '--version', '--url', '--author']
dist_info_cmd = [sys.executable, setup_script] + fields
output_bytes = subprocess.check_output(dist_info_cmd, cwd=root)
project, version, url, author = output_bytes.decode('utf-8').strip().split('\n')
origin_date = datetime.date(2005,1,1)
today = datetime.date.today()
copyright = '{origin_date.year}-{today.year} {author}'.format(**locals())
# The full version, including alpha/beta/rc tags.
release = version
master_doc = 'index'
link_files = {
'../CHANGES.rst': dict(
using=dict(
GH='https://github.com',
project=project,
url=url,
),
replace=[
dict(
pattern=r"(Issue )?#(?P<issue>\d+)",
url='{url}/issues/{issue}',
),
dict(
pattern=r"^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n",
with_scm="{text}\n{rev[timestamp]:%d %b %Y}\n",
),
dict(
pattern=r"PEP[- ](?P<pep_number>\d+)",
url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
),
],
),
}
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import datetime
if 'check_output' not in dir(subprocess):
import subprocess32 as subprocess
extensions = [
'sphinx.ext.autodoc',
'rst.linker',
]
# General information about the project.
root = os.path.join(os.path.dirname(__file__), '..')
setup_script = os.path.join(root, 'setup.py')
fields = ['--name', '--version', '--url', '--author']
dist_info_cmd = [sys.executable, setup_script] + fields
output_bytes = subprocess.check_output(dist_info_cmd, cwd=root)
project, version, url, author = output_bytes.decode('utf-8').strip().split('\n')
origin_date = datetime.date(2017,1,1)
today = datetime.date.today()
copyright = '{origin_date.year}-{today.year} {author}'.format(**locals())
# The full version, including alpha/beta/rc tags.
release = version
master_doc = 'index'
link_files = {
'../CHANGES.rst': dict(
using=dict(
GH='https://github.com',
project=project,
url=url,
),
replace=[
dict(
pattern=r"(Issue )?#(?P<issue>\d+)",
url='{url}/issues/{issue}',
),
dict(
pattern=r"^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n",
with_scm="{text}\n{rev[timestamp]:%d %b %Y}\n",
),
dict(
pattern=r"PEP[- ](?P<pep_number>\d+)",
url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
),
],
),
}
| mit | Python |
5a13c6e39e877c31acf34e51f8b268ee9b945665 | add google analytics | davidtsadler/ebay-sdk-php | docs/conf.py | docs/conf.py | import sys
import os
import shlex
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
project = u'eBay SDK for PHP'
version = '1.x'
copyright = u'2016, David T. Sadler'
author = u'David T. Sadler'
master_doc = 'index'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
language = None
exclude_patterns = ['_build']
todo_include_todos = False
html_static_path = ['_static']
htmlhelp_basename = 'eBaySDKforPHPdoc'
html_favicon = 'favicon.ico'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'eBaySDKforPHP.tex', u'eBay SDK for PHP Documentation',
u'David T. Sadler', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ebaysdkforphp', u'eBay SDK for PHP Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'eBaySDKforPHP', u'eBay SDK for PHP Documentation',
author, 'eBaySDKforPHP', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- HTML theme settings ----------------------------------------------
html_show_source_link = False
html_sidebars = {
'**': ['sidebarlogo.html',
'globaltoc.html',
'localtoc.html',
'searchbox.html']
}
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
extensions.append('guzzle_sphinx_theme')
html_theme_options = {
'project_nav_name': 'eBay SDK for PHP',
'base_url': 'http://devbay.net/sdk/guides/',
'google_analytics_account': 'UA-51857511-1'
}
| import sys
import os
import shlex
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
project = u'eBay SDK for PHP'
version = '1.x'
copyright = u'2016, David T. Sadler'
author = u'David T. Sadler'
master_doc = 'index'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
language = None
exclude_patterns = ['_build']
todo_include_todos = False
html_static_path = ['_static']
htmlhelp_basename = 'eBaySDKforPHPdoc'
html_favicon = 'favicon.ico'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'eBaySDKforPHP.tex', u'eBay SDK for PHP Documentation',
u'David T. Sadler', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ebaysdkforphp', u'eBay SDK for PHP Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'eBaySDKforPHP', u'eBay SDK for PHP Documentation',
author, 'eBaySDKforPHP', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- HTML theme settings ----------------------------------------------
html_show_source_link = False
html_sidebars = {
'**': ['sidebarlogo.html',
'globaltoc.html',
'localtoc.html',
'searchbox.html']
}
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
extensions.append('guzzle_sphinx_theme')
html_theme_options = {
'project_nav_name': 'eBay SDK for PHP',
'base_url': 'http://devbay.net/sdk/guides/'
}
| apache-2.0 | Python |
e22f770ae7590f664b39552ae76a06b3631284c6 | update copyright date on docs | rdhyee/waterbutler,TomBaxter/waterbutler,RCOSDP/waterbutler,Johnetordoff/waterbutler,felliott/waterbutler,CenterForOpenScience/waterbutler | docs/conf.py | docs/conf.py | # Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import waterbutler # noqa
import waterbutler.server # noqa
# import waterbutler.providers # noqa
master_doc = "index"
project = "WaterButler"
copyright = "2016, Center For Open Science"
version = release = waterbutler.__version__
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = []
# I wish this could go in a per-module file...
coverage_ignore_classes = []
coverage_ignore_functions = []
html_favicon = 'favicon.ico'
latex_documents = [
('documentation', False),
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4', None),
'tornado': ('http://www.tornadoweb.org/en/stable/', None),
'aiohttp': ('https://aiohttp.readthedocs.org/en/v0.14.1/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| # Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import waterbutler # noqa
import waterbutler.server # noqa
# import waterbutler.providers # noqa
master_doc = "index"
project = "WaterButler"
copyright = "2014, Center For Open Science"
version = release = waterbutler.__version__
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = []
# I wish this could go in a per-module file...
coverage_ignore_classes = []
coverage_ignore_functions = []
html_favicon = 'favicon.ico'
latex_documents = [
('documentation', False),
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4', None),
'tornado': ('http://www.tornadoweb.org/en/stable/', None),
'aiohttp': ('https://aiohttp.readthedocs.org/en/v0.14.1/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| apache-2.0 | Python |
412bf0d7ea04671e2aed7861bccaa240418af74b | remove user profile by default | mgpepe/django-heroku-15,mgpepe/django-heroku-15 | DjMainApp/models.py | DjMainApp/models.py | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
"""
# Uncomment those lines after you have synced your database to get user profile functionality
##############################################################
# Create your models here.
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name="profile")
pass_reset_code = models.CharField(max_length=200, null=True, blank=True, default=None)
# Create automatically a User Profile
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
"""
| from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# Create your models here.
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name="profile")
pass_reset_code = models.CharField(max_length=200, null=True, blank=True, default=None)
# Create automatically a User Profile
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User) | apache-2.0 | Python |
f8c22c35088293f899da263db0f7e74955101281 | Apply black to conf.py. | aaugustin/django-sesame,aaugustin/django-sesame | docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import datetime
# -- Project information -----------------------------------------------------
project = "django-sesame"
copyright = "2012-{}, Aymeric Augustin".format(datetime.date.today().year)
author = "Aymeric Augustin"
# The full version, including alpha/beta/rc tags
release = "2.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinxcontrib.spelling",
]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]
# Workaround for https://github.com/readthedocs/readthedocs.org/issues/2569.
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["custom.css"]
html_sidebars = {"**": []}
| # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import datetime
# -- Project information -----------------------------------------------------
project = 'django-sesame'
copyright = '2012-{}, Aymeric Augustin'.format(datetime.date.today().year)
author = 'Aymeric Augustin'
# The full version, including alpha/beta/rc tags
release = '2.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.spelling',
]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build']
# Workaround for https://github.com/readthedocs/readthedocs.org/issues/2569.
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['custom.css']
html_sidebars = {'**': []}
| bsd-3-clause | Python |
09c3b752154478b15a45d11f08d46a5003f174ec | Revert keyword optimization for current release | codeforeurope/Change-By-Us,watchcat/cbu-rotterdam,codeforeurope/Change-By-Us,codeforeurope/Change-By-Us,codeforeurope/Change-By-Us,watchcat/cbu-rotterdam,localprojects/Change-By-Us,watchcat/cbu-rotterdam,watchcat/cbu-rotterdam,watchcat/cbu-rotterdam,localprojects/Change-By-Us,localprojects/Change-By-Us,localprojects/Change-By-Us | giveaminute/keywords.py | giveaminute/keywords.py | """
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
# find keywords in a string
def getKeywords(db, s):
sql = "select keyword from keyword"
data = list(db.query(sql))
words = []
for d in data:
if (d.keyword in s):
words.append(d.keyword)
return words
| """
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
from framework.controller import log
# find keywords in a string
def getKeywords(db, s):
"""Get all matches for passed in string in keyword tables
:param db: database handle
:param s: string to look for
:returns list of matching keywords
"""
words = []
if isinstance(s, str): s = [s]
if not isinstance(s, list):
log.error("getKeywords requested for a non-string, non-list value: %s. Cannot process!" % s)
else:
words = list(db.query("select keyword from keyword where keyword in $lookfor", vars=dict(lookfor=s)))
return words
| agpl-3.0 | Python |
441e9e378a467cc69af96c8c465a1a351e667326 | Fix Python 2.7 failures | stscieisenhamer/glue,saimn/glue,saimn/glue,stscieisenhamer/glue | glue/_plugin_helpers.py | glue/_plugin_helpers.py | # The following funtion is a thin wrapper around iter_entry_points. The reason it
# is in this separate file is that when making the Mac app, py2app doesn't
# support entry points, so we replace this function with a version that has the
# entry points we want hardcoded. If this function was in glue/main.py, the
# reference to the iter_plugin_entry_points function in load_plugin would be
# evaluated at compile time rather than at runtime, so the patched version
# wouldn't be used.
import os
from collections import defaultdict
def iter_plugin_entry_points():
from pkg_resources import iter_entry_points
return iter_entry_points(group='glue.plugins', name=None)
CFG_DIR = os.path.join(os.path.expanduser('~'), '.glue')
PLUGIN_CFG = os.path.join(CFG_DIR, 'plugins.cfg')
class PluginConfig(object):
def __init__(self, plugins={}):
self.plugins = defaultdict(lambda: True)
self.plugins.update(plugins)
def __str__(self):
string = ""
for plugin in sorted(self.plugins):
string += "{0}: {1}\n".format(plugin, self.plugins[plugin])
return string
@classmethod
def load(cls):
from .external.six.moves import configparser
config = configparser.ConfigParser()
read = config.read(PLUGIN_CFG)
if len(read) == 0 or not config.has_section('plugins'):
return cls()
plugins = {}
for name, enabled in config.items('plugins'):
plugins[name] = bool(int(enabled))
self = cls(plugins=plugins)
return self
def save(self):
from .external.six.moves import configparser
config = configparser.ConfigParser()
config['plugins'] = {}
for key in sorted(self.plugins):
config['plugins'][key] = str(int(self.plugins[key]))
if not os.path.exists(CFG_DIR):
os.mkdir(CFG_DIR)
with open(PLUGIN_CFG, 'w') as fout:
config.write(fout)
| # The following funtion is a thin wrapper around iter_entry_points. The reason it
# is in this separate file is that when making the Mac app, py2app doesn't
# support entry points, so we replace this function with a version that has the
# entry points we want hardcoded. If this function was in glue/main.py, the
# reference to the iter_plugin_entry_points function in load_plugin would be
# evaluated at compile time rather than at runtime, so the patched version
# wouldn't be used.
import os
from collections import defaultdict
def iter_plugin_entry_points():
from pkg_resources import iter_entry_points
return iter_entry_points(group='glue.plugins', name=None)
CFG_DIR = os.path.join(os.path.expanduser('~'), '.glue')
PLUGIN_CFG = os.path.join(CFG_DIR, 'plugins.cfg')
class PluginConfig(object):
def __init__(self, plugins={}):
self.plugins = defaultdict(lambda: True)
self.plugins.update(plugins)
def __str__(self):
string = ""
for plugin in sorted(self.plugins):
string += "{0}: {1}\n".format(plugin, self.plugins[plugin])
return string
@classmethod
def load(cls):
from .external.six.moves import configparser
config = configparser.ConfigParser()
read = config.read(PLUGIN_CFG)
if len(read) == 0 or not 'plugins' in config:
return cls()
plugins = {}
for key in sorted(config['plugins']):
plugins[key] = bool(int(config['plugins'][key]))
self = cls(plugins=plugins)
return self
def save(self):
from .external.six.moves import configparser
config = configparser.ConfigParser()
config['plugins'] = {}
for key in sorted(self.plugins):
config['plugins'][key] = str(int(self.plugins[key]))
if not os.path.exists(CFG_DIR):
os.mkdir(CFG_DIR)
with open(PLUGIN_CFG, 'w') as fout:
config.write(fout)
| bsd-3-clause | Python |
d7001ccab0879e17308bf2dc945b5fd3b726be27 | Write the critical multiplier or the range when the damage gets converted into a String | bkittelmann/statblock | statblock/dice.py | statblock/dice.py | from random import random
class Die:
"""
Abstracts the random dice throw. Roll will produce the result.
The die can be further parametrized by a multiplicator and/or
a modifier, like 2 * Die(8) +4.
"""
def __init__(self, number, multiplicator=1, modifier=0):
self.number = number
self.multiplicator = multiplicator
self.modifier = modifier
def roll(self):
return self.multiplicator * random.choice(range(1, self.number + 1)) + self.modifier
def __rmul__(self, other):
return Die(self.number, multiplicator=other, modifier=self.modifier)
def __add__(self, other):
return Die(self.number, multiplicator=self.multiplicator, modifier=other)
def __call__(self):
return self.roll()
def __eq__(self, other):
return (other.number == self.number and
other.multiplicator == self.multiplicator and
other.modifier == self.modifier)
@classmethod
def parse(cls, text):
return cls.__new__()
def __repr__(self):
base = "%sd%s" % (self.multiplicator, self.number)
if self.modifier > 0:
return base + ("+%s" % self.modifier)
return base
d4 = Die(4)
d6 = Die(6)
d8 = Die(8)
d10 = Die(10)
d12 = Die(12)
d20 = Die(20)
d100 = Die(100) | from random import random
class Die:
"""
Abstracts the random dice throw. Roll will produce the result.
The die can be further parametrized by a multiplicator and/or
a modifier, like 2 * Die(8) +4.
"""
def __init__(self, number, multiplicator=1, modifier=0):
self.number = number
self.multiplicator = multiplicator
self.modifier = modifier
def roll(self):
return self.multiplicator * random.choice(range(1, self.number + 1)) + self.modifier
def __rmul__(self, other):
return Die(self.number, multiplicator=other, modifier=self.modifier)
def __add__(self, other):
return Die(self.number, multiplicator=self.multiplicator, modifier=other)
def __call__(self):
return self.roll()
def __eq__(self, other):
return (other.number == self.number and
other.multiplicator == self.multiplicator and
other.modifier == self.modifier)
@classmethod
def parse(cls, text):
return cls.__new__()
def __repr__(self):
return "%sd%s+%s" % (self.multiplicator, self.number, self.modifier)
d4 = Die(4)
d6 = Die(6)
d8 = Die(8)
d10 = Die(10)
d12 = Die(12)
d20 = Die(20)
d100 = Die(100) | mit | Python |
67cbb212675057266686736c060fc617ca8644b2 | Fix UserMessage.id sequence calculation in 0240. | zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip | zerver/migrations/0240_usermessage_migrate_bigint_id_into_id.py | zerver/migrations/0240_usermessage_migrate_bigint_id_into_id.py | # Generated by Django 1.11.23 on 2019-08-23 21:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0239_usermessage_copy_id_to_bigint_id"),
]
operations = [
migrations.RunSQL(
"""
DROP TRIGGER zerver_usermessage_bigint_id_to_id_trigger ON zerver_usermessage;
DROP FUNCTION zerver_usermessage_bigint_id_to_id_trigger_function();
ALTER TABLE zerver_usermessage ALTER COLUMN bigint_id SET NOT NULL;
ALTER TABLE zerver_usermessage DROP CONSTRAINT zerver_usermessage_pkey;
DROP SEQUENCE zerver_usermessage_id_seq CASCADE;
ALTER TABLE zerver_usermessage RENAME COLUMN id to id_old;
ALTER TABLE zerver_usermessage RENAME COLUMN bigint_id to id;
ALTER TABLE zerver_usermessage ADD CONSTRAINT zerver_usermessage_pkey PRIMARY KEY USING INDEX zerver_usermessage_bigint_id_idx;
CREATE SEQUENCE zerver_usermessage_id_seq;
SELECT setval(
'zerver_usermessage_id_seq',
GREATEST(
(SELECT max(id) FROM zerver_usermessage),
(SELECT max(id) FROM zerver_archivedusermessage)
)
);
ALTER TABLE zerver_usermessage ALTER COLUMN id SET DEFAULT NEXTVAL('zerver_usermessage_id_seq');
ALTER TABLE zerver_usermessage ALTER COLUMN id_old DROP NOT NULL;
""",
state_operations=[
# This just tells Django to understand executing the above SQL as if it just ran the operations below,
# so that it knows these model changes are handled and doesn't to generate them on its own
# in the future makemigration calls.
migrations.RemoveField(
model_name="usermessage",
name="bigint_id",
),
migrations.AlterField(
model_name="usermessage",
name="id",
field=models.BigAutoField(primary_key=True, serialize=False),
),
],
),
]
| # Generated by Django 1.11.23 on 2019-08-23 21:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0239_usermessage_copy_id_to_bigint_id"),
]
operations = [
migrations.RunSQL(
"""
DROP TRIGGER zerver_usermessage_bigint_id_to_id_trigger ON zerver_usermessage;
DROP FUNCTION zerver_usermessage_bigint_id_to_id_trigger_function();
ALTER TABLE zerver_usermessage ALTER COLUMN bigint_id SET NOT NULL;
ALTER TABLE zerver_usermessage DROP CONSTRAINT zerver_usermessage_pkey;
DROP SEQUENCE zerver_usermessage_id_seq CASCADE;
ALTER TABLE zerver_usermessage RENAME COLUMN id to id_old;
ALTER TABLE zerver_usermessage RENAME COLUMN bigint_id to id;
ALTER TABLE zerver_usermessage ADD CONSTRAINT zerver_usermessage_pkey PRIMARY KEY USING INDEX zerver_usermessage_bigint_id_idx;
CREATE SEQUENCE zerver_usermessage_id_seq;
SELECT SETVAL('zerver_usermessage_id_seq', (SELECT MAX(id)+1 FROM zerver_usermessage));
ALTER TABLE zerver_usermessage ALTER COLUMN id SET DEFAULT NEXTVAL('zerver_usermessage_id_seq');
ALTER TABLE zerver_usermessage ALTER COLUMN id_old DROP NOT NULL;
""",
state_operations=[
# This just tells Django to understand executing the above SQL as if it just ran the operations below,
# so that it knows these model changes are handled and doesn't to generate them on its own
# in the future makemigration calls.
migrations.RemoveField(
model_name="usermessage",
name="bigint_id",
),
migrations.AlterField(
model_name="usermessage",
name="id",
field=models.BigAutoField(primary_key=True, serialize=False),
),
],
),
]
| apache-2.0 | Python |
1df5bf573bf36b94a2bafbd16870053805a80e6e | set complex example as default | firemark/pixelopolis,firemark/pixelopolis,firemark/pixelopolis,firemark/pixelopolis | web/web.py | web/web.py | from flask import Flask, request, render_template, url_for
from subprocess import run, TimeoutExpired, PIPE
from base64 import b64encode
app = Flask(__name__)
FILES = {
'Cube': 'examples/cube.css',
'Pyramid': 'examples/pyramid.css',
'Triangle': 'examples/triangle.css',
'Fillers': 'examples/fillers.css',
'Series': 'examples/series.css',
'Random': 'examples/random.css',
'Complex': 'examples/complex.css',
}
@app.route('/', methods=['GET'])
def get():
data = get_data_from_file()
return render_and_run(data)
@app.route('/examples/<filename>', methods=['GET'])
def get_example(filename):
data = get_data_from_file(filename)
return render_and_run(data)
@app.route('/', methods=['POST'])
def post():
data = request.form['data']
return render_and_run(data)
def get_data_from_file(filename=None) -> str:
if not FILES:
return ''
if filename is None:
filename = 'Complex'
path = FILES.get(filename)
if path is None:
return ''
with open(path) as fp:
return fp.read()
def render_index(errors=None, data='', img=None):
errors = errors or []
return render_template(
'index.html',
errors=errors,
data=data,
files=FILES,
img=img,
)
def run_pixelopolis(data):
try:
proc = run(
args=['../pixelopolis', '-', '-'],
input=data.encode('utf-8'),
stdout=PIPE,
stderr=PIPE,
timeout=10,
)
except TimeoutExpired:
return ['timeout ;_;'], None
if proc.stdout:
img = b64encode(proc.stdout).decode('utf-8')
else:
img = None
errors = proc.stderr.decode('utf-8').strip().splitlines()
if proc.returncode != 0:
errors = [f'sth is wrong (errocode {proc.returncode})'] + errors
return errors, img
def render_and_run(data):
errors, img = run_pixelopolis(data)
return render_index(
data=data,
errors=errors,
img=img,
)
| from flask import Flask, request, render_template, url_for
from subprocess import run, TimeoutExpired, PIPE
from base64 import b64encode
app = Flask(__name__)
FILES = {
'Cube': 'examples/cube.css',
'Pyramid': 'examples/pyramid.css',
'Triangle': 'examples/triangle.css',
'Fillers': 'examples/fillers.css',
'Series': 'examples/series.css',
'Random': 'examples/random.css',
'Complex': 'examples/complex.css',
}
@app.route('/', methods=['GET'])
def get():
data = get_data_from_file()
return render_and_run(data)
@app.route('/examples/<filename>', methods=['GET'])
def get_example(filename):
data = get_data_from_file(filename)
return render_and_run(data)
@app.route('/', methods=['POST'])
def post():
data = request.form['data']
return render_and_run(data)
def get_data_from_file(filename=None) -> str:
if not FILES:
return ''
if filename is None:
filename = 'Cube'
path = FILES.get(filename)
if path is None:
return ''
with open(path) as fp:
return fp.read()
def render_index(errors=None, data='', img=None):
errors = errors or []
return render_template(
'index.html',
errors=errors,
data=data,
files=FILES,
img=img,
)
def run_pixelopolis(data):
try:
proc = run(
args=['../pixelopolis', '-', '-'],
input=data.encode('utf-8'),
stdout=PIPE,
stderr=PIPE,
timeout=10,
)
except TimeoutExpired:
return ['timeout ;_;'], None
if proc.stdout:
img = b64encode(proc.stdout).decode('utf-8')
else:
img = None
errors = proc.stderr.decode('utf-8').strip().splitlines()
if proc.returncode != 0:
errors = [f'sth is wrong (errocode {proc.returncode})'] + errors
return errors, img
def render_and_run(data):
errors, img = run_pixelopolis(data)
return render_index(
data=data,
errors=errors,
img=img,
)
| mit | Python |
25ed1e5f6a263fcd61e6d5fcb67220f72664efc6 | remove final blank line. | eReuse/grd,eReuse/grd | grd/tests/test_event.py | grd/tests/test_event.py | from django.test import TestCase
from grd.models import Event
class EventTest(TestCase):
fixtures = ['agents.json', 'devices.json', 'events.json', 'users.json']
def test_event_representation(self):
for event in Event.objects.all():
self.assertIsNotNone(repr(event))
| from django.test import TestCase
from grd.models import Event
class EventTest(TestCase):
fixtures = ['agents.json', 'devices.json', 'events.json', 'users.json']
def test_event_representation(self):
for event in Event.objects.all():
self.assertIsNotNone(repr(event))
| agpl-3.0 | Python |
95f7c6cba7c4077053899e3ca01c8ffd3172873c | Add view mixin for working with filters in templates | stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten | grouprise/core/views.py | grouprise/core/views.py | import json
import django
from django_filters.views import FilterMixin
from rules.contrib.views import PermissionRequiredMixin
class PermissionMixin(PermissionRequiredMixin):
@property
def raise_exception(self):
return self.request.user.is_authenticated
class TemplateFilterMixin(FilterMixin):
def get_context_data(self, **kwargs):
filterset_class = self.get_filterset_class()
self.filterset = self.get_filterset(filterset_class)
if not self.filterset.is_bound or self.filterset.is_valid() or not self.get_strict():
self.object_list = self.filterset.qs
else:
self.object_list = self.filterset.queryset.none()
return super().get_context_data(filter=self.filterset, object_list=self.object_list)
class AppConfig:
def __init__(self):
self._settings = {}
self._defaults = {}
def add_setting(self, name, value):
self._settings[name] = value
return self
def add_default(self, name, value):
self._defaults[name] = value
return self
def serialize(self):
conf = {}
conf.update(self._defaults)
conf.update(self._settings)
return json.dumps(conf)
app_config = AppConfig()
class Markdown(django.views.generic.TemplateView):
template_name = 'core/markdown.html'
| import json
import django
from rules.contrib.views import PermissionRequiredMixin
class PermissionMixin(PermissionRequiredMixin):
@property
def raise_exception(self):
return self.request.user.is_authenticated
class AppConfig:
def __init__(self):
self._settings = {}
self._defaults = {}
def add_setting(self, name, value):
self._settings[name] = value
return self
def add_default(self, name, value):
self._defaults[name] = value
return self
def serialize(self):
conf = {}
conf.update(self._defaults)
conf.update(self._settings)
return json.dumps(conf)
app_config = AppConfig()
class Markdown(django.views.generic.TemplateView):
template_name = 'core/markdown.html'
| agpl-3.0 | Python |
4173a7f4db268e9370484cf93fee2d9f0e913d6e | Update deprecated --auth usage instructions. #146 | denmojo/pygrow,grow/grow,denmojo/pygrow,grow/pygrow,grow/grow,denmojo/pygrow,grow/pygrow,denmojo/pygrow,grow/pygrow,grow/grow,grow/grow | grow/commands/deploy.py | grow/commands/deploy.py | from grow.common import utils
from grow.deployments.destinations import base
from grow.deployments.stats import stats
from grow.pods import pods
from grow.pods import storage
import click
import os
@click.command()
@click.argument('deployment_name', required=False, default='default')
@click.argument('pod_path', default='.')
@click.option('--preprocess/--no-preprocess', default=True, is_flag=True,
help='Whether to run preprocessors.')
@click.option('--confirm/--noconfirm', '-c/-f', default=True, is_flag=True,
help='Whether to confirm prior to deployment.')
@click.option('--test/--notest', default=True, is_flag=True,
help='Whether to run deployment tests.')
@click.option('--test_only', default=False, is_flag=True,
help='Only run the deployment tests.')
@click.option('--auth',
help='(deprecated) --auth must now be specified'
' before deploy. Usage: grow --auth=user@example.com deploy')
@click.pass_context
def deploy(context, deployment_name, pod_path, preprocess, confirm, test,
test_only, auth):
"""Deploys a pod to a destination."""
if auth:
text = ('--auth must now be specified before deploy. Usage:'
' grow --auth=user@example.com deploy')
raise click.ClickException(text)
auth = context.parent.params.get('auth')
root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
try:
pod = pods.Pod(root, storage=storage.FileStorage)
deployment = pod.get_deployment(deployment_name)
if auth:
deployment.login(auth)
if preprocess:
pod.preprocess()
if test_only:
deployment.test()
return
paths_to_contents = deployment.dump(pod)
repo = utils.get_git_repo(pod.root)
stats_obj = stats.Stats(pod, paths_to_contents=paths_to_contents)
deployment.deploy(paths_to_contents, stats=stats_obj, repo=repo,
confirm=confirm, test=test)
except base.Error as e:
raise click.ClickException(str(e))
except pods.Error as e:
raise click.ClickException(str(e))
| from grow.common import utils
from grow.deployments.destinations import base
from grow.deployments.stats import stats
from grow.pods import pods
from grow.pods import storage
import click
import os
@click.command()
@click.argument('deployment_name', required=False, default='default')
@click.argument('pod_path', default='.')
@click.option('--preprocess/--no-preprocess', default=True, is_flag=True,
              help='Whether to run preprocessors.')
@click.option('--confirm/--noconfirm', '-c/-f', default=True, is_flag=True,
              help='Whether to confirm prior to deployment.')
@click.option('--test/--notest', default=True, is_flag=True,
              help='Whether to run deployment tests.')
@click.option('--test_only', default=False, is_flag=True,
              help='Only run the deployment tests.')
@click.option('--auth',
              help='(deprecated) The --auth flag must now be specified'
                   ' before "build", e.g.:'
                   ' grow --auth=user@example.com build')
@click.pass_context
def deploy(context, deployment_name, pod_path, preprocess, confirm, test, test_only, auth):
    """Deploys a pod to a destination."""
    # The deprecated per-command --auth flag is rejected with a pointer to
    # its new position on the top-level `grow` command.
    if auth:
        text = ('--auth must be specified before "build", e.g.:'
                ' grow --auth=user@example.com build')
        raise click.ClickException(text)
    # Credentials now come from the parent (top-level) command's params.
    auth = context.parent.params.get('auth')
    root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
    try:
        pod = pods.Pod(root, storage=storage.FileStorage)
        deployment = pod.get_deployment(deployment_name)
        if auth:
            deployment.login(auth)
        if preprocess:
            pod.preprocess()
        if test_only:
            # Dry run: execute the destination's tests and stop here.
            deployment.test()
            return
        paths_to_contents = deployment.dump(pod)
        repo = utils.get_git_repo(pod.root)
        stats_obj = stats.Stats(pod, paths_to_contents=paths_to_contents)
        deployment.deploy(paths_to_contents, stats=stats_obj, repo=repo,
                          confirm=confirm, test=test)
    # Pod and destination failures are surfaced as user-facing CLI errors.
    except base.Error as e:
        raise click.ClickException(str(e))
    except pods.Error as e:
        raise click.ClickException(str(e))
| mit | Python |
d7190980ad5edecd3625af4ba6d8a7bdbb66b810 | Change start page text. | talavis/kimenu | flask_app.py | flask_app.py | from flask import Flask
from flask_caching import Cache
import main
from flask import Flask
from flask_caching import Cache
import main
# Application and cache setup. A simple in-process cache is sufficient here;
# it only has to memoize the rendered menu pages below.
app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
@app.route('/')
def display_available():
    """Front page: static HTML linking to each campus menu page."""
    fragments = (
        '<html>',
        '<head>',
        '<title>Restaurant Menu Parser</title>',
        '</head>',
        '<body>',
        '<p><a href="ki">Campus Solna (KI)</a></p>',
        '<p><a href="uu">Campus Uppsala (BMC)</a></p>',
        '</body>',
        '</html>',
    )
    return ''.join(fragments)
@app.route('/ki')
@cache.cached(timeout=3600)
def make_menu_ki():
    """Render the KI (Campus Solna) menu page; result is cached for 1 h."""
    return main.gen_ki_menu()
@app.route('/uu')
@cache.cached(timeout=3600)
def make_menu_uu():
    """Render the UU (BMC) menu page; result is cached for 1 h."""
    return main.gen_uu_menu()
| from flask import Flask
from flask_caching import Cache
import main
from flask import Flask
from flask_caching import Cache
import main
# Application and cache setup. A simple in-process cache is sufficient here;
# it only has to memoize the rendered menu pages below.
app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
@app.route('/')
def display_available():
    """Front page: static HTML linking to each campus menu page."""
    fragments = (
        '<html>',
        '<head>',
        '<title>Restaurant Menu Parser</title>',
        '</head>',
        '<body>',
        '<p><a href="ki">KI (Solna)</a></p>',
        '<p><a href="uu">UU (BMC)</a></p>',
        '</body>',
        '</html>',
    )
    return ''.join(fragments)
@app.route('/ki')
@cache.cached(timeout=3600)
def make_menu_ki():
    """Render the KI (Solna) menu page; result is cached for 1 h."""
    return main.gen_ki_menu()
@app.route('/uu')
@cache.cached(timeout=3600)
def make_menu_uu():
    """Render the UU (BMC) menu page; result is cached for 1 h."""
    return main.gen_uu_menu()
| bsd-3-clause | Python |
a53941e1eee016acbddf9db3096b2b3e42953276 | Update HexStats.py | Vlek/plugins | HexChat/HexStats.py | HexChat/HexStats.py | import hexchat
#Based on Weechat's Weestats: https://weechat.org/scripts/source/weestats.py.html/
#By Filip H.F. 'FiXato' Slagter <fixato [at] gmail [dot] com>
# Plugin metadata read by HexChat's Python plugin loader.
__module_name__ = 'HexStats'
__module_version__ = '0.0.1'
__module_description__ = 'Displays HexChat Wide User Statistics'
__module_author__ = 'Vlek'
def stats(word, word_to_eol, userdata):
    """/stats handler: print the statistics summary locally.

    `word`/`word_to_eol`/`userdata` follow the HexChat hook_command
    callback signature; none are used here.
    """
    print( getstats() )
    return hexchat.EAT_ALL
def printstats(word, word_to_eol, userdata):
    """/printstats handler: say the statistics summary in the current
    channel/query via the /say command."""
    context = hexchat.find_context()
    context.command('say {}'.format( getstats() ))
    return hexchat.EAT_ALL
def getstats():
    """Build the client-wide statistics summary string.

    Counts, across all servers: joined channels (type 2), how many of
    those we hold OP status in, connected servers (type 1) and open
    query windows (type 3).
    """
    chans = hexchat.get_list('channels')
    types = [i.type for i in chans]
    channels = types.count(2)
    contextlist = [i.context for i in chans if i.type == 2]
    ops = []
    for context in contextlist:
        # Collect our own prefix ('@' for OP) in every joined channel.
        ops += [user.prefix for user in context.get_list('users')
                if user.nick == hexchat.get_info('nick')]
    # Bug fix: format the *count* of OP prefixes, not the raw list of
    # prefix strings (the original left this line commented out).
    ops = ops.count('@')
    servers = types.count(1)
    queries = types.count(3)
    return 'Stats: {} channels ({} OPs), {} servers, {} queries'.format(
        channels, ops, servers, queries)
# Register both slash-commands with HexChat.
hexchat.hook_command("stats", stats, help="/stats displays HexChat user statistics")
hexchat.hook_command("printstats", printstats, help="/printstats Says HexChat user statistics in current context")
| import hexchat
#Based on Weechat's Weestats: https://weechat.org/scripts/source/weestats.py.html/
#By Filip H.F. 'FiXato' Slagter <fixato [at] gmail [dot] com>
# Plugin metadata read by HexChat's Python plugin loader.
__module_name__ = 'HexStats'
__module_version__ = '0.0.1'
__module_description__ = 'Displays HexChat Wide User Statistics'
__module_author__ = 'Vlek'
def stats(word, word_to_eol, userdata):
    """/stats handler: print channel/server/query counts to the current
    context.

    Channel list entry types: 1 = server, 2 = channel, 3 = dialog/query.
    """
    context = hexchat.find_context()
    chans = hexchat.get_list('channels')
    types = [i.type for i in chans]
    channels = types.count(2)
    # OP counting is not implemented yet — always reports 0.
    ops = 0
    servers = types.count(1)
    queries = types.count(3)
    context.prnt('Stats: {} channels ({} OPs), {} servers, {} queries'.format( channels, ops,
        servers, queries ))
    return hexchat.EAT_ALL
# Register the slash-command with HexChat.
hexchat.hook_command("stats", stats, help="/stats displays HexChat user statistics")
| mit | Python |
e4ef6d13caa70f91c51c2cb30462754f117e8ddf | Correct mixer.set_volume() docstring | rawdlite/mopidy,dbrgn/mopidy,glogiotatidis/mopidy,bencevans/mopidy,diandiankan/mopidy,ali/mopidy,quartz55/mopidy,jmarsik/mopidy,vrs01/mopidy,pacificIT/mopidy,diandiankan/mopidy,tkem/mopidy,quartz55/mopidy,hkariti/mopidy,ali/mopidy,tkem/mopidy,mokieyue/mopidy,adamcik/mopidy,ZenithDK/mopidy,dbrgn/mopidy,swak/mopidy,glogiotatidis/mopidy,kingosticks/mopidy,ali/mopidy,bacontext/mopidy,pacificIT/mopidy,jcass77/mopidy,bacontext/mopidy,vrs01/mopidy,tkem/mopidy,mopidy/mopidy,SuperStarPL/mopidy,rawdlite/mopidy,kingosticks/mopidy,adamcik/mopidy,pacificIT/mopidy,dbrgn/mopidy,diandiankan/mopidy,swak/mopidy,quartz55/mopidy,quartz55/mopidy,swak/mopidy,mopidy/mopidy,ali/mopidy,bencevans/mopidy,bacontext/mopidy,vrs01/mopidy,mopidy/mopidy,mokieyue/mopidy,hkariti/mopidy,ZenithDK/mopidy,vrs01/mopidy,jmarsik/mopidy,mokieyue/mopidy,jodal/mopidy,diandiankan/mopidy,jcass77/mopidy,kingosticks/mopidy,ZenithDK/mopidy,jmarsik/mopidy,dbrgn/mopidy,jodal/mopidy,hkariti/mopidy,glogiotatidis/mopidy,SuperStarPL/mopidy,SuperStarPL/mopidy,adamcik/mopidy,pacificIT/mopidy,bencevans/mopidy,jodal/mopidy,swak/mopidy,rawdlite/mopidy,hkariti/mopidy,rawdlite/mopidy,bencevans/mopidy,bacontext/mopidy,tkem/mopidy,mokieyue/mopidy,jmarsik/mopidy,SuperStarPL/mopidy,glogiotatidis/mopidy,jcass77/mopidy,ZenithDK/mopidy | mopidy/core/mixer.py | mopidy/core/mixer.py | from __future__ import absolute_import, unicode_literals
import logging
logger = logging.getLogger(__name__)
class MixerController(object):
    """Mediates volume and mute operations against the audio mixer."""

    pykka_traversable = True

    def __init__(self, mixer):
        # ``mixer`` may be None when audio mixing is disabled.
        self._mixer = mixer

    def get_volume(self):
        """Return the current volume.

        Integer in range [0..100] on a linear scale, or :class:`None`
        when no mixer is available.
        """
        if self._mixer is None:
            return None
        return self._mixer.get_volume().get()

    def set_volume(self, volume):
        """Set the volume to ``volume``, an integer in range [0..100].

        The volume scale is linear. Returns :class:`False` when no mixer
        is available, otherwise the mixer's success flag.
        """
        if self._mixer is None:
            return False
        return self._mixer.set_volume(volume).get()

    def get_mute(self):
        """Return the mute state.

        :class:`True` when muted, :class:`False` when unmuted or when no
        mixer is available.
        """
        if self._mixer is None:
            return False
        return self._mixer.get_mute().get()

    def set_mute(self, mute):
        """Mute (truthy ``mute``) or unmute (falsy ``mute``) the output.

        Returns :class:`False` when no mixer is available, otherwise the
        mixer's success flag.
        """
        if self._mixer is None:
            return False
        return self._mixer.set_mute(bool(mute)).get()
| from __future__ import absolute_import, unicode_literals
import logging
logger = logging.getLogger(__name__)
class MixerController(object):
    """Mediates volume and mute operations against the audio mixer."""

    pykka_traversable = True

    def __init__(self, mixer):
        # ``mixer`` may be None when audio mixing is disabled.
        self._mixer = mixer

    def get_volume(self):
        """Get the volume.

        Integer in range [0..100] or :class:`None` if unknown (e.g. when
        no mixer is available).

        The volume scale is linear.
        """
        if self._mixer is not None:
            return self._mixer.get_volume().get()

    def set_volume(self, volume):
        """Set the volume.

        The volume is defined as an integer in range [0..100].

        The volume scale is linear.

        Returns :class:`False` when no mixer is available, otherwise the
        mixer's success flag.
        """
        if self._mixer is None:
            return False
        else:
            return self._mixer.set_volume(volume).get()

    def get_mute(self):
        """Get mute state.

        :class:`True` if muted, :class:`False` if unmuted or when no
        mixer is available.
        """
        if self._mixer is None:
            return False
        else:
            return self._mixer.get_mute().get()

    def set_mute(self, mute):
        """Set mute state.

        :class:`True` to mute, :class:`False` to unmute.

        Returns :class:`True` if call is successful, otherwise
        :class:`False` (including when no mixer is available).
        """
        if self._mixer is None:
            return False
        else:
            return self._mixer.set_mute(bool(mute)).get()
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.