hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
06fc4eba810668b2d11ff7c5397013dfe56f2902 | 276 | py | Python | artgate/platform/__init__.py | Fassty/artgate | f1f853e9eec985fcd883dd27a0a5f6a610660e50 | [
"MIT"
] | null | null | null | artgate/platform/__init__.py | Fassty/artgate | f1f853e9eec985fcd883dd27a0a5f6a610660e50 | [
"MIT"
] | null | null | null | artgate/platform/__init__.py | Fassty/artgate | f1f853e9eec985fcd883dd27a0a5f6a610660e50 | [
"MIT"
] | null | null | null | from artgate.platform.base import AbstractEnvConnector
from artgate.platform.android import *
from artgate.platform.ios import *
from artgate.platform.linux import LinuxEnvConnector
from artgate.platform.macos import *
from artgate.platform.windows import WindowsEnvConnector
| 39.428571 | 56 | 0.858696 |
7856795e38fe6feb70245449f478552ae5377199 | 222,834 | py | Python | pywikibot/page/__init__.py | Partlo/RoboCade | 89c49b3f793b96aeb9e75672fd150872eb52aa11 | [
"MIT"
] | null | null | null | pywikibot/page/__init__.py | Partlo/RoboCade | 89c49b3f793b96aeb9e75672fd150872eb52aa11 | [
"MIT"
] | null | null | null | pywikibot/page/__init__.py | Partlo/RoboCade | 89c49b3f793b96aeb9e75672fd150872eb52aa11 | [
"MIT"
] | null | null | null | """
Objects representing various types of MediaWiki, including Wikibase, pages.
This module also includes objects:
* Property: a type of semantic data.
* Claim: an instance of a semantic assertion.
* Revision: a single change to a wiki page.
* FileInfo: a structure holding imageinfo of latest rev. of FilePage
* Link: an internal or interwiki link in wikitext.
"""
#
# (C) Pywikibot team, 2008-2021
#
# Distributed under the terms of the MIT license.
#
import logging
import os.path
import re
import unicodedata
from collections import Counter, OrderedDict, defaultdict
from contextlib import suppress
from html.entities import name2codepoint
from http import HTTPStatus
from itertools import chain
from typing import Any, Optional, Union
from urllib.parse import quote_from_bytes
from warnings import warn
import pywikibot
from pywikibot import config, i18n, textlib
from pywikibot.backports import Dict, Iterable, List, Tuple
from pywikibot.comms import http
from pywikibot.exceptions import (
APIError,
AutoblockUserError,
EntityTypeUnknownError,
Error,
InterwikiRedirectPageError,
InvalidPageError,
InvalidTitleError,
IsNotRedirectPageError,
IsRedirectPageError,
NoMoveTargetError,
NoPageError,
NotEmailableError,
NoUsernameError,
NoWikibaseEntityError,
OtherPageSaveError,
PageSaveRelatedError,
SectionError,
SiteDefinitionError,
UnknownExtensionError,
UserRightsError,
WikiBaseError,
)
from pywikibot.family import Family
from pywikibot.page._collections import (
AliasesDict,
ClaimCollection,
LanguageDict,
SiteLinkCollection,
)
from pywikibot.page._decorators import allow_asynchronous
from pywikibot.page._revision import Revision
from pywikibot.site import DataSite, Namespace
from pywikibot.tools import (
ComparableMixin,
compute_file_hash,
deprecate_arg,
deprecated,
deprecated_args,
first_upper,
is_ip_address,
issue_deprecation_warning,
ModuleDeprecationWrapper,
redirect_func,
remove_last_args,
)
# Regex fragment matching an http/https scheme anchored at string start;
# used when validating/normalizing external URLs elsewhere in the module.
PROTOCOL_REGEX = r'\Ahttps?://'
# Public API of this module.
__all__ = (
    'BasePage',
    'Page',
    'FilePage',
    'Category',
    'User',
    'WikibasePage',
    'ItemPage',
    'Property',
    'PropertyPage',
    'Claim',
    'Revision',
    'FileInfo',
    'BaseLink',
    'Link',
    'SiteLink',
    'SiteLinkCollection',
    'html2unicode',
    'UnicodeToAsciiHtml',
    'unicode2html',
    'url2unicode',
)
# Module-level logger for page-related diagnostics.
logger = logging.getLogger('pywiki.wiki.page')
# Note: Link objects (defined later on) represent a wiki-page's title, while
# Page objects (defined here) represent the page itself, including its
# contents.
class BasePage(ComparableMixin):

    """
    BasePage: Base object for a MediaWiki page.

    This object only implements internally methods that do not require
    reading from or writing to the wiki. All other methods are delegated
    to the Site object.

    Will be subclassed by Page, WikibasePage, and FlowPage.
    """

    # Names of instance attributes that cache data fetched from the wiki;
    # code that invalidates the page cache iterates this tuple to clear them.
    _cache_attrs = (
        '_text', '_pageid', '_catinfo', '_templates', '_protection',
        '_contentmodel', '_langlinks', '_isredir', '_coords',
        '_preloadedtext', '_timestamp', '_applicable_protections',
        '_flowinfo', '_quality', '_pageprops', '_revid', '_quality_text',
        '_pageimage', '_item', '_lintinfo',
    )
    def __init__(self, source, title: str = '', ns: int = 0):
        """
        Instantiate a Page object.

        Three calling formats are supported:

          - If the first argument is a Page, create a copy of that object.
            This can be used to convert an existing Page into a subclass
            object, such as Category or FilePage. (If the title is also
            given as the second argument, creates a copy with that title;
            this is used when pages are moved.)
          - If the first argument is a Site, create a Page on that Site
            using the second argument as the title (may include a section),
            and the third as the namespace number. The namespace number is
            mandatory, even if the title includes the namespace prefix. This
            is the preferred syntax when using an already-normalized title
            obtained from api.php or a database dump. WARNING: may produce
            invalid objects if page title isn't in normal form!
          - If the first argument is a BaseLink, create a Page from that link.
            This is the preferred syntax when using a title scraped from
            wikitext, URLs, or another non-normalized source.

        :param source: the source of the page
        :type source: pywikibot.page.BaseLink (or subclass),
            pywikibot.page.Page (or subclass), or pywikibot.page.Site
        :param title: normalized title of the page; required if source is a
            Site, ignored otherwise
        :type title: str
        :param ns: namespace number; required if source is a Site, ignored
            otherwise
        :type ns: int
        :raises ValueError: title is None
        :raises pywikibot.exceptions.Error: source has an unsupported type
        """
        if title is None:
            raise ValueError('Title cannot be None.')
        if isinstance(source, pywikibot.site.BaseSite):
            self._link = Link(title, source=source, default_namespace=ns)
            self._revisions = {}
        elif isinstance(source, Page):
            # copy all of source's attributes to this object
            # without overwriting non-None values
            self.__dict__.update((k, v) for k, v in source.__dict__.items()
                                 if k not in self.__dict__
                                 or self.__dict__[k] is None)
            if title:
                # overwrite title
                self._link = Link(title, source=source.site,
                                  default_namespace=ns)
        elif isinstance(source, BaseLink):
            self._link = source
            self._revisions = {}
        else:
            raise Error(
                "Invalid argument type '{}' in Page initializer: {}"
                .format(type(source), source))
@property
def site(self):
"""Return the Site object for the wiki on which this Page resides.
:rtype: pywikibot.Site
"""
return self._link.site
def version(self):
"""
Return MediaWiki version number of the page site.
This is needed to use @need_version() decorator for methods of
Page objects.
"""
return self.site.version()
    @property
    def image_repository(self):
        """Return the Site object for the image repository.

        Delegates to ``self.site.image_repository()``.
        """
        return self.site.image_repository()
    @property
    def data_repository(self):
        """Return the Site object for the data repository.

        Delegates to ``self.site.data_repository()``.
        """
        return self.site.data_repository()
    def namespace(self):
        """
        Return the namespace of the page.

        :return: namespace of the page
        :rtype: pywikibot.Namespace
        """
        # The namespace is derived from the underlying Link, not fetched
        # from the site.
        return self._link.namespace
@property
def content_model(self):
"""
Return the content model for this page.
If it cannot be reliably determined via the API,
None is returned.
"""
if not hasattr(self, '_contentmodel'):
self.site.loadpageinfo(self)
return self._contentmodel
@property
def depth(self):
"""Return the depth/subpage level of the page."""
if not hasattr(self, '_depth'):
# Check if the namespace allows subpages
if self.namespace().subpages:
self._depth = self.title().count('/')
else:
# Does not allow subpages, which means depth is always 0
self._depth = 0
return self._depth
@property
def pageid(self) -> int:
"""
Return pageid of the page.
:return: pageid or 0 if page does not exist
"""
if not hasattr(self, '_pageid'):
self.site.loadpageinfo(self)
return self._pageid
    @deprecated_args(
        savetitle='as_url', withNamespace='with_ns',
        withSection='with_section', forceInterwiki='force_interwiki',
        asUrl='as_url', asLink='as_link', allowInterwiki='allow_interwiki')
    def title(self, *, underscore=False, with_ns=True,
              with_section=True, as_url=False, as_link=False,
              allow_interwiki=True, force_interwiki=False, textlink=False,
              as_filename=False, insite=None, without_brackets=False) -> str:
        """
        Return the title of this Page, as a string.

        :param underscore: (not used with as_link) if true, replace all ' '
            characters with '_'
        :param with_ns: if false, omit the namespace prefix. If this
            option is false and used together with as_link return a labeled
            link like [[link|label]]
        :param with_section: if false, omit the section
        :param as_url: (not used with as_link) if true, quote title as if in an
            URL
        :param as_link: if true, return the title in the form of a wikilink
        :param allow_interwiki: (only used if as_link is true) if true, format
            the link as an interwiki link if necessary
        :param force_interwiki: (only used if as_link is true) if true, always
            format the link as an interwiki link
        :param textlink: (only used if as_link is true) if true, place a ':'
            before Category: and Image: links
        :param as_filename: (not used with as_link) if true, replace any
            characters that are unsafe in filenames
        :param insite: (only used if as_link is true) a site object where the
            title is to be shown. Default is the current family/lang given by
            -family and -lang or -site option i.e. config.family and
            config.mylang
        :param without_brackets: (cannot be used with as_link) if true, remove
            the last pair of brackets(usually removes disambiguation brackets).
        """
        title = self._link.canonical_title()
        label = self._link.title
        if with_section and self.section():
            section = '#' + self.section()
        else:
            section = ''
        if as_link:
            # Wikilink rendering returns early; the remaining options
            # (underscore/as_url/as_filename) do not apply to links.
            if insite:
                target_code = insite.code
                target_family = insite.family.name
            else:
                target_code = config.mylang
                target_family = config.family
            if force_interwiki \
               or (allow_interwiki
                   and (self.site.family.name != target_family
                        or self.site.code != target_code)):
                if self.site.family.name not in (
                        target_family, self.site.code):
                    title = '{site.family.name}:{site.code}:{title}'.format(
                        site=self.site, title=title)
                else:
                    # use this form for sites like commons, where the
                    # code is the same as the family name
                    title = '{}:{}'.format(self.site.code, title)
            elif textlink and (self.is_filepage() or self.is_categorypage()):
                title = ':{}'.format(title)
            elif self.namespace() == 0 and not section:
                # Main-namespace links without a section need no label.
                with_ns = True
            if with_ns:
                return '[[{}{}]]'.format(title, section)
            return '[[{}{}|{}]]'.format(title, section, label)
        if not with_ns and self.namespace() != 0:
            title = label + section
        else:
            title += section
        if without_brackets:
            brackets_re = r'\s+\([^()]+?\)$'
            title = re.sub(brackets_re, '', title)
        if underscore or as_url:
            title = title.replace(' ', '_')
        if as_url:
            encoded_title = title.encode(self.site.encoding())
            title = quote_from_bytes(encoded_title, safe='')
        if as_filename:
            # Replace characters that are not possible in file names on some
            # systems, but still are valid in MediaWiki titles:
            # Unix: /
            # MediaWiki: /:\
            # Windows: /:\"?*
            # Spaces are possible on most systems, but are bad for URLs.
            for forbidden in ':*?/\\" ':
                title = title.replace(forbidden, '_')
        return title
@remove_last_args(('decode', 'underscore'))
def section(self) -> Optional[str]:
"""
Return the name of the section this Page refers to.
The section is the part of the title following a '#' character, if
any. If no section is present, return None.
"""
try:
section = self._link.section
except AttributeError:
section = None
return section
def __str__(self) -> str:
"""Return a string representation."""
return self.title(as_link=True, force_interwiki=True)
def __repr__(self) -> str:
"""Return a more complete string representation."""
return '{}({!r})'.format(self.__class__.__name__, self.title())
def _cmpkey(self):
"""
Key for comparison of Page objects.
Page objects are "equal" if and only if they are on the same site
and have the same normalized title, including section if any.
Page objects are sortable by site, namespace then title.
"""
return (self.site, self.namespace(), self.title())
    def __hash__(self):
        """
        A stable identifier to be used as a key in hash-tables.

        This relies on the fact that the string
        representation of an instance cannot change after the construction.
        """
        # Hash exactly the comparison key so __eq__/__hash__ stay consistent.
        return hash(self._cmpkey())
def full_url(self):
"""Return the full URL."""
return self.site.base_url(self.site.article_path
+ self.title(as_url=True))
def autoFormat(self):
"""
Return :py:obj:`date.getAutoFormat` dictName and value, if any.
Value can be a year, date, etc., and dictName is 'YearBC',
'Year_December', or another dictionary name. Please note that two
entries may have exactly the same autoFormat, but be in two
different namespaces, as some sites have categories with the
same names. Regular titles return (None, None).
"""
if not hasattr(self, '_autoFormat'):
from pywikibot import date
self._autoFormat = date.getAutoFormat(
self.site.lang,
self.title(with_ns=False)
)
return self._autoFormat
def isAutoTitle(self):
"""Return True if title of this Page is in the autoFormat dict."""
return self.autoFormat()[0] is not None
    @remove_last_args(['sysop'])
    def get(self, force: bool = False, get_redirect: bool = False) -> str:
        """Return the wiki-text of the page.

        This will retrieve the page from the server if it has not been
        retrieved yet, or if force is True. This can raise the following
        exceptions that should be caught by the calling code:

        :exception pywikibot.exceptions.NoPageError: The page does not exist
        :exception pywikibot.exceptions.IsRedirectPageError: The page is a
            redirect. The argument of the exception is the title of the page
            it redirects to.
        :exception pywikibot.exceptions.SectionError: The section does not
            exist on a page with a # link

        :param force: reload all page attributes, including errors.
        :param get_redirect: return the redirect text, do not follow the
            redirect, do not raise an exception.
        """
        if force:
            # Invalidate cached revision data and the cached bot-edit
            # check so everything is re-fetched from the live site.
            del self.latest_revision_id
            if hasattr(self, '_bot_may_edit'):
                del self._bot_may_edit
        try:
            self._getInternals()
        except IsRedirectPageError:
            # Suppress the redirect error only when the caller explicitly
            # asked for the redirect's own text.
            if not get_redirect:
                raise
        return self.latest_revision.text
def _latest_cached_revision(self):
"""Get the latest revision if cached and has text, otherwise None."""
if (hasattr(self, '_revid') and self._revid in self._revisions
and self._revisions[self._revid].text is not None):
return self._revisions[self._revid]
return None
    def _getInternals(self):
        """
        Helper function for get().

        Stores latest revision in self if it doesn't contain it, doesn't think.
        * Raises exceptions from previous runs.
        * Stores new exceptions in _getexception and raises them.
        """
        # Raise exceptions from previous runs — a page that failed once
        # keeps failing the same way until the cache is invalidated.
        if hasattr(self, '_getexception'):
            raise self._getexception
        # If not already stored, fetch revision
        if self._latest_cached_revision() is None:
            try:
                self.site.loadrevisions(self, content=True)
            except (NoPageError, SectionError) as e:
                # Remember the failure so later calls re-raise without
                # hitting the API again.
                self._getexception = e
                raise
        # self._isredir is set by loadrevisions
        if self._isredir:
            self._getexception = IsRedirectPageError(self)
            raise self._getexception
@remove_last_args(['sysop'])
def getOldVersion(self, oldid,
force: bool = False, get_redirect: bool = False) -> str:
"""
Return text of an old revision of this page; same options as get().
:param oldid: The revid of the revision desired.
"""
if force or oldid not in self._revisions \
or self._revisions[oldid].text is None:
self.site.loadrevisions(self,
content=True,
revids=oldid)
# TODO: what about redirects, errors?
return self._revisions[oldid].text
def permalink(self, oldid=None, percent_encoded: bool = True,
with_protocol: bool = False) -> str:
"""Return the permalink URL of an old revision of this page.
:param oldid: The revid of the revision desired.
:param percent_encoded: if false, the link will be provided
without title uncoded.
:param with_protocol: if true, http or https prefixes will be
included before the double slash.
"""
if percent_encoded:
title = self.title(as_url=True)
else:
title = self.title(as_url=False).replace(' ', '_')
return '{}//{}{}/index.php?title={}&oldid={}'.format(
self.site.protocol() + ':' if with_protocol else '',
self.site.hostname(),
self.site.scriptpath(),
title,
oldid if oldid is not None else self.latest_revision_id)
@property
def latest_revision_id(self):
"""Return the current revision id for this page."""
if not hasattr(self, '_revid'):
self.revisions()
return self._revid
@latest_revision_id.deleter
def latest_revision_id(self):
"""
Remove the latest revision id set for this Page.
All internal cached values specifically for the latest revision
of this page are cleared.
The following cached values are not cleared:
- text property
- page properties, and page coordinates
- lastNonBotUser
- isDisambig and isCategoryRedirect status
- langlinks, templates and deleted revisions
"""
# When forcing, we retry the page no matter what:
# * Old exceptions do not apply any more
# * Deleting _revid to force reload
# * Deleting _redirtarget, that info is now obsolete.
for attr in ['_redirtarget', '_getexception', '_revid']:
if hasattr(self, attr):
delattr(self, attr)
    @latest_revision_id.setter
    def latest_revision_id(self, value):
        """Set the latest revision for this Page."""
        # Clear caches tied to the previous latest revision first,
        # then record the new revision id.
        del self.latest_revision_id
        self._revid = value
@property
def latest_revision(self):
"""Return the current revision for this page."""
rev = self._latest_cached_revision()
if rev is not None:
return rev
with suppress(StopIteration):
return next(self.revisions(content=True, total=1))
raise InvalidPageError(self)
@property
def text(self) -> str:
"""
Return the current (edited) wikitext, loading it if necessary.
:return: text of the page
"""
if getattr(self, '_text', None) is not None:
return self._text
try:
return self.get(get_redirect=True)
except NoPageError:
# TODO: what other exceptions might be returned?
return ''
    @text.setter
    def text(self, value: Optional[str]):
        """Update the current (edited) wikitext.

        :param value: New value or None
        """
        try:
            self.botMayEdit()  # T262136, T267770
        except Exception as e:
            # dry tests aren't able to make an API call
            # but are rejected by an Exception; ignore it then.
            if not str(e).startswith('DryRequest rejecting request:'):
                raise
        # Drop any previously edited text and derived caches before
        # storing the new value.
        del self.text
        self._text = None if value is None else str(value)
@text.deleter
def text(self):
"""Delete the current (edited) wikitext."""
if hasattr(self, '_text'):
del self._text
if hasattr(self, '_expanded_text'):
del self._expanded_text
if hasattr(self, '_raw_extracted_templates'):
del self._raw_extracted_templates
    def preloadText(self) -> str:
        """
        The text returned by EditFormPreloadText.

        See API module "info".

        Application: on Wikisource wikis, text can be preloaded even if
        a page does not exist, if an Index page is present.
        """
        # The preload flag makes loadpageinfo populate _preloadedtext.
        self.site.loadpageinfo(self, preload=True)
        return self._preloadedtext
def _get_parsed_page(self):
"""Retrieve parsed text (via action=parse) and cache it."""
# Get (cached) parsed text.
if not hasattr(self, '_parsed_text'):
self._parsed_text = self.site.get_parsed_page(self)
return self._parsed_text
def properties(self, force=False) -> dict:
"""
Return the properties of the page.
:param force: force updating from the live site
"""
if not hasattr(self, '_pageprops') or force:
self._pageprops = {} # page may not have pageprops (see T56868)
self.site.loadpageprops(self)
return self._pageprops
def defaultsort(self, force=False) -> Optional[str]:
"""
Extract value of the {{DEFAULTSORT:}} magic word from the page.
:param force: force updating from the live site
"""
return self.properties(force=force).get('defaultsort')
    @deprecate_arg('refresh', 'force')
    def expand_text(self, force=False, includecomments=False) -> str:
        """Return the page text with all templates and parser words expanded.

        :param force: force updating from the live site
        :param includecomments: Also strip comments if includecomments
            parameter is not True.
        """
        if not hasattr(self, '_expanded_text') or (
                self._expanded_text is None) or force:
            if not self.text:
                # Nothing to expand; cache and return the empty result.
                self._expanded_text = ''
                return ''
            self._expanded_text = self.site.expand_text(
                self.text,
                title=self.title(with_section=False),
                includecomments=includecomments)
        return self._expanded_text
    def userName(self) -> str:
        """Return name or IP address of last user to edit page.

        Taken from the latest revision's metadata.
        """
        return self.latest_revision.user
    def isIpEdit(self) -> bool:
        """Return True if last editor was unregistered.

        Taken from the latest revision's ``anon`` flag.
        """
        return self.latest_revision.anon
def lastNonBotUser(self) -> str:
"""
Return name or IP address of last human/non-bot user to edit page.
Determine the most recent human editor out of the last revisions.
If it was not able to retrieve a human user, returns None.
If the edit was done by a bot which is no longer flagged as 'bot',
i.e. which is not returned by Site.botusers(), it will be returned
as a non-bot edit.
"""
if hasattr(self, '_lastNonBotUser'):
return self._lastNonBotUser
self._lastNonBotUser = None
for entry in self.revisions():
if entry.user and (not self.site.isBot(entry.user)):
self._lastNonBotUser = entry.user
break
return self._lastNonBotUser
    @remove_last_args(('datetime', ))
    def editTime(self):
        """Return timestamp of last revision to page.

        :rtype: pywikibot.Timestamp
        """
        return self.latest_revision.timestamp
def exists(self) -> bool:
"""Return True if page exists on the wiki, even if it's a redirect.
If the title includes a section, return False if this section isn't
found.
"""
with suppress(AttributeError):
return self.pageid > 0
raise InvalidPageError(self)
    @property
    def oldest_revision(self):
        """
        Return the first revision of this page.

        :rtype: :py:obj:`Revision`
        """
        # reverse=True iterates revisions oldest-first; take the first one.
        return next(self.revisions(reverse=True, total=1))
    def isRedirectPage(self):
        """Return True if this is a redirect, False if not or not existing.

        Delegates to ``Site.page_isredirect``.
        """
        return self.site.page_isredirect(self)
def isStaticRedirect(self, force: bool = False) -> bool:
"""
Determine whether the page is a static redirect.
A static redirect must be a valid redirect, and contain the magic
word __STATICREDIRECT__.
:param force: Bypass local caching
"""
if self.isRedirectPage():
static_keys = self.site.getmagicwords('staticredirect')
text = self.get(get_redirect=True, force=force)
if static_keys:
for key in static_keys:
if key in text:
return True
return False
    def isCategoryRedirect(self) -> bool:
        """Return True if this is a category redirect page, False otherwise."""
        if not self.is_categorypage():
            return False
        if not hasattr(self, '_catredirect'):
            # _catredirect caches the redirect target title, or False.
            self._catredirect = False
            catredirs = self.site.category_redirects()
            for template, args in self.templatesWithParams():
                if template.title(with_ns=False) in catredirs:
                    if args:
                        # Get target (first template argument)
                        p = pywikibot.Page(
                            self.site, args[0].strip(), Namespace.CATEGORY)
                        if p.namespace() == Namespace.CATEGORY:
                            self._catredirect = p.title()
                        else:
                            pywikibot.warning(
                                'Category redirect target {} on {} is not a '
                                'category'.format(p.title(as_link=True),
                                                  self.title(as_link=True)))
                    else:
                        pywikibot.warning(
                            'No target found for category redirect on '
                            + self.title(as_link=True))
                    # Only the first matching redirect template counts.
                    break
        return bool(self._catredirect)
def getCategoryRedirectTarget(self):
"""
If this is a category redirect, return the target category title.
:rtype: pywikibot.page.Category
"""
if self.isCategoryRedirect():
return Category(Link(self._catredirect, self.site))
raise IsNotRedirectPageError(self)
def isTalkPage(self):
"""Return True if this page is in any talk namespace."""
ns = self.namespace()
return ns >= 0 and ns % 2 == 1
def toggleTalkPage(self):
"""
Return other member of the article-talk page pair for this Page.
If self is a talk page, returns the associated content page;
otherwise, returns the associated talk page. The returned page need
not actually exist on the wiki.
:return: Page or None if self is a special page.
:rtype: typing.Optional[pywikibot.Page]
"""
ns = self.namespace()
if ns < 0: # Special page
return None
title = self.title(with_ns=False)
new_ns = ns + (1, -1)[self.isTalkPage()]
return Page(self.site,
'{}:{}'.format(self.site.namespace(new_ns), title))
    def is_categorypage(self):
        """Return True if the page is a Category, False otherwise."""
        # 14 is the Category namespace number.
        return self.namespace() == 14
    def is_filepage(self):
        """Return True if this is a file description page, False otherwise."""
        # 6 is the File namespace number.
        return self.namespace() == 6
    @remove_last_args(['get_Index'])
    def isDisambig(self) -> bool:
        """
        Return True if this is a disambiguation page, False otherwise.

        By default, it uses the the Disambiguator extension's result. The
        identification relies on the presence of the __DISAMBIG__ magic word
        which may also be transcluded.

        If the Disambiguator extension isn't activated for the given site,
        the identification relies on the presence of specific templates.
        First load a list of template names from the Family file;
        if the value in the Family file is None or no entry was made, look for
        the list on [[MediaWiki:Disambiguationspage]]. If this page does not
        exist, take the MediaWiki message. 'Template:Disambig' is always
        assumed to be default, and will be appended regardless of its
        existence.
        """
        if self.site.has_extension('Disambiguator'):
            # If the Disambiguator extension is loaded, use it
            return 'disambiguation' in self.properties()
        # Fallback: compare this page's templates against the site's set
        # of disambiguation templates, computed once per site and cached.
        if not hasattr(self.site, '_disambigtemplates'):
            try:
                default = set(self.site.family.disambig('_default'))
            except KeyError:
                default = {'Disambig'}
            try:
                distl = self.site.family.disambig(self.site.code,
                                                  fallback=False)
            except KeyError:
                distl = None
            if distl is None:
                disambigpages = Page(self.site,
                                     'MediaWiki:Disambiguationspage')
                if disambigpages.exists():
                    # Template links (ns 10) on the MediaWiki page are the
                    # authoritative disambiguation template list.
                    disambigs = {link.title(with_ns=False)
                                 for link in disambigpages.linkedPages()
                                 if link.namespace() == 10}
                elif self.site.has_mediawiki_message('disambiguationspage'):
                    message = self.site.mediawiki_message(
                        'disambiguationspage').split(':', 1)[1]
                    # add the default template(s) for default mw message
                    # only
                    disambigs = {first_upper(message)} | default
                else:
                    disambigs = default
                self.site._disambigtemplates = disambigs
            else:
                # Normalize template capitalization
                self.site._disambigtemplates = {first_upper(t) for t in distl}
        templates = {tl.title(with_ns=False) for tl in self.templates()}
        disambigs = set()
        # always use cached disambig templates
        disambigs.update(self.site._disambigtemplates)
        # see if any template on this page is in the set of disambigs
        disambig_in_page = disambigs.intersection(templates)
        # Template pages themselves (ns 10) never count as disambiguations.
        return self.namespace() != 10 and bool(disambig_in_page)
    @deprecated_args(withTemplateInclusion='with_template_inclusion',
                     onlyTemplateInclusion='only_template_inclusion',
                     redirectsOnly='filter_redirects')
    def getReferences(self,
                      follow_redirects: bool = True,
                      with_template_inclusion: bool = True,
                      only_template_inclusion: bool = False,
                      filter_redirects: bool = False,
                      namespaces=None,
                      total: Optional[int] = None,
                      content: bool = False):
        """
        Return an iterator all pages that refer to or embed the page.

        If you need a full list of referring pages, use
        ``pages = list(s.getReferences())``

        :param follow_redirects: if True, also iterate pages that link to a
            redirect pointing to the page.
        :param with_template_inclusion: if True, also iterate pages where self
            is used as a template.
        :param only_template_inclusion: if True, only iterate pages where self
            is used as a template.
        :param filter_redirects: if True, only iterate redirects to self.
        :param namespaces: only iterate pages in these namespaces
        :param total: iterate no more than this number of pages in total
        :param content: if True, retrieve the content of the current version
            of each referring page (default False)
        :rtype: typing.Iterable[pywikibot.Page]
        """
        # N.B.: this method intentionally overlaps with backlinks() and
        # embeddedin(). Depending on the interface, it may be more efficient
        # to implement those methods in the site interface and then combine
        # the results for this method, or to implement this method and then
        # split up the results for the others.
        return self.site.pagereferences(
            self,
            follow_redirects=follow_redirects,
            filter_redirects=filter_redirects,
            with_template_inclusion=with_template_inclusion,
            only_template_inclusion=only_template_inclusion,
            namespaces=namespaces,
            total=total,
            content=content
        )
    @deprecated_args(followRedirects='follow_redirects',
                     filterRedirects='filter_redirects')
    def backlinks(self,
                  follow_redirects: bool = True,
                  filter_redirects: Optional[bool] = None,
                  namespaces=None,
                  total: Optional[int] = None,
                  content: bool = False):
        """
        Return an iterator for pages that link to this page.

        Pure delegation to ``Site.pagebacklinks``.

        :param follow_redirects: if True, also iterate pages that link to a
            redirect pointing to the page.
        :param filter_redirects: if True, only iterate redirects; if False,
            omit redirects; if None, do not filter
        :param namespaces: only iterate pages in these namespaces
        :param total: iterate no more than this number of pages in total
        :param content: if True, retrieve the content of the current version
            of each referring page (default False)
        """
        return self.site.pagebacklinks(
            self,
            follow_redirects=follow_redirects,
            filter_redirects=filter_redirects,
            namespaces=namespaces,
            total=total,
            content=content
        )
    def embeddedin(self,
                   filter_redirects: Optional[bool] = None,
                   namespaces=None,
                   total: Optional[int] = None,
                   content: bool = False):
        """
        Return an iterator for pages that embed this page as a template.

        Pure delegation to ``Site.page_embeddedin``.

        :param filter_redirects: if True, only iterate redirects; if False,
            omit redirects; if None, do not filter
        :param namespaces: only iterate pages in these namespaces
        :param total: iterate no more than this number of pages in total
        :param content: if True, retrieve the content of the current version
            of each embedding page (default False)
        """
        return self.site.page_embeddedin(
            self,
            filter_redirects=filter_redirects,
            namespaces=namespaces,
            total=total,
            content=content
        )
    def protection(self) -> dict:
        """Return a dictionary reflecting page protections.

        Delegates to ``Site.page_restrictions``.
        """
        return self.site.page_restrictions(self)
    def applicable_protections(self) -> set:
        """
        Return the protection types allowed for that page.

        If the page doesn't exist it only returns "create". Otherwise it
        returns all protection types provided by the site, except "create".
        It also removes "upload" if that page is not in the File namespace.

        It is possible, that it returns an empty set, but only if original
        protection types were removed.

        :return: set of str
        """
        # New API since commit 32083235eb332c419df2063cf966b3400be7ee8a
        if self.site.mw_version >= '1.25wmf14':
            # Modern MediaWiki reports applicable protections directly.
            self.site.loadpageinfo(self)
            return self._applicable_protections
        # Legacy fallback: derive the set from the site's protection types.
        p_types = set(self.site.protection_types())
        if not self.exists():
            return {'create'} if 'create' in p_types else set()
        p_types.remove('create')  # no existing page allows that
        if not self.is_filepage():  # only file pages allow upload
            p_types.remove('upload')
        return p_types
    def has_permission(self, action: str = 'edit') -> bool:
        """Determine whether the page can be modified.

        Return True if the bot has the permission of needed restriction level
        for the given action type.

        :param action: a valid restriction type like 'edit', 'move'
        :raises ValueError: invalid action parameter
        """
        # Validation of the action string happens inside the site method.
        return self.site.page_can_be_edited(self, action)
    @deprecated("Page.has_permission('edit')", since='20200208')
    def canBeEdited(self):  # pragma: no cover
        """DEPRECATED. Determine whether the page may be edited."""
        # Backward-compatibility shim: forwards to the replacement API
        # with its default action ('edit').
        return self.has_permission()
def botMayEdit(self) -> bool:
"""
Determine whether the active bot is allowed to edit the page.
This will be True if the page doesn't contain {{bots}} or {{nobots}}
or any other template from edit_restricted_templates list
in x_family.py file, or it contains them and the active bot is allowed
to edit this page. (This method is only useful on those sites that
recognize the bot-exclusion protocol; on other sites, it will always
return True.)
The framework enforces this restriction by default. It is possible
to override this by setting ignore_bot_templates=True in
user-config.py, or using page.put(force=True).
"""
if not hasattr(self, '_bot_may_edit'):
self._bot_may_edit = self._check_bot_may_edit()
return self._bot_may_edit
    def _check_bot_may_edit(self, module: Optional[str] = None) -> bool:
        """A botMayEdit helper method.

        Inspect the page's templates for the bot-exclusion protocol
        ({{bots}}/{{nobots}}) and site-specific restricting templates,
        and decide whether this bot may edit.

        @param module: The module name to be restricted. Defaults to
            pywikibot.calledModuleName().
        """
        # Pages without template support (e.g. some subclasses) are
        # never restricted.
        if not hasattr(self, 'templatesWithParams'):
            return True
        if config.ignore_bot_templates:  # Check the "master ignore switch"
            return True
        username = self.site.username()
        try:
            templates = self.templatesWithParams()
        except (NoPageError, IsRedirectPageError, SectionError):
            # Missing/redirect/broken-section pages cannot carry a
            # restriction template.
            return True
        # go through all templates and look for any restriction
        restrictions = set(self.site.get_edit_restricted_templates())
        if module is None:
            module = pywikibot.calledModuleName()
        # also add archive templates for non-archive bots
        if module != 'archivebot':
            restrictions.update(self.site.get_archived_page_templates())
        # multiple bots/nobots templates are allowed
        for template, params in templates:
            title = template.title(with_ns=False)
            if title in restrictions:
                return False
            if title not in ('Bots', 'Nobots'):
                continue
            # Split the first parameter into key=value; a missing '=' is
            # treated as a bare value list (key and value are swapped back
            # below).
            try:
                key, sep, value = params[0].partition('=')
            except IndexError:
                key, sep, value = '', '', ''
                names = set()
            else:
                if not sep:
                    key, value = value, key
                key = key.strip()
                names = {name.strip() for name in value.split(',')}
            if len(params) > 1:
                pywikibot.warning(
                    '{{%s|%s}} has more than 1 parameter; taking the first.'
                    % (title.lower(), '|'.join(params)))
            if title == 'Nobots':
                # Bare {{nobots}} forbids all bots; {{nobots}} accepts only
                # a plain name list, never key=value parameters.
                if not params:
                    return False
                if key:
                    pywikibot.error(
                        '%s parameter for {{nobots}} is not allowed. '
                        'Edit declined' % key)
                    return False
                if 'all' in names or module in names or username in names:
                    return False
            if title == 'Bots':
                # {{bots}} accepts allow/deny (by username) and
                # allowscript/denyscript (by module name).
                if value and not key:
                    pywikibot.warning(
                        '{{bots|%s}} is not valid. Ignoring.' % value)
                    continue
                if key and not value:
                    pywikibot.warning(
                        '{{bots|%s=}} is not valid. Ignoring.' % key)
                    continue
                if key == 'allow':
                    if not ('all' in names or username in names):
                        return False
                elif key == 'deny':
                    if 'all' in names or username in names:
                        return False
                elif key == 'allowscript':
                    if not ('all' in names or module in names):
                        return False
                elif key == 'denyscript':
                    if 'all' in names or module in names:
                        return False
                elif key:  # ignore unrecognized keys with a warning
                    pywikibot.warning(
                        '{{bots|%s}} is not valid. Ignoring.' % params[0])
        # no restricting template found
        return True
    @deprecate_arg('async', 'asynchronous')  # T106230
    @deprecated_args(comment='summary')
    def save(self,
             summary: Optional[str] = None,
             watch: Union[str, bool, None] = None,
             minor: bool = True,
             botflag: Optional[bool] = None,
             force: bool = False,
             asynchronous: bool = False,
             callback=None, apply_cosmetic_changes=None,
             quiet: bool = False, **kwargs):
        """Save the current contents of page's text to the wiki.

        :param summary: The edit summary for the modification (optional, but
            most wikis strongly encourage its use)
        :param watch: Specify how the watchlist is affected by this edit, set
            to one of "watch", "unwatch", "preferences", "nochange":
            * watch: add the page to the watchlist
            * unwatch: remove the page from the watchlist
            * preferences: use the preference settings (Default)
            * nochange: don't change the watchlist
            If None (default), follow bot account's default settings
            For backward compatibility watch parameter may also be boolean:
            if True, add or if False, remove this Page to/from bot
            user's watchlist.
        :type watch: str, bool (deprecated) or None
        :param minor: if True, mark this edit as minor
        :param botflag: if True, mark this edit as made by a bot (default:
            True if user has bot status, False if not)
        :param force: if True, ignore botMayEdit() setting
        :param asynchronous: if True, launch a separate thread to save
            asynchronously
        :param callback: a callable object that will be called after the
            page put operation. This object must take two arguments: (1) a
            Page object, and (2) an exception instance, which will be None
            if the page was saved successfully. The callback is intended for
            use by bots that need to keep track of which saves were
            successful.
        :param apply_cosmetic_changes: Overwrites the cosmetic_changes
            configuration value to this value unless it's None.
        :type apply_cosmetic_changes: bool or None
        :param quiet: enable/disable successful save operation message;
            defaults to False.
            In asynchronous mode, if True, it is up to the calling bot to
            manage the output e.g. via callback.
        :raises OtherPageSaveError: editing is restricted by a bot-exclusion
            template and force is not set.
        """
        if not summary:
            # Fall back to the configured default edit summary.
            summary = config.default_edit_summary
        # Backward compatibility: map deprecated boolean watch values onto
        # the watchlist keywords (identity checks keep e.g. 1 unchanged).
        if watch is True:
            watch = 'watch'
        elif watch is False:
            watch = 'unwatch'
        if not force and not self.botMayEdit():
            raise OtherPageSaveError(
                self, 'Editing restricted by {{bots}}, {{nobots}} '
                "or site's equivalent of {{in use}} template")
        self._save(summary=summary, watch=watch, minor=minor, botflag=botflag,
                   asynchronous=asynchronous, callback=callback,
                   cc=apply_cosmetic_changes, quiet=quiet, **kwargs)
@allow_asynchronous
def _save(self, summary=None, watch=None, minor=True, botflag=None,
cc=None, quiet=False, **kwargs):
"""Helper function for save()."""
link = self.title(as_link=True)
if cc or (cc is None and config.cosmetic_changes):
summary = self._cosmetic_changes_hook(summary)
done = self.site.editpage(self, summary=summary, minor=minor,
watch=watch, bot=botflag, **kwargs)
if not done:
if not quiet:
pywikibot.warning('Page {} not saved'.format(link))
raise PageSaveRelatedError(self)
if not quiet:
pywikibot.output('Page {} saved'.format(link))
    def _cosmetic_changes_hook(self, summary: str) -> str:
        """The cosmetic changes hook.

        Apply the cosmetic-changes toolkit to self.text when enabled by
        configuration, and extend the edit summary accordingly.

        :param summary: The current edit summary.
        :return: Modified edit summary if cosmetic changes has been done,
            else the old edit summary.
        """
        # Talk pages, non-wikitext content models and scripts listed in
        # cosmetic_changes_deny_script are never touched.
        if self.isTalkPage() or self.content_model != 'wikitext' or \
           pywikibot.calledModuleName() in config.cosmetic_changes_deny_script:
            return summary
        # check if cosmetic_changes is enabled for this page
        family = self.site.family.name
        if config.cosmetic_changes_mylang_only:
            cc = ((family == config.family and self.site.lang == config.mylang)
                  or self.site.lang in config.cosmetic_changes_enable.get(
                      family, []))
        else:
            cc = True
        cc = cc and self.site.lang not in config.cosmetic_changes_disable.get(
            family, [])
        # the bot-exclusion protocol applies to cosmetic changes as well
        cc = cc and self._check_bot_may_edit('cosmetic_changes')
        if not cc:
            return summary
        old = self.text
        pywikibot.log('Cosmetic changes for {}-{} enabled.'
                      .format(family, self.site.lang))
        # cc depends on page directly and via several other imports
        from pywikibot.cosmetic_changes import (
            CANCEL,
            CosmeticChangesToolkit,
        )
        cc_toolkit = CosmeticChangesToolkit(self, ignore=CANCEL.MATCH)
        self.text = cc_toolkit.change(old)
        # Mention the cosmetic changes in the summary only when the text
        # effectively changed (line-ending differences ignored).
        if summary and old.strip().replace(
                '\r\n', '\n') != self.text.strip().replace('\r\n', '\n'):
            summary += i18n.twtranslate(self.site, 'cosmetic_changes-append',
                                        fallback_prompt='; cosmetic changes')
        return summary
@deprecate_arg('async', 'asynchronous') # T106230
@deprecated_args(comment='summary', watchArticle='watch',
minorEdit='minor')
def put(self, newtext, summary=None, watch=None, minor=True, botflag=None,
force=False, asynchronous=False, callback=None, **kwargs):
"""
Save the page with the contents of the first argument as the text.
This method is maintained primarily for backwards-compatibility.
For new code, using Page.save() is preferred. See save() method
docs for all parameters not listed here.
:param newtext: The complete text of the revised page.
:type newtext: str
"""
self.text = newtext
self.save(summary=summary, watch=watch, minor=minor, botflag=botflag,
force=force, asynchronous=asynchronous, callback=callback,
**kwargs)
    @deprecated('put(asynchronous=True) or save(asynchronous=True)',
                since='20180501')
    @deprecated_args(comment='summary', watchArticle='watch',
                     minorEdit='minor')
    def put_async(self, newtext, summary=None, watch=None, minor=True,
                  botflag=None, force=False, callback=None,
                  **kwargs):  # pragma: no cover
        """Put page on queue to be saved to wiki asynchronously.

        Asynchronous version of put (takes the same arguments), which places
        pages on a queue to be saved by a daemon thread. All arguments are
        the same as for .put(). This version is maintained solely for
        backwards-compatibility.
        """
        # Deprecated shim: simply forwards to put() with asynchronous=True.
        self.put(newtext, summary=summary, watch=watch,
                 minor=minor, botflag=botflag, force=force,
                 asynchronous=True, callback=callback, **kwargs)
    def watch(self, unwatch: bool = False) -> bool:
        """Add or remove this page to/from bot account's watchlist.

        :param unwatch: True to unwatch, False (default) to watch.
        :return: True if successful, False otherwise.
        """
        # The watchlist API call is performed by the site object.
        return self.site.watch(self, unwatch)
def clear_cache(self):
"""Clear the cached attributes of the page."""
self._revisions = {}
for attr in self._cache_attrs:
with suppress(AttributeError):
delattr(self, attr)
    def purge(self, **kwargs) -> bool:
        """Purge the server's cache for this page.

        :keyword redirects: Automatically resolve redirects.
        :type redirects: bool
        :keyword converttitles: Convert titles to other variants if necessary.
            Only works if the wiki's content language supports variant
            conversion.
        :type converttitles: bool
        :keyword forcelinkupdate: Update the links tables.
        :type forcelinkupdate: bool
        :keyword forcerecursivelinkupdate: Update the links table, and update
            the links tables for any page that uses this page as a template.
        :type forcerecursivelinkupdate: bool
        """
        # Drop locally cached attributes too, so they are re-fetched after
        # the server-side purge.
        self.clear_cache()
        return self.site.purgepages([self], **kwargs)
def touch(self, callback=None, botflag=False, **kwargs):
"""
Make a touch edit for this page.
See save() method docs for all parameters.
The following parameters will be overridden by this method:
- summary, watch, minor, force, asynchronous
Parameter botflag is False by default.
minor and botflag parameters are set to False which prevents hiding
the edit when it becomes a real edit due to a bug.
:note: This discards content saved to self.text.
"""
if self.exists():
# ensure always get the page text and not to change it.
del self.text
summary = i18n.twtranslate(self.site, 'pywikibot-touch',
fallback_prompt='Pywikibot touch edit')
self.save(summary=summary, watch='nochange',
minor=False, botflag=botflag, force=True,
asynchronous=False, callback=callback,
apply_cosmetic_changes=False, nocreate=True, **kwargs)
else:
raise NoPageError(self)
def linkedPages(self, namespaces=None,
total: Optional[int] = None,
content: bool = False):
"""
Iterate Pages that this Page links to.
Only returns pages from "normal" internal links. Image and category
links are omitted unless prefixed with ":". Embedded templates are
omitted (but links within them are returned). All interwiki and
external links are omitted.
:param namespaces: only iterate links in these namespaces
:param namespaces: int, or list of ints
:param total: iterate no more than this number of pages in total
:param content: if True, retrieve the content of the current version
of each linked page (default False)
:return: a generator that yields Page objects.
:rtype: generator
"""
return self.site.pagelinks(self, namespaces=namespaces,
total=total, content=content)
def interwiki(self, expand=True):
"""
Iterate interwiki links in the page text, excluding language links.
:param expand: if True (default), include interwiki links found in
templates transcluded onto this page; if False, only iterate
interwiki links found in this page's own wikitext
:type expand: bool
:return: a generator that yields Link objects
:rtype: generator
"""
# This function does not exist in the API, so it has to be
# implemented by screen-scraping
if expand:
text = self.expand_text()
else:
text = self.text
for linkmatch in pywikibot.link_regex.finditer(
textlib.removeDisabledParts(text)):
linktitle = linkmatch.group('title')
link = Link(linktitle, self.site)
# only yield links that are to a different site and that
# are not language links
try:
if link.site != self.site:
if linktitle.lstrip().startswith(':'):
# initial ":" indicates not a language link
yield link
elif link.site.family != self.site.family:
# link to a different family is not a language link
yield link
except Error:
# ignore any links with invalid contents
continue
def langlinks(self, include_obsolete=False) -> list:
"""
Return a list of all inter-language Links on this page.
:param include_obsolete: if true, return even Link objects whose site
is obsolete
:type include_obsolete: bool
:return: list of Link objects.
"""
# Note: We preload a list of *all* langlinks, including links to
# obsolete sites, and store that in self._langlinks. We then filter
# this list if the method was called with include_obsolete=False
# (which is the default)
if not hasattr(self, '_langlinks'):
self._langlinks = list(self.iterlanglinks(include_obsolete=True))
if include_obsolete:
return self._langlinks
return [i for i in self._langlinks if not i.site.obsolete]
def iterlanglinks(self,
total: Optional[int] = None,
include_obsolete: bool = False):
"""Iterate all inter-language links on this page.
:param total: iterate no more than this number of pages in total
:param include_obsolete: if true, yield even Link object whose site
is obsolete
:return: a generator that yields Link objects.
:rtype: generator
"""
if hasattr(self, '_langlinks'):
return iter(self.langlinks(include_obsolete=include_obsolete))
# XXX We might want to fill _langlinks when the Site
# method is called. If we do this, we'll have to think
# about what will happen if the generator is not completely
# iterated upon.
return self.site.pagelanglinks(self, total=total,
include_obsolete=include_obsolete)
    def data_item(self):
        """Convenience function to get the Wikibase item of a page.

        :rtype: pywikibot.page.ItemPage
        """
        # NOTE(review): failure mode when no item exists depends on
        # ItemPage.fromPage — not visible here; confirm before relying.
        return ItemPage.fromPage(self)
def templates(self, content: bool = False):
"""
Return a list of Page objects for templates used on this Page.
Template parameters are ignored. This method only returns embedded
templates, not template pages that happen to be referenced through
a normal link.
:param content: if True, retrieve the content of the current version
of each template (default False)
:param content: bool
"""
# Data might have been preloaded
if not hasattr(self, '_templates'):
self._templates = list(self.itertemplates(content=content))
return self._templates
def itertemplates(self,
total: Optional[int] = None,
content: bool = False):
"""
Iterate Page objects for templates used on this Page.
Template parameters are ignored. This method only returns embedded
templates, not template pages that happen to be referenced through
a normal link.
:param total: iterate no more than this number of pages in total
:param content: if True, retrieve the content of the current version
of each template (default False)
:param content: bool
"""
if hasattr(self, '_templates'):
return iter(self._templates)
return self.site.pagetemplates(self, total=total, content=content)
def imagelinks(self, total: Optional[int] = None, content: bool = False):
"""
Iterate FilePage objects for images displayed on this Page.
:param total: iterate no more than this number of pages in total
:param content: if True, retrieve the content of the current version
of each image description page (default False)
:return: a generator that yields FilePage objects.
"""
return self.site.pageimages(self, total=total, content=content)
    @deprecated_args(withSortKey='with_sort_key')
    def categories(self,
                   with_sort_key: bool = False,
                   total: Optional[int] = None,
                   content: bool = False):
        """Iterate categories that the article is in.

        :param with_sort_key: if True, include the sort key in each Category.
        :param total: iterate no more than this number of pages in total
        :param content: if True, retrieve the content of the current version
            of each category description page (default False)
        :return: a generator that yields Category objects.
        :rtype: generator
        :raises NotImplementedError: if with_sort_key is True.
        """
        # FIXME: bug T75561: with_sort_key is ignored by Site.pagecategories
        if with_sort_key:
            raise NotImplementedError('with_sort_key is not implemented')
        return self.site.pagecategories(self, total=total, content=content)
    def extlinks(self, total: Optional[int] = None):
        """Iterate all external URLs (not interwiki links) from this page.

        :param total: iterate no more than this number of pages in total
        :return: a generator that yields str objects containing URLs.
        :rtype: generator
        """
        # Thin wrapper around the site's external-links query.
        return self.site.page_extlinks(self, total=total)
def coordinates(self, primary_only=False):
"""
Return a list of Coordinate objects for points on the page.
Uses the MediaWiki extension GeoData.
:param primary_only: Only return the coordinate indicated to be primary
:return: A list of Coordinate objects or a single Coordinate if
primary_only is True
:rtype: list of Coordinate or Coordinate or None
"""
if not hasattr(self, '_coords'):
self._coords = []
self.site.loadcoordinfo(self)
if primary_only:
for coord in self._coords:
if coord.primary:
return coord
return None
return list(self._coords)
def page_image(self):
"""
Return the most appropriate image on the page.
Uses the MediaWiki extension PageImages.
:return: A FilePage object
:rtype: pywikibot.page.FilePage
"""
if not hasattr(self, '_pageimage'):
self._pageimage = None
self.site.loadpageimage(self)
return self._pageimage
    def getRedirectTarget(self):
        """Return a Page object for the target this Page redirects to.

        If this page is not a redirect page, will raise an
        IsNotRedirectPageError. This method also can raise a NoPageError.

        :rtype: pywikibot.Page
        """
        # Target resolution (and any caching) is handled by the site object.
        return self.site.getredirtarget(self)
def moved_target(self):
"""
Return a Page object for the target this Page was moved to.
If this page was not moved, it will raise a NoMoveTargetError.
This method also works if the source was already deleted.
:rtype: pywikibot.page.Page
:raises pywikibot.exceptions.NoMoveTargetError: page was not moved
"""
gen = iter(self.site.logevents(logtype='move', page=self, total=1))
try:
lastmove = next(gen)
except StopIteration:
raise NoMoveTargetError(self)
else:
return lastmove.target_page
    @deprecated_args(getText='content', reverseOrder='reverse')
    def revisions(self,
                  reverse: bool = False,
                  total: Optional[int] = None,
                  content: bool = False,
                  starttime=None, endtime=None):
        """Generator which loads the version history as Revision instances.

        :param reverse: if True, yield revisions in ascending key order
            instead of the default descending order.
        :param total: yield no more than this number of revisions.
        :param content: if True, fetch revision content as well.
        :param starttime: retrieve revisions starting at this Timestamp
        :param endtime: retrieve revisions ending at this Timestamp
        """
        # TODO: Only request uncached revisions
        self.site.loadrevisions(self, content=content, rvdir=reverse,
                                starttime=starttime, endtime=endtime,
                                total=total)
        # self._revisions is filled by loadrevisions; its keys (presumably
        # revision ids — confirm in loadrevisions) are sorted descending by
        # default, i.e. newest first.
        return (self._revisions[rev] for rev in
                sorted(self._revisions, reverse=not reverse)[:total])
@deprecated_args(reverseOrder='reverse')
def getVersionHistoryTable(self,
reverse: bool = False,
total: Optional[int] = None):
"""Return the version history as a wiki table."""
result = '{| class="wikitable"\n'
result += '! oldid || date/time || username || edit summary\n'
for entry in self.revisions(reverse=reverse, total=total):
result += '|----\n'
result += ('| {r.revid} || {r.timestamp} || {r.user} || '
'<nowiki>{r.comment}</nowiki>\n'.format(r=entry))
result += '|}\n'
return result
def contributors(self,
total: Optional[int] = None,
starttime=None, endtime=None):
"""
Compile contributors of this page with edit counts.
:param total: iterate no more than this number of revisions in total
:param starttime: retrieve revisions starting at this Timestamp
:param endtime: retrieve revisions ending at this Timestamp
:return: number of edits for each username
:rtype: :py:obj:`collections.Counter`
"""
return Counter(rev.user for rev in
self.revisions(total=total,
starttime=starttime, endtime=endtime))
def revision_count(self, contributors=None) -> int:
"""Determine number of edits from contributors.
:param contributors: contributor usernames
:type contributors: iterable of str or pywikibot.User,
a single pywikibot.User, a str or None
:return: number of edits for all provided usernames
"""
cnt = self.contributors()
if not contributors:
return sum(cnt.values())
if isinstance(contributors, User):
contributors = contributors.username
if isinstance(contributors, str):
return cnt[contributors]
return sum(cnt[user.username] if isinstance(user, User) else cnt[user]
for user in contributors)
    def merge_history(self, dest, timestamp=None, reason=None):
        """Merge revisions from this page into another page.

        See :py:obj:`APISite.merge_history` for details.

        :param dest: Destination page to which revisions will be merged
        :type dest: pywikibot.Page
        :param timestamp: Revisions from this page dating up to this timestamp
            will be merged into the destination page (if not given or False,
            all revisions will be merged)
        :type timestamp: pywikibot.Timestamp
        :param reason: Optional reason for the history merge
        :type reason: str
        """
        # Positional pass-through to the site API wrapper.
        self.site.merge_history(self, dest, timestamp, reason)
@deprecated_args(deleteAndMove='noredirect', movetalkpage='movetalk')
@remove_last_args(['safe'])
def move(self,
newtitle: str,
reason: Optional[str] = None,
movetalk: bool = True,
noredirect: bool = False):
"""
Move this page to a new title.
:param newtitle: The new page title.
:param reason: The edit summary for the move.
:param movetalk: If true, move this page's talk page (if it exists)
:param noredirect: if move succeeds, delete the old page
(usually requires sysop privileges, depending on wiki settings)
"""
if reason is None:
pywikibot.output('Moving {} to [[{}]].'
.format(self.title(as_link=True), newtitle))
reason = pywikibot.input('Please enter a reason for the move:')
return self.site.movepage(self, newtitle, reason,
movetalk=movetalk,
noredirect=noredirect)
    @deprecate_arg('quit', 'automatic_quit')
    def delete(self,
               reason: Optional[str] = None,
               prompt: bool = True,
               mark: bool = False,
               automatic_quit: bool = False):
        """Delete the page from the wiki. Requires administrator status.

        :param reason: The edit summary for the deletion, or rationale
            for deletion if requesting. If None, ask for it.
        :param prompt: If true, prompt user for confirmation before deleting.
        :param mark: If true, and user does not have sysop rights, place a
            speedy-deletion request on the page instead. If false, non-sysops
            will be asked before marking pages for deletion.
        :param automatic_quit: show also the quit option, when asking
            for confirmation.
        """
        if reason is None:
            pywikibot.output('Deleting {}.'.format(self.title(as_link=True)))
            reason = pywikibot.input('Please enter a reason for the deletion:')
        # If user has 'delete' right, delete the page
        if self.site.has_right('delete'):
            answer = 'y'
            if prompt and not hasattr(self.site, '_noDeletePrompt'):
                answer = pywikibot.input_choice(
                    'Do you want to delete {}?'.format(self.title(
                        as_link=True, force_interwiki=True)),
                    [('Yes', 'y'), ('No', 'n'), ('All', 'a')],
                    'n', automatic_quit=automatic_quit)
                if answer == 'a':
                    # 'All' answers yes and suppresses future prompts by
                    # flagging the site object.
                    answer = 'y'
                    self.site._noDeletePrompt = True
            if answer == 'y':
                self.site.delete(self, reason)
            return
        # Otherwise mark it for deletion
        if mark or hasattr(self.site, '_noMarkDeletePrompt'):
            answer = 'y'
        else:
            answer = pywikibot.input_choice(
                "Can't delete {}; do you want to mark it for deletion instead?"
                .format(self),
                [('Yes', 'y'), ('No', 'n'), ('All', 'a')],
                'n', automatic_quit=False)
            if answer == 'a':
                # 'All' answers yes and suppresses future marking prompts.
                answer = 'y'
                self.site._noMarkDeletePrompt = True
        if answer == 'y':
            template = '{{delete|1=%s}}\n' % reason
            # We can't add templates in a wikidata item, so let's use its
            # talk page
            if isinstance(self, pywikibot.ItemPage):
                target = self.toggleTalkPage()
            else:
                target = self
            target.text = template + target.text
            target.save(summary=reason)
def has_deleted_revisions(self) -> bool:
"""Return True if the page has deleted revisions.
*New in version 4.2.*
"""
if not hasattr(self, '_has_deleted_revisions'):
gen = self.site.deletedrevs(self, total=1, prop=['ids'])
self._has_deleted_revisions = bool(list(gen))
return self._has_deleted_revisions
def loadDeletedRevisions(self, total: Optional[int] = None, **kwargs):
"""
Retrieve deleted revisions for this Page.
Stores all revisions' timestamps, dates, editors and comments in
self._deletedRevs attribute.
:return: iterator of timestamps (which can be used to retrieve
revisions later on).
:rtype: generator
"""
if not hasattr(self, '_deletedRevs'):
self._deletedRevs = {}
for item in self.site.deletedrevs(self, total=total, **kwargs):
for rev in item.get('revisions', []):
self._deletedRevs[rev['timestamp']] = rev
yield rev['timestamp']
    @deprecated_args(retrieveText='content')
    def getDeletedRevision(self, timestamp, content=False, **kwargs) -> List:
        """Return a particular deleted revision by timestamp.

        :return: a list of [date, editor, comment, text, restoration
            marker]. text will be None, unless content is True (or has
            been retrieved earlier). If timestamp is not found, returns
            empty list.
        """
        # NOTE(review): the cache hit below returns the cached revision
        # object (a dict filled by loadDeletedRevisions) rather than the
        # list documented above — confirm callers handle both shapes.
        if hasattr(self, '_deletedRevs'):
            if timestamp in self._deletedRevs and (
                    not content
                    or 'content' in self._deletedRevs[timestamp]):
                return self._deletedRevs[timestamp]
        for item in self.site.deletedrevs(self, start=timestamp,
                                          content=content, total=1, **kwargs):
            # should only be one item with one revision
            if item['title'] == self.title():
                if 'revisions' in item:
                    return item['revisions'][0]
        return []
def markDeletedRevision(self, timestamp, undelete=True):
"""
Mark the revision identified by timestamp for undeletion.
:param undelete: if False, mark the revision to remain deleted.
:type undelete: bool
"""
if not hasattr(self, '_deletedRevs'):
self.loadDeletedRevisions()
if timestamp not in self._deletedRevs:
raise ValueError(
'Timestamp {} is not a deleted revision'
.format(timestamp))
self._deletedRevs[timestamp]['marked'] = undelete
    @deprecated_args(comment='reason')
    def undelete(self, reason: Optional[str] = None):
        """Undelete revisions based on the markers set by previous calls.

        If no calls have been made since loadDeletedRevisions(), everything
        will be restored.

        Simplest case::

            Page(...).undelete('This will restore all revisions')

        More complex::

            pg = Page(...)
            revs = pg.loadDeletedRevisions()
            for rev in revs:
                if ... #decide whether to undelete a revision
                    pg.markDeletedRevision(rev) #mark for undeletion
            pg.undelete('This will restore only selected revisions.')

        :param reason: Reason for the action.
        """
        # Collect the timestamps explicitly marked for undeletion; an empty
        # list tells the site to restore everything.
        if hasattr(self, '_deletedRevs'):
            undelete_revs = [ts for ts, rev in self._deletedRevs.items()
                             if 'marked' in rev and rev['marked']]
        else:
            undelete_revs = []
        if reason is None:
            warn('Not passing a reason for undelete() is deprecated.',
                 DeprecationWarning)
            pywikibot.output('Undeleting {}.'.format(self.title(as_link=True)))
            reason = pywikibot.input(
                'Please enter a reason for the undeletion:')
        self.site.undelete(self, reason, revision=undelete_revs)
def protect(self,
reason: Optional[str] = None,
protections: Optional[dict] = None,
**kwargs):
"""
Protect or unprotect a wiki page. Requires administrator status.
Valid protection levels are '' (equivalent to 'none'),
'autoconfirmed', 'sysop' and 'all'. 'all' means 'everyone is allowed',
i.e. that protection type will be unprotected.
In order to unprotect a type of permission, the protection level shall
be either set to 'all' or '' or skipped in the protections dictionary.
Expiry of protections can be set via kwargs, see Site.protect() for
details. By default there is no expiry for the protection types.
:param protections: A dict mapping type of protection to protection
level of that type. Allowed protection types for a page can be
retrieved by Page.self.applicable_protections()
Defaults to protections is None, which means unprotect all
protection types.
Example: {'move': 'sysop', 'edit': 'autoconfirmed'}
:param reason: Reason for the action, default is None and will set an
empty string.
"""
protections = protections or {} # protections is converted to {}
reason = reason or '' # None is converted to ''
self.site.protect(self, protections, reason, **kwargs)
    @deprecated_args(
        comment='summary', oldCat='old_cat', newCat='new_cat',
        sortKey='sort_key', inPlace='in_place')
    def change_category(
        self, old_cat, new_cat, summary=None, sort_key=None, in_place=True,
        include=None
    ) -> bool:
        """Remove page from oldCat and add it to newCat.

        :param old_cat: category to be removed
        :type old_cat: pywikibot.page.Category
        :param new_cat: category to be added, if any
        :type new_cat: pywikibot.page.Category or None
        :param summary: string to use as an edit summary
        :param sort_key: sortKey to use for the added category.
            Unused if newCat is None, or if inPlace=True
            If sortKey=True, the sortKey used for oldCat will be used.
        :param in_place: if True, change categories in place rather than
            rearranging them.
        :param include: list of tags not to be disabled by default in relevant
            textlib functions, where CategoryLinks can be searched.
        :type include: list
        :return: True if page was saved changed, otherwise False.
        """
        # get list of Category objects the article is in and remove possible
        # duplicates
        cats = []
        for cat in textlib.getCategoryLinks(self.text, site=self.site,
                                            include=include or []):
            if cat not in cats:
                cats.append(cat)
        if not self.has_permission():
            pywikibot.output("Can't edit {}, skipping it..."
                             .format(self.title(as_link=True)))
            return False
        if old_cat not in cats:
            # Only an error outside the Template namespace (10); templates
            # often transclude their categories.
            if self.namespace() != 10:
                pywikibot.error('{} is not in category {}!'
                                .format(self.title(as_link=True),
                                        old_cat.title()))
            else:
                pywikibot.output('{} is not in category {}, skipping...'
                                 .format(self.title(as_link=True),
                                         old_cat.title()))
            return False
        # This prevents the bot from adding new_cat if it is already present.
        if new_cat in cats:
            new_cat = None
        oldtext = self.text
        if in_place or self.namespace() == 10:
            newtext = textlib.replaceCategoryInPlace(oldtext, old_cat, new_cat,
                                                     site=self.site)
        else:
            # Rewrite the whole category block: replace (or drop) old_cat in
            # the collected list, then re-render all category links.
            old_cat_pos = cats.index(old_cat)
            if new_cat:
                if sort_key is True:
                    # Fetch sort_key from old_cat in current page.
                    sort_key = cats[old_cat_pos].sortKey
                cats[old_cat_pos] = Category(self.site, new_cat.title(),
                                             sort_key=sort_key)
            else:
                cats.pop(old_cat_pos)
            try:
                newtext = textlib.replaceCategoryLinks(oldtext, cats)
            except ValueError:
                # Make sure that the only way replaceCategoryLinks() can return
                # a ValueError is in the case of interwiki links to self.
                pywikibot.output('Skipping {} because of interwiki link to '
                                 'self'.format(self.title()))
                return False
        # Only save when the text actually changed.
        if oldtext != newtext:
            try:
                self.put(newtext, summary)
                return True
            except PageSaveRelatedError as error:
                pywikibot.output('Page {} not saved: {}'
                                 .format(self.title(as_link=True), error))
            except NoUsernameError:
                pywikibot.output('Page {} not saved; sysop privileges '
                                 'required.'.format(self.title(as_link=True)))
        return False
    def is_flow_page(self) -> bool:
        """Whether a page is a Flow page.

        :return: True if the page's content model is 'flow-board'.
        """
        return self.content_model == 'flow-board'
def create_short_link(self,
permalink: bool = False,
with_protocol: bool = True) -> str:
"""
Return a shortened link that points to that page.
If shared_urlshortner_wiki is defined in family config, it'll use
that site to create the link instead of the current wiki.
:param permalink: If true, the link will point to the actual revision
of the page.
:param with_protocol: If true, and if it's not already included,
the link will have http(s) protocol prepended. On Wikimedia wikis
the protocol is already present.
:return: The reduced link.
"""
wiki = self.site
if self.site.family.shared_urlshortner_wiki:
wiki = pywikibot.Site(*self.site.family.shared_urlshortner_wiki)
url = self.permalink() if permalink else self.full_url()
link = wiki.create_short_link(url)
if re.match(PROTOCOL_REGEX, link):
if not with_protocol:
return re.sub(PROTOCOL_REGEX, '', link)
elif with_protocol:
return '{}://{}'.format(wiki.protocol(), link)
return link
class Page(BasePage):
    """Page: A MediaWiki page."""
    @deprecated_args(defaultNamespace='ns')
    def __init__(self, source, title: str = '', ns=0):
        """Instantiate a Page object.

        :param source: site or other object that carries title information
        :param title: page title; required and non-empty when source is
            a Site
        :param ns: namespace number used to interpret the title
        :raises ValueError: source is a Site but title is missing or empty
        """
        if isinstance(source, pywikibot.site.BaseSite):
            if not title:
                raise ValueError('Title must be specified and not empty '
                                 'if source is a Site.')
        super().__init__(source, title, ns)
    @property
    def raw_extracted_templates(self):
        """
        Extract templates using :py:obj:`textlib.extract_templates_and_params`.

        Disabled parts and whitespace are stripped, except for
        whitespace in anonymous positional arguments.

        This value is cached.

        :rtype: list of (str, OrderedDict)
        """
        # Lazily computed once per instance, then memoized.
        if not hasattr(self, '_raw_extracted_templates'):
            templates = textlib.extract_templates_and_params(
                self.text, True, True)
            self._raw_extracted_templates = templates
        return self._raw_extracted_templates
    def templatesWithParams(self):
        """
        Return templates used on this Page.

        The templates are extracted by
        :py:obj:`textlib.extract_templates_and_params`, with positional
        arguments placed first in order, and each named argument
        appearing as 'name=value'.

        All parameter keys and values for each template are stripped of
        whitespace.

        :return: a list of tuples with one tuple for each template invocation
            in the page, with the template Page as the first entry and a list
            of parameters as the second entry.
        :rtype: list of (pywikibot.page.Page, list)
        """
        # WARNING: may not return all templates used in particularly
        # intricate cases such as template substitution
        titles = {t.title() for t in self.templates()}
        templates = self.raw_extracted_templates
        # backwards-compatibility: convert the dict returned as the second
        # element into a list in the format used by old scripts
        result = []
        for template in templates:
            try:
                link = pywikibot.Link(template[0], self.site,
                                      default_namespace=10)
                # Skip entries that self.templates() did not report.
                if link.canonical_title() not in titles:
                    continue
            except Error:
                # this is a parser function or magic word, not template name
                # the template name might also contain invalid parts
                continue
            args = template[1]
            intkeys = {}
            named = {}
            positional = []
            # Separate numeric keys (candidate positional arguments) from
            # genuinely named parameters.
            for key in sorted(args):
                try:
                    intkeys[int(key)] = args[key]
                except ValueError:
                    named[key] = args[key]
            for i in range(1, len(intkeys) + 1):
                # only those args with consecutive integer keys can be
                # treated as positional; an integer could also be used
                # (out of order) as the key for a named argument
                # example: {{tmp|one|two|5=five|three}}
                if i in intkeys:
                    positional.append(intkeys[i])
                else:
                    # Gap found: every remaining numeric key is emitted as
                    # a named 'n=value' parameter instead.
                    for k in intkeys:
                        if k < 1 or k >= i:
                            named[str(k)] = intkeys[k]
                    break
            for item in named.items():
                positional.append('{}={}'.format(*item))
            result.append((pywikibot.Page(link, self.site), positional))
        return result
    def set_redirect_target(self, target_page, create=False, force=False,
                            keep_section=False, save=True, **kwargs):
        """
        Change the page's text to point to the redirect page.

        :param target_page: target of the redirect, this argument is required.
        :type target_page: pywikibot.Page or string
        :param create: if true, it creates the redirect even if the page
            doesn't exist.
        :type create: bool
        :param force: if true, it set the redirect target even the page
            doesn't exist or it's not redirect.
        :type force: bool
        :param keep_section: if the old redirect links to a section
            and the new one doesn't it uses the old redirect's section.
        :type keep_section: bool
        :param save: if true, it saves the page immediately.
        :type save: bool
        :param kwargs: Arguments which are used for saving the page directly
            afterwards, like 'summary' for edit summary.
        :raises InterwikiRedirectPageError: target is on a different site
        :raises NoPageError: page does not exist and neither create nor
            force is set
        :raises IsNotRedirectPageError: page exists, is not a redirect and
            force is not set
        """
        if isinstance(target_page, str):
            target_page = pywikibot.Page(self.site, target_page)
        elif self.site != target_page.site:
            raise InterwikiRedirectPageError(self, target_page)
        if not self.exists() and not (create or force):
            raise NoPageError(self)
        if self.exists() and not self.isRedirectPage() and not force:
            raise IsNotRedirectPageError(self)
        redirect_regex = self.site.redirect_regex
        if self.exists():
            old_text = self.get(get_redirect=True)
        else:
            old_text = ''
        result = redirect_regex.search(old_text)
        if result:
            oldlink = result.group(1)
            # Reuse the old redirect's section anchor when requested and
            # the new target carries no section of its own.
            if (keep_section and '#' in oldlink
                    and target_page.section() is None):
                sectionlink = oldlink[oldlink.index('#'):]
                target_page = pywikibot.Page(
                    self.site,
                    target_page.title() + sectionlink
                )
            # Preserve any text surrounding the existing redirect directive.
            prefix = self.text[:result.start()]
            suffix = self.text[result.end():]
        else:
            prefix = ''
            suffix = ''
        target_link = target_page.title(as_link=True, textlink=True,
                                        allow_interwiki=False)
        target_link = '#{} {}'.format(self.site.redirect(), target_link)
        self.text = prefix + target_link + suffix
        if save:
            self.save(**kwargs)
    def get_best_claim(self, prop: str):
        """
        Return the first best Claim for this page.

        Return the first 'preferred' ranked Claim specified by Wikibase
        property or the first 'normal' one otherwise.

        *New in version 3.0.*

        :param prop: property id, "P###"
        :return: Claim object given by Wikibase property number
            for this page object.
        :rtype: pywikibot.Claim or None
        :raises UnknownExtensionError: site has no Wikibase extension
        """
        def find_best_claim(claims):
            """Find the first best ranked claim."""
            index = None
            for i, claim in enumerate(claims):
                if claim.rank == 'preferred':
                    return claim
                if index is None and claim.rank == 'normal':
                    index = i
            if index is None:
                # Neither 'preferred' nor 'normal' rank found; fall back
                # to the first claim.
                index = 0
            return claims[index]
        if not self.site.has_data_repository:
            raise UnknownExtensionError(
                'Wikibase is not implemented for {}.'.format(self.site))
        def get_item_page(func, *args):
            try:
                item_p = func(*args)
                item_p.get()
                return item_p
            except NoPageError:
                return None
            except IsRedirectPageError:
                # item_p was created but turned out to be a redirect;
                # follow it.
                return get_item_page(item_p.getRedirectTarget)
        item_page = get_item_page(pywikibot.ItemPage.fromPage, self)
        if item_page and prop in item_page.claims:
            return find_best_claim(item_page.claims[prop])
        return None
class FilePage(Page):
    """
    A subclass of Page representing a file description page.

    Supports the same interface as Page, with some added methods.
    """
    def __init__(self, source, title: str = ''):
        """Initializer.

        :raises ValueError: the title is not in the file namespace (6)
        """
        self._file_revisions = {}  # dictionary to cache File history.
        super().__init__(source, title, 6)
        if self.namespace() != 6:
            raise ValueError("'{}' is not in the file namespace!"
                             .format(self.title()))
    def _load_file_revisions(self, imageinfo):
        # Fill the revision cache, keyed by each revision's timestamp.
        for file_rev in imageinfo:
            # filemissing in API response indicates most fields are missing
            # see https://gerrit.wikimedia.org/r/c/mediawiki/core/+/533482/
            if 'filemissing' in file_rev:
                pywikibot.warning("File '{}' contains missing revisions"
                                  .format(self.title()))
                continue
            file_revision = FileInfo(file_rev)
            self._file_revisions[file_revision.timestamp] = file_revision
    @property
    def latest_file_info(self):
        """
        Retrieve and store information of latest Image rev. of FilePage.

        At the same time, the whole history of Image is fetched and cached in
        self._file_revisions

        :return: instance of FileInfo()
        """
        if not self._file_revisions:
            self.site.loadimageinfo(self, history=True)
        # Newest revision has the largest timestamp key.
        latest_ts = max(self._file_revisions)
        return self._file_revisions[latest_ts]
    @property
    def oldest_file_info(self):
        """
        Retrieve and store information of oldest Image rev. of FilePage.

        At the same time, the whole history of Image is fetched and cached in
        self._file_revisions

        :return: instance of FileInfo()
        """
        if not self._file_revisions:
            self.site.loadimageinfo(self, history=True)
        # Oldest revision has the smallest timestamp key.
        oldest_ts = min(self._file_revisions)
        return self._file_revisions[oldest_ts]
    def get_file_history(self) -> dict:
        """
        Return the file's version history.

        :return: dictionary with:
            key: timestamp of the entry
            value: instance of FileInfo()
        """
        if not self._file_revisions:
            self.site.loadimageinfo(self, history=True)
        return self._file_revisions
    def getImagePageHtml(self) -> str:
        """Download the file page, and return the HTML, as a string.

        Caches the HTML code, so that if you run this method twice on the
        same FilePage object, the page will only be downloaded once.
        """
        if not hasattr(self, '_imagePageHtml'):
            path = '{}/index.php?title={}'.format(self.site.scriptpath(),
                                                  self.title(as_url=True))
            self._imagePageHtml = http.request(self.site, path).text
        return self._imagePageHtml
    def get_file_url(self, url_width=None, url_height=None,
                     url_param=None) -> str:
        """
        Return the url or the thumburl of the file described on this page.

        Fetch the information if not available.

        Once retrieved, thumburl information will also be accessible as
        latest_file_info attributes, named as in [1]:
        - url, thumburl, thumbwidth and thumbheight

        Parameters correspond to iiprops in:
        [1] https://www.mediawiki.org/wiki/API:Imageinfo

        Parameters validation and error handling left to the API call.

        :param url_width: see iiurlwidth in [1]
        :param url_height: see iiurlheight in [1]
        :param url_param: see iiurlparam in [1]
        :return: latest file url or thumburl
        """
        # Plain url is requested.
        if url_width is None and url_height is None and url_param is None:
            return self.latest_file_info.url
        # Thumburl is requested.
        self.site.loadimageinfo(self, history=not self._file_revisions,
                                url_width=url_width, url_height=url_height,
                                url_param=url_param)
        return self.latest_file_info.thumburl
    @deprecated('file_is_shared', since='20200618')
    def fileIsShared(self) -> bool:  # pragma: no cover
        """DEPRECATED. Check if the image is stored on Wikimedia Commons."""
        return self.file_is_shared()
    def file_is_shared(self) -> bool:
        """Check if the file is stored on any known shared repository."""
        # as of now, the only known repositories are commons and wikitravel
        # TODO: put the URLs to family file
        if not self.site.has_image_repository:
            return False
        if 'wikitravel_shared' in self.site.shared_image_repository():
            return self.latest_file_info.url.startswith(
                'https://wikitravel.org/upload/shared/')
        # default to commons
        return self.latest_file_info.url.startswith(
            'https://upload.wikimedia.org/wikipedia/commons/')
    def getFileVersionHistoryTable(self):
        """Return the version history in the form of a wiki table."""
        lines = []
        for info in self.get_file_history().values():
            dimension = '{width}×{height} px ({size} bytes)'.format(
                **info.__dict__)
            lines.append('| {timestamp} || {user} || {dimension} |'
                         '| <nowiki>{comment}</nowiki>'
                         .format(dimension=dimension, **info.__dict__))
        return ('{| class="wikitable"\n'
                '! {{int:filehist-datetime}} || {{int:filehist-user}} |'
                '| {{int:filehist-dimensions}} || {{int:filehist-comment}}\n'
                '|-\n%s\n|}\n' % '\n|-\n'.join(lines))
    def usingPages(self, total: Optional[int] = None, content: bool = False):
        """Yield Pages on which the file is displayed.

        :param total: iterate no more than this number of pages in total
        :param content: if True, load the current content of each iterated page
            (default False)
        """
        return self.site.imageusage(self, total=total, content=content)
    def upload(self, source: str, **kwargs) -> bool:
        """
        Upload this file to the wiki.

        keyword arguments are from site.upload() method.

        :param source: Path or URL to the file to be uploaded.

        :keyword comment: Edit summary; if this is not provided, then
            filepage.text will be used. An empty summary is not permitted.
            This may also serve as the initial page text (see below).
        :keyword text: Initial page text; if this is not set, then
            filepage.text will be used, or comment.
        :keyword watch: If true, add filepage to the bot user's watchlist
        :keyword ignore_warnings: It may be a static boolean, a callable
            returning a boolean or an iterable. The callable gets a list of
            UploadError instances and the iterable should contain the warning
            codes for which an equivalent callable would return True if all
            UploadError codes are in that list. If the result is False it'll
            not continue uploading the file and otherwise disable any warning
            and reattempt to upload the file. NOTE: If report_success is True
            or None it'll raise an UploadError exception if the static
            boolean is False.
        :type ignore_warnings: bool or callable or iterable of str
        :keyword chunk_size: The chunk size in bytes for chunked uploading
            (see https://www.mediawiki.org/wiki/API:Upload#Chunked_uploading).
            It will only upload in chunks, if the chunk size is positive but
            lower than the file size.
        :type chunk_size: int
        :keyword _file_key: Reuses an already uploaded file using the filekey.
            If None (default) it will upload the file.
        :type _file_key: str or None
        :keyword _offset: When file_key is not None this can be an integer to
            continue a previously canceled chunked upload. If False it treats
            that as a finished upload. If True it requests the stash info from
            the server to determine the offset. By default starts at 0.
        :type _offset: int or bool
        :keyword _verify_stash: Requests the SHA1 and file size uploaded and
            compares it to the local file. Also verifies that _offset is
            matching the file size if the _offset is an int. If _offset is
            False it verifies that the file size matches the local file. If
            None it'll verify the stash when a file key and offset is given.
        :type _verify_stash: bool or None
        :keyword report_success: If the upload was successful it'll print a
            success message and if ignore_warnings is set to False it'll
            raise an UploadError if a warning occurred. If it's
            None (default) it'll be True if ignore_warnings is a bool and False
            otherwise. If it's True or None ignore_warnings must be a bool.
        :return: It returns True if the upload was successful and False
            otherwise.
        """
        filename = url = None
        # A '://' marker distinguishes a URL source from a local path.
        if '://' in source:
            url = source
        else:
            filename = source
        return self.site.upload(self, source_filename=filename, source_url=url,
                                **kwargs)
    def download(self, filename=None, chunk_size=100 * 1024, revision=None):
        """
        Download to filename file of FilePage.

        :param filename: filename where to save file:
            None: self.title(as_filename=True, with_ns=False)
            will be used
            str: provided filename will be used.
        :type filename: None or str
        :param chunk_size: the size of each chunk to be received and
            written to file.
        :type chunk_size: int
        :param revision: file revision to download:
            None: self.latest_file_info will be used
            FileInfo: provided revision will be used.
        :type revision: None or FileInfo
        :return: True if download is successful, False otherwise.
        :raise IOError: if filename cannot be written for any reason.
        """
        if filename is None:
            filename = self.title(as_filename=True, with_ns=False)
        filename = os.path.expanduser(filename)
        if revision is None:
            revision = self.latest_file_info
        req = http.fetch(revision.url, stream=True)
        if req.status_code == HTTPStatus.OK:
            try:
                with open(filename, 'wb') as f:
                    for chunk in req.iter_content(chunk_size):
                        f.write(chunk)
            except IOError as e:
                # NOTE(review): re-raising unchanged; kept to make the
                # documented IOError contract explicit.
                raise e
            # Verify the download against the revision's recorded hash.
            sha1 = compute_file_hash(filename)
            return sha1 == revision.sha1
        pywikibot.warning(
            'Unsuccessfull request ({}): {}'
            .format(req.status_code, req.url))
        return False
    def globalusage(self, total=None):
        """
        Iterate all global usage for this page.

        :param total: iterate no more than this number of pages in total
        :return: a generator that yields Pages also on sites different from
            self.site.
        :rtype: generator
        """
        return self.site.globalusage(self, total=total)
class Category(Page):
    """A page in the Category: namespace."""
    @deprecated_args(sortKey='sort_key')
    def __init__(self, source, title: str = '', sort_key=None):
        """
        Initializer.

        All parameters are the same as for Page() Initializer.

        :raises ValueError: the title is not in the category namespace (14)
        """
        # Default sort key used when this category is rendered as a link.
        self.sortKey = sort_key
        super().__init__(source, title, ns=14)
        if self.namespace() != 14:
            raise ValueError("'{}' is not in the category namespace!"
                             .format(self.title()))
    @deprecated_args(sortKey='sort_key')
    def aslink(self, sort_key: Optional[str] = None) -> str:
        """
        Return a link to place a page in this Category.

        Use this only to generate a "true" category link, not for interwikis
        or text links to category pages.

        :param sort_key: The sort key for the article to be placed in this
            Category; if omitted, default sort key is used.
        """
        # An explicit argument wins over the instance's default sort key.
        key = sort_key or self.sortKey
        if key is not None:
            title_with_sort_key = self.title(with_section=False) + '|' + key
        else:
            title_with_sort_key = self.title(with_section=False)
        return '[[{}]]'.format(title_with_sort_key)
    def subcategories(self,
                      recurse: Union[int, bool] = False,
                      total: Optional[int] = None,
                      content: bool = False):
        """
        Iterate all subcategories of the current category.

        :param recurse: if not False or 0, also iterate subcategories of
            subcategories. If an int, limit recursion to this number of
            levels. (Example: recurse=1 will iterate direct subcats and
            first-level sub-sub-cats, but no deeper.)
        :param total: iterate no more than this number of
            subcategories in total (at all levels)
        :param content: if True, retrieve the content of the current version
            of each category description page (default False)
        """
        # An int recurse counts down one level per recursion;
        # a bool True recurses without depth limit.
        if not isinstance(recurse, bool) and recurse:
            recurse = recurse - 1
        if not hasattr(self, '_subcats'):
            # First call: query the API and cache direct subcategories.
            self._subcats = []
            for member in self.site.categorymembers(
                    self, member_type='subcat', total=total, content=content):
                subcat = Category(member)
                self._subcats.append(subcat)
                yield subcat
                if total is not None:
                    total -= 1
                    if total == 0:
                        return
                if recurse:
                    for item in subcat.subcategories(
                            recurse, total=total, content=content):
                        yield item
                        if total is not None:
                            total -= 1
                            if total == 0:
                                return
        else:
            # Later calls serve the cached direct subcategories.
            for subcat in self._subcats:
                yield subcat
                if total is not None:
                    total -= 1
                    if total == 0:
                        return
                if recurse:
                    for item in subcat.subcategories(
                            recurse, total=total, content=content):
                        yield item
                        if total is not None:
                            total -= 1
                            if total == 0:
                                return
    @deprecated_args(startFrom='startprefix', startsort=True, endsort=True)
    def articles(self,
                 recurse: Union[int, bool] = False,
                 total: Optional[int] = None,
                 content: bool = False,
                 namespaces: Union[int, List[int]] = None,
                 sortby: Optional[str] = None,
                 reverse: bool = False,
                 starttime=None, endtime=None,
                 startprefix: Optional[str] = None,
                 endprefix: Optional[str] = None):
        """
        Yield all articles in the current category.

        By default, yields all *pages* in the category that are not
        subcategories!

        :param recurse: if not False or 0, also iterate articles in
            subcategories. If an int, limit recursion to this number of
            levels. (Example: recurse=1 will iterate articles in first-level
            subcats, but no deeper.)
        :param total: iterate no more than this number of pages in
            total (at all levels)
        :param namespaces: only yield pages in the specified namespaces
        :param content: if True, retrieve the content of the current version
            of each page (default False)
        :param sortby: determines the order in which results are generated,
            valid values are "sortkey" (default, results ordered by category
            sort key) or "timestamp" (results ordered by time page was
            added to the category). This applies recursively.
        :param reverse: if True, generate results in reverse order
            (default False)
        :param starttime: if provided, only generate pages added after this
            time; not valid unless sortby="timestamp"
        :type starttime: pywikibot.Timestamp
        :param endtime: if provided, only generate pages added before this
            time; not valid unless sortby="timestamp"
        :type endtime: pywikibot.Timestamp
        :param startprefix: if provided, only generate pages >= this title
            lexically; not valid if sortby="timestamp"
        :param endprefix: if provided, only generate pages < this title
            lexically; not valid if sortby="timestamp"
        :rtype: typing.Iterable[pywikibot.Page]
        """
        # seen tracks yielded pages to avoid duplicates across subcats;
        # only needed when recursing.
        seen = set()
        for member in self.site.categorymembers(self,
                                                namespaces=namespaces,
                                                total=total,
                                                content=content,
                                                sortby=sortby,
                                                reverse=reverse,
                                                starttime=starttime,
                                                endtime=endtime,
                                                startprefix=startprefix,
                                                endprefix=endprefix,
                                                member_type=['page', 'file']):
            if recurse:
                seen.add(hash(member))
            yield member
            if total is not None:
                total -= 1
                if total == 0:
                    return
        if recurse:
            if not isinstance(recurse, bool) and recurse:
                recurse -= 1
            for subcat in self.subcategories():
                for article in subcat.articles(recurse=recurse,
                                               total=total,
                                               content=content,
                                               namespaces=namespaces,
                                               sortby=sortby,
                                               reverse=reverse,
                                               starttime=starttime,
                                               endtime=endtime,
                                               startprefix=startprefix,
                                               endprefix=endprefix):
                    hash_value = hash(article)
                    if hash_value in seen:
                        continue
                    seen.add(hash_value)
                    yield article
                    if total is not None:
                        total -= 1
                        if total == 0:
                            return
    def members(self, recurse: bool = False,
                namespaces=None,
                total: Optional[int] = None,
                content=False):
        """Yield all category contents (subcats, pages, and files).

        :param recurse: if not False or 0, also iterate contents of
            subcategories; an int limits the recursion depth
        :param namespaces: only yield pages in the specified namespaces
        :param total: iterate no more than this number of members in total
        :param content: if True, retrieve the content of the current
            version of each member page
        :rtype: typing.Iterable[pywikibot.Page]
        """
        for member in self.site.categorymembers(
                self, namespaces=namespaces, total=total, content=content):
            yield member
            if total is not None:
                total -= 1
                if total == 0:
                    return
        if recurse:
            # An int recurse counts down one level per recursion.
            if not isinstance(recurse, bool) and recurse:
                recurse = recurse - 1
            for subcat in self.subcategories():
                for article in subcat.members(
                        recurse, namespaces, total=total, content=content):
                    yield article
                    if total is not None:
                        total -= 1
                        if total == 0:
                            return
    def isEmptyCategory(self) -> bool:
        """Return True if category has no members (including subcategories)."""
        ci = self.categoryinfo
        return sum(ci[k] for k in ['files', 'pages', 'subcats']) == 0
    def isHiddenCategory(self) -> bool:
        """Return True if the category is hidden."""
        # Hidden categories carry the 'hiddencat' page property.
        return 'hiddencat' in self.properties()
    @property
    def categoryinfo(self) -> dict:
        """
        Return a dict containing information about the category.

        The dict contains values for:

        Numbers of pages, subcategories, files, and total contents.
        """
        return self.site.categoryinfo(self)
    def newest_pages(self, total=None):
        """
        Return pages in a category ordered by the creation date.

        If two or more pages are created at the same time, the pages are
        returned in the order they were added to the category. The most
        recently added page is returned first.

        It only allows to return the pages ordered from newest to oldest, as it
        is impossible to determine the oldest page in a category without
        checking all pages. But it is possible to check the category in order
        with the newly added first and it yields all pages which were created
        after the currently checked page was added (and thus there is no page
        created after any of the cached but added before the currently
        checked).

        :param total: The total number of pages queried.
        :type total: int
        :return: A page generator of all pages in a category ordered by the
            creation date. From newest to oldest. Note: It currently only
            returns Page instances and not a subclass of it if possible. This
            might change so don't expect to only get Page instances.
        :rtype: generator
        """
        def check_cache(latest):
            """Return the cached pages in order and not more than total."""
            cached = []
            for timestamp in sorted((ts for ts in cache if ts > latest),
                                    reverse=True):
                # The complete list can be removed, it'll either yield all of
                # them, or only a portion but will skip the rest anyway
                cached += cache.pop(timestamp)[:None if total is None else
                                               total - len(cached)]
                if total and len(cached) >= total:
                    break  # already got enough
            assert total is None or len(cached) <= total, \
                'Number of caches is more than total number requested'
            return cached
        # all pages which have been checked but where created before the
        # current page was added, at some point they will be created after
        # the current page was added. It saves all pages via the creation
        # timestamp. Be prepared for multiple pages.
        cache = defaultdict(list)
        # TODO: Make site.categorymembers usable as it returns pages
        # There is no total defined, as it's not known how many pages need to
        # be checked before the total amount of new pages was found. In worst
        # case all pages of a category need to be checked.
        for member in pywikibot.data.api.QueryGenerator(
                site=self.site, parameters={
                    'list': 'categorymembers', 'cmsort': 'timestamp',
                    'cmdir': 'older', 'cmprop': 'timestamp|title',
                    'cmtitle': self.title()}):
            # TODO: Upcast to suitable class
            page = pywikibot.Page(self.site, member['title'])
            assert page.namespace() == member['ns'], \
                'Namespace of the page is not consistent'
            # Flush every cached page created after this member was added.
            cached = check_cache(pywikibot.Timestamp.fromISOformat(
                member['timestamp']))
            yield from cached
            if total is not None:
                total -= len(cached)
                if total <= 0:
                    break
            cache[page.oldest_revision.timestamp] += [page]
        else:
            # clear cache
            assert total is None or total > 0, \
                'As many items as given in total already returned'
            yield from check_cache(pywikibot.Timestamp.min)
class User(Page):
"""
A class that represents a Wiki user.
This class also represents the Wiki page User:<username>
"""
    @deprecated_args(site='source', name='title')
    def __init__(self, source, title=''):
        """
        Initializer for a User object.

        All parameters are the same as for Page() Initializer.

        :raises ValueError: the title is not in the user namespace (2)
        """
        # An autoblock ID is written as '#<id>' (or 'User:#<id>'); strip
        # the marker and remember this object is not a real account.
        self._isAutoblock = True
        if title.startswith('#'):
            title = title[1:]
        elif ':#' in title:
            title = title.replace(':#', ':')
        else:
            self._isAutoblock = False
        super().__init__(source, title, ns=2)
        if self.namespace() != 2:
            raise ValueError("'{}' is not in the user namespace!"
                             .format(self.title()))
        if self._isAutoblock:
            # This user is probably being queried for purpose of lifting
            # an autoblock.
            pywikibot.output(
                'This is an autoblock ID, you can only use to unblock it.')
@property
def username(self) -> str:
"""
The username.
Convenience method that returns the title of the page with
namespace prefix omitted, which is the username.
"""
if self._isAutoblock:
return '#' + self.title(with_ns=False)
return self.title(with_ns=False)
def isRegistered(self, force: bool = False) -> bool:
"""
Determine if the user is registered on the site.
It is possible to have a page named User:xyz and not have
a corresponding user with username xyz.
The page does not need to exist for this method to return
True.
:param force: if True, forces reloading the data from API
"""
# T135828: the registration timestamp may be None but the key exists
return (not self.isAnonymous()
and 'registration' in self.getprops(force))
    def isAnonymous(self) -> bool:
        """Determine if the user is editing as an IP address.

        An IP-shaped username indicates an unregistered (anonymous)
        editor.
        """
        return is_ip_address(self.username)
def getprops(self, force: bool = False) -> dict:
"""
Return a properties about the user.
:param force: if True, forces reloading the data from API
"""
if force and hasattr(self, '_userprops'):
del self._userprops
if not hasattr(self, '_userprops'):
self._userprops = list(self.site.users([self.username, ]))[0]
if self.isAnonymous():
r = list(self.site.blocks(users=self.username))
if r:
self._userprops['blockedby'] = r[0]['by']
self._userprops['blockreason'] = r[0]['reason']
return self._userprops
def registration(self, force=False):
"""
Fetch registration date for this user.
:param force: if True, forces reloading the data from API
:type force: bool
:rtype: pywikibot.Timestamp or None
"""
if not self.isAnonymous():
reg = self.getprops(force).get('registration')
if reg:
return pywikibot.Timestamp.fromISOformat(reg)
return None
def editCount(self, force: bool = False) -> int:
"""
Return edit count for a registered user.
Always returns 0 for 'anonymous' users.
:param force: if True, forces reloading the data from API
"""
return self.getprops(force).get('editcount', 0)
def isBlocked(self, force: bool = False) -> bool:
"""
Determine whether the user is currently blocked.
:param force: if True, forces reloading the data from API
"""
return 'blockedby' in self.getprops(force)
def isEmailable(self, force: bool = False) -> bool:
"""
Determine whether emails may be send to this user through MediaWiki.
:param force: if True, forces reloading the data from API
"""
return not self.isAnonymous() and 'emailable' in self.getprops(force)
def groups(self, force: bool = False) -> list:
"""
Return a list of groups to which this user belongs.
The list of groups may be empty.
:param force: if True, forces reloading the data from API
:return: groups property
"""
return self.getprops(force).get('groups', [])
def gender(self, force: bool = False) -> str:
"""Return the gender of the user.
:param force: if True, forces reloading the data from API
:return: return 'male', 'female', or 'unknown'
"""
if self.isAnonymous():
return 'unknown'
return self.getprops(force).get('gender', 'unknown')
def rights(self, force: bool = False) -> list:
"""Return user rights.
:param force: if True, forces reloading the data from API
:return: return user rights
"""
return self.getprops(force).get('rights', [])
def getUserPage(self, subpage=''):
"""
Return a Page object relative to this user's main page.
:param subpage: subpage part to be appended to the main
page title (optional)
:type subpage: str
:return: Page object of user page or user subpage
:rtype: pywikibot.Page
"""
if self._isAutoblock:
# This user is probably being queried for purpose of lifting
# an autoblock, so has no user pages per se.
raise AutoblockUserError(
'This is an autoblock ID, you can only use to unblock it.')
if subpage:
subpage = '/' + subpage
return Page(Link(self.title() + subpage, self.site))
def getUserTalkPage(self, subpage=''):
"""
Return a Page object relative to this user's main talk page.
:param subpage: subpage part to be appended to the main
talk page title (optional)
:type subpage: str
:return: Page object of user talk page or user talk subpage
:rtype: pywikibot.Page
"""
if self._isAutoblock:
# This user is probably being queried for purpose of lifting
# an autoblock, so has no user talk pages per se.
raise AutoblockUserError(
'This is an autoblock ID, you can only use to unblock it.')
if subpage:
subpage = '/' + subpage
return Page(Link(self.username + subpage,
self.site, default_namespace=3))
def send_email(self, subject: str, text: str, ccme: bool = False) -> bool:
"""
Send an email to this user via MediaWiki's email interface.
:param subject: the subject header of the mail
:param text: mail body
:param ccme: if True, sends a copy of this email to the bot
:raises NotEmailableError: the user of this User is not emailable
:raises UserRightsError: logged in user does not have 'sendemail' right
:return: operation successful indicator
"""
if not self.isEmailable():
raise NotEmailableError(self)
if not self.site.has_right('sendemail'):
raise UserRightsError("You don't have permission to send mail")
params = {
'action': 'emailuser',
'target': self.username,
'token': self.site.tokens['email'],
'subject': subject,
'text': text,
}
if ccme:
params['ccme'] = 1
mailrequest = self.site._simple_request(**params)
maildata = mailrequest.submit()
if 'emailuser' in maildata:
if maildata['emailuser']['result'] == 'Success':
return True
return False
def block(self, *args, **kwargs):
"""
Block user.
Refer :py:obj:`APISite.blockuser` method for parameters.
:return: None
"""
try:
self.site.blockuser(self, *args, **kwargs)
except APIError as err:
if err.code == 'invalidrange':
raise ValueError('{} is not a valid IP range.'
.format(self.username))
raise err
    def unblock(self, reason: Optional[str] = None):
        """
        Remove the block for the user.

        :param reason: Reason for the unblock.
        """
        # Delegate the unblock to the site API.
        self.site.unblockuser(self, reason)
    def logevents(self, **kwargs):
        """Yield user activities.

        :keyword logtype: only iterate entries of this type
            (see mediawiki api documentation for available types)
        :type logtype: str
        :keyword page: only iterate entries affecting this page
        :type page: Page or str
        :keyword namespace: namespace to retrieve logevents from
        :type namespace: int or Namespace
        :keyword start: only iterate entries from and after this Timestamp
        :type start: Timestamp or ISO date string
        :keyword end: only iterate entries up to and through this Timestamp
        :type end: Timestamp or ISO date string
        :keyword reverse: if True, iterate oldest entries first
            (default: newest)
        :type reverse: bool
        :keyword tag: only iterate entries tagged with this tag
        :type tag: str
        :keyword total: maximum number of events to iterate
        :type total: int
        :rtype: iterable
        """
        # Delegate to the site, constrained to events performed by this
        # user; all keyword filters are passed through unchanged.
        return self.site.logevents(user=self.username, **kwargs)
@property
def last_event(self):
"""Return last user activity.
:return: last user log entry
:rtype: LogEntry or None
"""
return next(iter(self.logevents(total=1)), None)
@deprecated_args(limit='total', namespace='namespaces')
def contributions(self, total: int = 500, **kwargs) -> tuple:
"""
Yield tuples describing this user edits.
Each tuple is composed of a pywikibot.Page object,
the revision id (int), the edit timestamp (as a pywikibot.Timestamp
object), and the comment (str).
Pages returned are not guaranteed to be unique.
:param total: limit result to this number of pages
:keyword start: Iterate contributions starting at this Timestamp
:keyword end: Iterate contributions ending at this Timestamp
:keyword reverse: Iterate oldest contributions first (default: newest)
:keyword namespaces: only iterate pages in these namespaces
:type namespaces: iterable of str or Namespace key,
or a single instance of those types. May be a '|' separated
list of namespace identifiers.
:keyword showMinor: if True, iterate only minor edits; if False and
not None, iterate only non-minor edits (default: iterate both)
:keyword top_only: if True, iterate only edits which are the latest
revision (default: False)
:return: tuple of pywikibot.Page, revid, pywikibot.Timestamp, comment
"""
for contrib in self.site.usercontribs(
user=self.username, total=total, **kwargs):
ts = pywikibot.Timestamp.fromISOformat(contrib['timestamp'])
yield (Page(self.site, contrib['title'], contrib['ns']),
contrib['revid'],
ts,
contrib.get('comment'))
@property
def first_edit(self):
"""Return first user contribution.
:return: first user contribution entry
:return: tuple of pywikibot.Page, revid, pywikibot.Timestamp, comment
:rtype: tuple or None
"""
return next(self.contributions(reverse=True, total=1), None)
@property
def last_edit(self):
"""Return last user contribution.
:return: last user contribution entry
:return: tuple of pywikibot.Page, revid, pywikibot.Timestamp, comment
:rtype: tuple or None
"""
return next(self.contributions(total=1), None)
def deleted_contributions(
self, *, total: int = 500, **kwargs
) -> Iterable[Tuple[Page, Revision]]:
"""Yield tuples describing this user's deleted edits.
*New in version 5.5.*
:param total: Limit results to this number of pages
:keyword start: Iterate contributions starting at this Timestamp
:keyword end: Iterate contributions ending at this Timestamp
:keyword reverse: Iterate oldest contributions first (default: newest)
:keyword namespaces: Only iterate pages in these namespaces
"""
for data in self.site.alldeletedrevisions(user=self.username,
total=total, **kwargs):
page = Page(self.site, data['title'], data['ns'])
for contrib in data['revisions']:
yield page, Revision(**contrib)
@deprecate_arg('number', 'total')
def uploadedImages(self, total=10):
"""
Yield tuples describing files uploaded by this user.
Each tuple is composed of a pywikibot.Page, the timestamp (str in
ISO8601 format), comment (str) and a bool for pageid > 0.
Pages returned are not guaranteed to be unique.
:param total: limit result to this number of pages
:type total: int
"""
if not self.isRegistered():
return
for item in self.logevents(logtype='upload', total=total):
yield (item.page(),
str(item.timestamp()),
item.comment(),
item.pageid() > 0)
@property
def is_thankable(self) -> bool:
"""
Determine if the user has thanks notifications enabled.
NOTE: This doesn't accurately determine if thanks is enabled for user.
Privacy of thanks preferences is under discussion, please see
https://phabricator.wikimedia.org/T57401#2216861, and
https://phabricator.wikimedia.org/T120753#1863894
"""
return self.isRegistered() and 'bot' not in self.groups()
class WikibaseEntity:
    """
    The base interface for Wikibase entities.
    Each entity is identified by a data repository it belongs to
    and an identifier.
    :cvar DATA_ATTRIBUTES: dictionary which maps data attributes (eg. 'labels',
        'claims') to appropriate collection classes (eg. LanguageDict,
        ClaimsCollection)
    :cvar entity_type: entity type identifier
    :type entity_type: str
    :cvar title_pattern: regular expression which matches all possible
        entity ids for this entity type
    :type title_pattern: str
    """
    # Subclasses override this with their supported data attributes;
    # the base class itself exposes no entity data.
    DATA_ATTRIBUTES = {}  # type: Dict[str, Any]
    def __init__(self, repo, id_=None):
        """
        Initializer.
        :param repo: Entity repository.
        :type repo: DataSite
        :param id_: Entity identifier.
        :type id_: str or None, -1 and None mean non-existing
        """
        self.repo = repo
        # '-1' is the internal sentinel for a not-yet-created entity.
        self.id = id_ if id_ is not None else '-1'
        if self.id != '-1' and not self.is_valid_id(self.id):
            raise InvalidTitleError(
                "'{}' is not a valid {} page title"
                .format(self.id, self.entity_type))
    def __repr__(self):
        """Return a representation mirroring the constructor call."""
        if self.id != '-1':
            return 'pywikibot.page.{}({!r}, {!r})'.format(
                self.__class__.__name__, self.repo, self.id)
        return 'pywikibot.page.{}({!r})'.format(
            self.__class__.__name__, self.repo)
    @classmethod
    def is_valid_id(cls, entity_id: str) -> bool:
        """
        Whether the string can be a valid id of the entity type.
        :param entity_id: The ID to test.
        """
        # Subclasses without a title_pattern accept any id.
        if not hasattr(cls, 'title_pattern'):
            return True
        return bool(re.fullmatch(cls.title_pattern, entity_id))
    def __getattr__(self, name):
        # Lazy resolution of the data attributes ('labels', 'claims', ...):
        # a not-yet-created entity (id '-1') gets fresh empty collections,
        # an existing one triggers a full fetch via get().
        if name in self.DATA_ATTRIBUTES:
            if self.getID() == '-1':
                # setattr stores the collections in the instance dict, so
                # __getattr__ is not consulted for them again.
                for key, cls in self.DATA_ATTRIBUTES.items():
                    setattr(self, key, cls.new_empty(self.repo))
                return getattr(self, name)
            return self.get()[name]
        raise AttributeError("'{}' object has no attribute '{}'"
                             .format(self.__class__.__name__, name))
    def _defined_by(self, singular: bool = False) -> dict:
        """
        Internal function to provide the API parameters to identify the entity.
        An empty dict is returned if the entity has not been created yet.
        :param singular: Whether the parameter names should use the singular
            form
        :return: API parameters
        """
        params = {}
        if self.id != '-1':
            if singular:
                params['id'] = self.id
            else:
                params['ids'] = self.id
        return params
    def getID(self, numeric=False):
        """
        Get the identifier of this entity.
        :param numeric: Strip the first letter and return an int
        :type numeric: bool
        """
        if numeric:
            # Non-existing entities are reported as numeric -1.
            return int(self.id[1:]) if self.id != '-1' else -1
        return self.id
    def get_data_for_new_entity(self) -> dict:
        """
        Return data required for creation of a new entity.
        Override it if you need.
        """
        return {}
    def toJSON(self, diffto: Optional[dict] = None) -> dict:
        """
        Create JSON suitable for Wikibase API.
        When diffto is provided, JSON representing differences
        to the provided data is created.
        :param diffto: JSON containing entity data
        """
        data = {}
        for key in self.DATA_ATTRIBUTES:
            attr = getattr(self, key, None)
            if attr is None:
                continue
            if diffto:
                value = attr.toJSON(diffto=diffto.get(key))
            else:
                value = attr.toJSON()
            # Empty diffs/collections are omitted from the payload.
            if value:
                data[key] = value
        return data
    @classmethod
    def _normalizeData(cls, data: dict) -> dict:
        """
        Helper function to expand data into the Wikibase API structure.
        :param data: The dict to normalize
        :return: The dict with normalized data
        """
        norm_data = {}
        for key, attr in cls.DATA_ATTRIBUTES.items():
            if key in data:
                norm_data[key] = attr.normalizeData(data[key])
        return norm_data
    @property
    def latest_revision_id(self) -> Optional[int]:
        """
        Get the revision identifier for the most recent revision of the entity.
        :rtype: int or None if it cannot be determined
        :raise NoWikibaseEntityError: if the entity doesn't exist
        """
        if not hasattr(self, '_revid'):
            # fixme: unlike BasePage.latest_revision_id, this raises
            # exception when entity is redirect, cannot use get_redirect
            self.get()
        return self._revid
    @latest_revision_id.setter
    def latest_revision_id(self, value: Optional[int]) -> None:
        self._revid = value
    @latest_revision_id.deleter
    def latest_revision_id(self) -> None:
        if hasattr(self, '_revid'):
            del self._revid
    def exists(self) -> bool:
        """Determine if an entity exists in the data repository."""
        if not hasattr(self, '_content'):
            try:
                self.get()
                return True
            except NoWikibaseEntityError:
                return False
        # Cached content: the API marks absent entities with 'missing'.
        return 'missing' not in self._content
    def get(self, force: bool = False) -> dict:
        """
        Fetch all entity data and cache it.
        :param force: override caching
        :raise NoWikibaseEntityError: if this entity doesn't exist
        :return: actual data which entity holds
        """
        if force or not hasattr(self, '_content'):
            identification = self._defined_by()
            if not identification:
                raise NoWikibaseEntityError(self)
            try:
                data = self.repo.loadcontent(identification)
            except APIError as err:
                if err.code == 'no-such-entity':
                    raise NoWikibaseEntityError(self)
                raise
            # The repository may have normalized or resolved the id.
            item_index, content = data.popitem()
            self.id = item_index
            self._content = content
        if 'missing' in self._content:
            raise NoWikibaseEntityError(self)
        self.latest_revision_id = self._content.get('lastrevid')
        data = {}
        # This initializes all data attributes declared in DATA_ATTRIBUTES
        # from the fetched content (missing keys become empty collections).
        for key, cls in self.DATA_ATTRIBUTES.items():
            value = cls.fromJSON(self._content.get(key, {}), self.repo)
            setattr(self, key, value)
            data[key] = value
        return data
    def editEntity(self, data=None, **kwargs):
        """
        Edit an entity using Wikibase wbeditentity API.
        :param data: Data to be saved
        :type data: dict, or None to save the current content of the entity.
        """
        if data is None:
            # Save the local modifications as a diff against cached content.
            data = self.toJSON(diffto=getattr(self, '_content', None))
        else:
            data = self._normalizeData(data)
        baserevid = getattr(self, '_revid', None)
        updates = self.repo.editEntity(
            self, data, baserevid=baserevid, **kwargs)
        # the attribute may have been unset in ItemPage
        if getattr(self, 'id', '-1') == '-1':
            # A new entity was created: adopt the id assigned by the server.
            self.__init__(self.repo, updates['entity']['id'])
        # the response also contains some data under the 'entity' key
        # but it is NOT the actual content
        # see also [[d:Special:Diff/1356933963]]
        # TODO: there might be some circumstances under which
        # the content can be safely reused
        if hasattr(self, '_content'):
            del self._content
        self.latest_revision_id = updates['entity'].get('lastrevid')
    def concept_uri(self):
        """
        Return the full concept URI.
        :raise NoWikibaseEntityError: if this entity doesn't exist
        """
        entity_id = self.getID()
        if entity_id == '-1':
            raise NoWikibaseEntityError(self)
        return '{}{}'.format(self.repo.concept_base_uri, entity_id)
class WikibasePage(BasePage, WikibaseEntity):
    """
    Mixin base class for Wikibase entities which are also pages (eg. items).
    There should be no need to instantiate this directly.
    """
    # Entity content participates in the page cache invalidation.
    _cache_attrs = BasePage._cache_attrs + ('_content', )
    def __init__(self, site, title='', **kwargs):
        """
        Initializer.
        If title is provided, either ns or entity_type must also be provided,
        and will be checked against the title parsed using the Page
        initialisation logic.
        :param site: Wikibase data site
        :type site: pywikibot.site.DataSite
        :param title: normalized title of the page
        :type title: str
        :keyword ns: namespace
        :type ns: Namespace instance, or int
        :keyword entity_type: Wikibase entity type
        :type entity_type: str ('item' or 'property')
        :raises TypeError: incorrect use of parameters
        :raises ValueError: incorrect namespace
        :raises pywikibot.exceptions.Error: title parsing problems
        :raises NotImplementedError: the entity type is not supported
        """
        if not isinstance(site, pywikibot.site.DataSite):
            raise TypeError('site must be a pywikibot.site.DataSite object')
        if title and ('ns' not in kwargs and 'entity_type' not in kwargs):
            pywikibot.debug('{}.__init__: {} title {!r} specified without '
                            'ns or entity_type'
                            .format(self.__class__.__name__, site,
                                    title),
                            layer='wikibase')
        self._namespace = None
        # Resolve the namespace from 'ns' first (Namespace object or
        # numeric id), then cross-check against 'entity_type' below.
        if 'ns' in kwargs:
            if isinstance(kwargs['ns'], Namespace):
                self._namespace = kwargs.pop('ns')
                kwargs['ns'] = self._namespace.id
            else:
                # numerical namespace given
                ns = int(kwargs['ns'])
                if site.item_namespace.id == ns:
                    self._namespace = site.item_namespace
                elif site.property_namespace.id == ns:
                    self._namespace = site.property_namespace
                else:
                    raise ValueError('{!r}: Namespace "{}" is not valid'
                                     .format(site, int(ns)))
        if 'entity_type' in kwargs:
            # entity_type is consumed here; only 'ns' is forwarded to
            # BasePage.__init__.
            entity_type = kwargs.pop('entity_type')
            try:
                entity_type_ns = site.get_namespace_for_entity_type(
                    entity_type)
            except EntityTypeUnknownError:
                raise ValueError('Wikibase entity type "{}" unknown'
                                 .format(entity_type))
            if self._namespace:
                if self._namespace != entity_type_ns:
                    raise ValueError('Namespace "{}" is not valid for Wikibase'
                                     ' entity type "{}"'
                                     .format(int(kwargs['ns']), entity_type))
            else:
                self._namespace = entity_type_ns
                kwargs['ns'] = self._namespace.id
        BasePage.__init__(self, site, title, **kwargs)
        # If a title was not provided,
        # avoid checks which may cause an exception.
        if not title:
            WikibaseEntity.__init__(self, site)
            return
        if self._namespace:
            # Title was parsed by BasePage; ensure it landed in the
            # namespace requested via ns/entity_type.
            if self._link.namespace != self._namespace.id:
                raise ValueError("'{}' is not in the namespace {}"
                                 .format(title, self._namespace.id))
        else:
            # Neither ns or entity_type was provided.
            # Use the _link to determine entity type.
            ns = self._link.namespace
            if self.site.item_namespace.id == ns:
                self._namespace = self.site.item_namespace
            elif self.site.property_namespace.id == ns:
                self._namespace = self.site.property_namespace
            else:
                raise ValueError('{!r}: Namespace "{!r}" is not valid'
                                 .format(self.site, ns))
        WikibaseEntity.__init__(
            self,
            # .site forces a parse of the Link title to determine site
            self.site,
            # Link.__init__, called from Page.__init__, has cleaned the title
            # stripping whitespace and uppercasing the first letter according
            # to the namespace case=first-letter.
            self._link.title)
    def namespace(self) -> int:
        """
        Return the number of the namespace of the entity.
        :return: Namespace id
        """
        return self._namespace.id
    def exists(self) -> bool:
        """Determine if an entity exists in the data repository."""
        if not hasattr(self, '_content'):
            try:
                # Unlike WikibaseEntity.exists, redirects count as existing.
                self.get(get_redirect=True)
                return True
            except NoPageError:
                return False
        return 'missing' not in self._content
    def botMayEdit(self) -> bool:
        """
        Return whether bots may edit this page.
        Because there is currently no system to mark a page that it shouldn't
        be edited by bots on Wikibase pages it always returns True. The content
        of the page is not text but a dict, the original way (to search for a
        template) doesn't apply.
        :return: True
        """
        return True
    def get(self, force: bool = False, *args, **kwargs) -> dict:
        """
        Fetch all page data, and cache it.
        :param force: override caching
        :raise NotImplementedError: a value in args or kwargs
        :return: actual data which entity holds
        :note: dicts returned by this method are references to content
            of this entity and their modifying may indirectly cause
            unwanted change to the live content
        """
        if args or kwargs:
            raise NotImplementedError(
                '{}.get does not implement var args: {!r} and {!r}'.format(
                    self.__class__.__name__, args, kwargs))
        # todo: this variable is specific to ItemPage
        # True when instantiated via ItemPage.fromPage with lazy_load:
        # only _site/_title are known, the entity id is not yet resolved.
        lazy_loading_id = not hasattr(self, 'id') and hasattr(self, '_site')
        try:
            data = WikibaseEntity.get(self, force=force)
        except NoWikibaseEntityError:
            if lazy_loading_id:
                p = Page(self._site, self._title)
                if not p.exists():
                    raise NoPageError(p)
                # todo: raise a nicer exception here (T87345)
            raise NoPageError(self)
        if 'pageid' in self._content:
            self._pageid = self._content['pageid']
        # xxx: this is ugly
        if 'claims' in data:
            # Claims need a back-reference to their owning entity.
            self.claims.set_on_item(self)
        return data
    @property
    def latest_revision_id(self) -> int:
        """
        Get the revision identifier for the most recent revision of the entity.
        :rtype: int
        :raise pywikibot.exceptions.NoPageError: if the entity doesn't exist
        """
        if not hasattr(self, '_revid'):
            self.get()
        return self._revid
    @latest_revision_id.setter
    def latest_revision_id(self, value):
        self._revid = value
    @latest_revision_id.deleter
    def latest_revision_id(self):
        # fixme: this seems too destructive in comparison to the parent
        self.clear_cache()
    @allow_asynchronous
    def editEntity(self, data=None, **kwargs):
        """
        Edit an entity using Wikibase wbeditentity API.
        This function is wrapped around by:
        - editLabels
        - editDescriptions
        - editAliases
        - ItemPage.setSitelinks
        :param data: Data to be saved
        :type data: dict, or None to save the current content of the entity.
        :keyword asynchronous: if True, launch a separate thread to edit
            asynchronously
        :type asynchronous: bool
        :keyword callback: a callable object that will be called after the
            entity has been updated. It must take two arguments: (1) a
            WikibasePage object, and (2) an exception instance, which will be
            None if the page was saved successfully. This is intended for use
            by bots that need to keep track of which saves were successful.
        :type callback: callable
        """
        # kept for the decorator
        super().editEntity(data, **kwargs)
    def editLabels(self, labels, **kwargs):
        """
        Edit entity labels.
        Labels should be a dict, with the key
        as a language or a site object. The
        value should be the string to set it to.
        You can set it to '' to remove the label.
        """
        data = {'labels': labels}
        self.editEntity(data, **kwargs)
    def editDescriptions(self, descriptions, **kwargs):
        """
        Edit entity descriptions.
        Descriptions should be a dict, with the key
        as a language or a site object. The
        value should be the string to set it to.
        You can set it to '' to remove the description.
        """
        data = {'descriptions': descriptions}
        self.editEntity(data, **kwargs)
    def editAliases(self, aliases, **kwargs):
        """
        Edit entity aliases.
        Aliases should be a dict, with the key
        as a language or a site object. The
        value should be a list of strings.
        """
        data = {'aliases': aliases}
        self.editEntity(data, **kwargs)
    def set_redirect_target(self, target_page, create=False, force=False,
                            keep_section=False, save=True, **kwargs):
        """
        Set target of a redirect for a Wikibase page.
        Has not been implemented in the Wikibase API yet, except for ItemPage.
        """
        raise NotImplementedError
    @allow_asynchronous
    def addClaim(self, claim, bot=True, **kwargs):
        """
        Add a claim to the entity.
        :param claim: The claim to add
        :type claim: pywikibot.page.Claim
        :param bot: Whether to flag as bot (if possible)
        :type bot: bool
        :keyword asynchronous: if True, launch a separate thread to add claim
            asynchronously
        :type asynchronous: bool
        :keyword callback: a callable object that will be called after the
            claim has been added. It must take two arguments:
            (1) a WikibasePage object, and (2) an exception instance,
            which will be None if the entity was saved successfully. This is
            intended for use by bots that need to keep track of which saves
            were successful.
        :type callback: callable
        """
        # A Claim instance is bound to exactly one entity at a time.
        if claim.on_item is not None:
            raise ValueError(
                'The provided Claim instance is already used in an entity')
        self.repo.addClaim(self, claim, bot=bot, **kwargs)
        claim.on_item = self
    def removeClaims(self, claims, **kwargs):
        """
        Remove the claims from the entity.
        :param claims: list of claims to be removed
        :type claims: list or pywikibot.Claim
        """
        # this check allows single claims to be removed by pushing them into a
        # list of length one.
        if isinstance(claims, pywikibot.Claim):
            claims = [claims]
        data = self.repo.removeClaims(claims, **kwargs)
        for claim in claims:
            # Detach each removed claim and record the new revision.
            claim.on_item.latest_revision_id = data['pageinfo']['lastrevid']
            claim.on_item = None
            claim.snak = None
class ItemPage(WikibasePage):
    """
    Wikibase entity of type 'item'.
    A Wikibase item may be defined by either a 'Q' id (qid),
    or by a site & title.
    If an item is defined by site & title, once an item's qid has
    been looked up, the item is then defined by the qid.
    """
    _cache_attrs = WikibasePage._cache_attrs + (
        'labels', 'descriptions', 'aliases', 'claims', 'sitelinks')
    entity_type = 'item'
    title_pattern = r'Q[1-9]\d*'
    # Maps each data attribute to the collection class used to hold it.
    DATA_ATTRIBUTES = {
        'labels': LanguageDict,
        'descriptions': LanguageDict,
        'aliases': AliasesDict,
        'claims': ClaimCollection,
        'sitelinks': SiteLinkCollection,
    }
    def __init__(self, site, title=None, ns=None):
        """
        Initializer.
        :param site: data repository
        :type site: pywikibot.site.DataSite
        :param title: identifier of item, "Q###",
            -1 or None for an empty item.
        :type title: str
        :type ns: namespace
        :type ns: Namespace instance, or int, or None
            for default item_namespace
        """
        if ns is None:
            ns = site.item_namespace
        # Special case for empty item.
        if title is None or title == '-1':
            super().__init__(site, '-1', ns=ns)
            assert self.id == '-1'
            return
        # we don't want empty titles
        if not title:
            raise InvalidTitleError("Item's title cannot be empty")
        super().__init__(site, title, ns=ns)
        assert self.id == self._link.title
    def _defined_by(self, singular: bool = False) -> dict:
        """
        Internal function to provide the API parameters to identify the item.
        The API parameters may be 'id' if the ItemPage has one,
        or 'site'&'title' if instantiated via ItemPage.fromPage with
        lazy_load enabled.
        Once an item's Q## is looked up, that will be used for all future
        requests.
        An empty dict is returned if the ItemPage is instantiated without
        either ID (internally it has id = '-1') or site&title.
        :param singular: Whether the parameter names should use the
            singular form
        :return: API parameters
        """
        params = {}
        if singular:
            id = 'id'
            site = 'site'
            title = 'title'
        else:
            id = 'ids'
            site = 'sites'
            title = 'titles'
        # fromPage(lazy_load=True) leaves _site/_title set and id unset.
        lazy_loading_id = not hasattr(self, 'id') and hasattr(self, '_site')
        # id overrides all
        if hasattr(self, 'id'):
            if self.id != '-1':
                params[id] = self.id
        elif lazy_loading_id:
            params[site] = self._site.dbName()
            params[title] = self._title
        else:
            # if none of the above applies, this item is in an invalid state
            # which needs to be raise as an exception, but also logged in case
            # an exception handler is catching the generic Error.
            pywikibot.error('{} is in invalid state'
                            .format(self.__class__.__name__))
            raise Error('{} is in invalid state'
                        .format(self.__class__.__name__))
        return params
    def title(self, **kwargs):
        """
        Return ID as title of the ItemPage.
        If the ItemPage was lazy-loaded via ItemPage.fromPage, this method
        will fetch the Wikibase item ID for the page, potentially raising
        NoPageError with the page on the linked wiki if it does not exist, or
        does not have a corresponding Wikibase item ID.
        This method also refreshes the title if the id property was set.
        i.e. item.id = 'Q60'
        All optional keyword parameters are passed to the superclass.
        """
        # If instantiated via ItemPage.fromPage using site and title,
        # _site and _title exist, and id does not exist.
        lazy_loading_id = not hasattr(self, 'id') and hasattr(self, '_site')
        if lazy_loading_id or self._link._text != self.id:
            # If the item is lazy loaded or has been modified,
            # _link._text is stale. Removing _link._title
            # forces Link to re-parse ._text into ._title.
            if hasattr(self._link, '_title'):
                del self._link._title
            self._link._text = self.getID()
            self._link.parse()
            # Remove the temporary values that are no longer needed after
            # the .getID() above has called .get(), which populated .id
            if hasattr(self, '_site'):
                del self._title
                del self._site
        return super().title(**kwargs)
    def getID(self, numeric=False, force=False):
        """
        Get the entity identifier.
        :param numeric: Strip the first letter and return an int
        :type numeric: bool
        :param force: Force an update of new data
        :type force: bool
        """
        # A lazy-loaded item has no id yet; fetching resolves it.
        if not hasattr(self, 'id') or force:
            self.get(force=force)
        return super().getID(numeric=numeric)
    @classmethod
    def fromPage(cls, page, lazy_load=False):
        """
        Get the ItemPage for a Page that links to it.
        :param page: Page to look for corresponding data item
        :type page: pywikibot.page.Page
        :param lazy_load: Do not raise NoPageError if either page or
            corresponding ItemPage does not exist.
        :type lazy_load: bool
        :rtype: pywikibot.page.ItemPage
        :raise pywikibot.exceptions.NoPageError: There is no corresponding
            ItemPage for the page
        :raise pywikibot.exceptions.WikiBaseError: The site of the page
            has no data repository.
        """
        # The resolved item is cached on the page itself.
        if hasattr(page, '_item'):
            return page._item
        if not page.site.has_data_repository:
            raise WikiBaseError('{} has no data repository'
                                .format(page.site))
        if not lazy_load and not page.exists():
            raise NoPageError(page)
        repo = page.site.data_repository()
        if hasattr(page,
                   '_pageprops') and page.properties().get('wikibase_item'):
            # If we have already fetched the pageprops for something else,
            # we already have the id, so use it
            page._item = cls(repo, page.properties().get('wikibase_item'))
            return page._item
        i = cls(repo)
        # clear id, and temporarily store data needed to lazy loading the item
        del i.id
        i._site = page.site
        i._title = page.title(with_section=False)
        if not lazy_load and not i.exists():
            raise NoPageError(i)
        page._item = i
        return page._item
    @classmethod
    def from_entity_uri(cls, site, uri: str, lazy_load: bool = False):
        """
        Get the ItemPage from its entity uri.
        :param site: The Wikibase site for the item.
        :type site: pywikibot.site.DataSite
        :param uri: Entity uri for the Wikibase item.
        :param lazy_load: Do not raise NoPageError if ItemPage does not exist.
        :rtype: pywikibot.page.ItemPage
        :raise TypeError: Site is not a valid DataSite.
        :raise ValueError: Site does not match the base of the provided uri.
        :raise pywikibot.exceptions.NoPageError: Uri points to non-existent
            item.
        """
        if not isinstance(site, DataSite):
            raise TypeError('{} is not a data repository.'.format(site))
        # Split '<concept base>/<qid>' on the last slash.
        base_uri, _, qid = uri.rpartition('/')
        if base_uri != site.concept_base_uri.rstrip('/'):
            raise ValueError(
                'The supplied data repository ({repo}) does not correspond to '
                'that of the item ({item})'.format(
                    repo=site.concept_base_uri.rstrip('/'),
                    item=base_uri))
        item = cls(site, qid)
        if not lazy_load and not item.exists():
            raise NoPageError(item)
        return item
    def get(self, force=False, get_redirect=False, *args, **kwargs) -> dict:
        """
        Fetch all item data, and cache it.
        :param force: override caching
        :type force: bool
        :param get_redirect: return the item content, do not follow the
            redirect, do not raise an exception.
        :type get_redirect: bool
        :raise NotImplementedError: a value in args or kwargs
        :return: actual data which entity holds
        :note: dicts returned by this method are references to content of this
            entity and their modifying may indirectly cause unwanted change to
            the live content
        """
        data = super().get(force, *args, **kwargs)
        if self.isRedirectPage() and not get_redirect:
            raise IsRedirectPageError(self)
        return data
    def getRedirectTarget(self):
        """Return the redirect target for this page."""
        target = super().getRedirectTarget()
        cmodel = target.content_model
        # Guard against a redirect leaving the wikibase-item content model.
        if cmodel != 'wikibase-item':
            raise Error('{} has redirect target {} with content model {} '
                        'instead of wikibase-item'
                        .format(self, target, cmodel))
        return self.__class__(target.site, target.title(), target.namespace())
    def iterlinks(self, family=None):
        """
        Iterate through all the sitelinks.
        :param family: string/Family object which represents what family of
            links to iterate
        :type family: str|pywikibot.family.Family
        :return: iterator of pywikibot.Page objects
        :rtype: iterator
        """
        if not hasattr(self, 'sitelinks'):
            self.get()
        if family is not None and not isinstance(family, Family):
            family = Family.load(family)
        for sl in self.sitelinks.values():
            if family is None or family == sl.site.family:
                pg = pywikibot.Page(sl)
                # Back-link the page to this item to spare a later lookup.
                pg._item = self
                yield pg
    def getSitelink(self, site, force=False) -> str:
        """
        Return the title for the specific site.
        If the item doesn't have that language, raise NoPageError.
        :param site: Site to find the linked page of.
        :type site: pywikibot.Site or database name
        :param force: override caching
        """
        if force or not hasattr(self, '_content'):
            self.get(force=force)
        if site not in self.sitelinks:
            raise NoPageError(self)
        return self.sitelinks[site].canonical_title()
    def setSitelink(self, sitelink, **kwargs):
        """
        Set sitelinks. Calls setSitelinks().
        A sitelink can be a Page object, a BaseLink object
        or a {'site':dbname,'title':title} dictionary.
        """
        self.setSitelinks([sitelink], **kwargs)
    def removeSitelink(self, site, **kwargs):
        """
        Remove a sitelink.
        A site can either be a Site object, or it can be a dbName.
        """
        self.removeSitelinks([site], **kwargs)
    def removeSitelinks(self, sites, **kwargs):
        """
        Remove sitelinks.
        Sites should be a list, with values either
        being Site objects, or dbNames.
        """
        data = []
        for site in sites:
            site = SiteLinkCollection.getdbName(site)
            # An empty title removes the sitelink on the server side.
            data.append({'site': site, 'title': ''})
        self.setSitelinks(data, **kwargs)
    def setSitelinks(self, sitelinks, **kwargs):
        """
        Set sitelinks.
        Sitelinks should be a list. Each item in the
        list can either be a Page object, a BaseLink object, or a dict
        with a value for 'site' and 'title'.
        """
        data = {'sitelinks': sitelinks}
        self.editEntity(data, **kwargs)
    def mergeInto(self, item, **kwargs):
        """
        Merge the item into another item.
        :param item: The item to merge into
        :type item: pywikibot.page.ItemPage
        """
        data = self.repo.mergeItems(from_item=self, to_item=item, **kwargs)
        if not data.get('success', 0):
            return
        self.latest_revision_id = data['from']['lastrevid']
        item.latest_revision_id = data['to']['lastrevid']
        if data.get('redirected', 0):
            # The merge turned this item into a redirect to the target.
            self._isredir = True
            self._redirtarget = item
    def set_redirect_target(self, target_page, create=False, force=False,
                            keep_section=False, save=True, **kwargs):
        """
        Make the item redirect to another item.
        You need to define an extra argument to make this work, like save=True
        :param target_page: target of the redirect, this argument is required.
        :type target_page: pywikibot.page.ItemPage or string
        :param force: if true, it sets the redirect target even the page
            is not redirect.
        :type force: bool
        """
        if isinstance(target_page, str):
            target_page = pywikibot.ItemPage(self.repo, target_page)
        elif self.repo != target_page.repo:
            raise InterwikiRedirectPageError(self, target_page)
        if self.exists() and not self.isRedirectPage() and not force:
            raise IsNotRedirectPageError(self)
        # Only the plain save path is implemented for Wikibase redirects.
        if not save or keep_section or create:
            raise NotImplementedError
        data = self.repo.set_redirect_target(
            from_item=self, to_item=target_page,
            bot=kwargs.get('botflag', True))
        if data.get('success', 0):
            del self.latest_revision_id
            self._isredir = True
            self._redirtarget = target_page
    def isRedirectPage(self):
        """Return True if item is a redirect, False if not or not existing."""
        if hasattr(self, '_content') and not hasattr(self, '_isredir'):
            # A redirected item is served under the target's id, so a
            # mismatch between our id and the content id marks a redirect.
            self._isredir = self.id != self._content.get('id', self.id)
            return self._isredir
        return super().isRedirectPage()
# alias for backwards compatibility: ItemPage.concept_url forwards to
# ItemPage.concept_uri (marked via redirect_func as renamed since 20170222)
ItemPage.concept_url = redirect_func(
    ItemPage.concept_uri, old_name='concept_url', class_name='ItemPage',
    since='20170222')
class Property:
    """
    A Wikibase property.

    While every Wikibase property has a Page on the data repository,
    this object is for when the property is used as part of another concept
    where the property is not _the_ Page of the property.

    For example, a claim on an ItemPage has many property attributes, and so
    it subclasses this Property class, but a claim does not have Page like
    behaviour and semantics.
    """

    # Mapping of Wikibase datatype name to the Python/pywikibot class
    # used to represent values of that datatype.
    types = {'wikibase-item': ItemPage,
             # 'wikibase-property': PropertyPage, must be declared first
             'string': str,
             'commonsMedia': FilePage,
             'globe-coordinate': pywikibot.Coordinate,
             'url': str,
             'time': pywikibot.WbTime,
             'quantity': pywikibot.WbQuantity,
             'monolingualtext': pywikibot.WbMonolingualText,
             'math': str,
             'external-id': str,
             'geo-shape': pywikibot.WbGeoShape,
             'tabular-data': pywikibot.WbTabularData,
             'musical-notation': str,
             }

    # the value type where different from the type
    value_types = {'wikibase-item': 'wikibase-entityid',
                   'wikibase-property': 'wikibase-entityid',
                   'commonsMedia': 'string',
                   'url': 'string',
                   'globe-coordinate': 'globecoordinate',
                   'math': 'string',
                   'external-id': 'string',
                   'geo-shape': 'string',
                   'tabular-data': 'string',
                   'musical-notation': 'string',
                   }

    def __init__(self, site, id: str, datatype: Optional[str] = None):
        """
        Initializer.

        :param site: data repository
        :type site: pywikibot.site.DataSite
        :param id: id of the property
        :param datatype: datatype of the property;
            if not given, it will be queried via the API
        """
        self.repo = site
        # Property ids are canonically upper-case ('P###').
        self.id = id.upper()
        if datatype:
            self._type = datatype

    @property
    def type(self) -> str:
        """Return the type of this property."""
        # Query the repository once and memoize the answer.
        try:
            return self._type
        except AttributeError:
            self._type = self.repo.getPropertyType(self)
            return self._type

    def getID(self, numeric=False):
        """
        Get the identifier of this property.

        :param numeric: Strip the first letter and return an int
        :type numeric: bool
        """
        return int(self.id[1:]) if numeric else self.id
class PropertyPage(WikibasePage, Property):
    """
    A Wikibase entity in the property namespace.
    Should be created as::
        PropertyPage(DataSite, 'P21')
    or::
        PropertyPage(DataSite, datatype='url')
    """
    # The datatype takes part in the page cache invalidation.
    _cache_attrs = WikibasePage._cache_attrs + (
        '_type', 'labels', 'descriptions', 'aliases', 'claims')
    entity_type = 'property'
    # Valid property ids: 'P' followed by a number without leading zeros.
    title_pattern = r'P[1-9]\d*'
    # Maps each data attribute to the collection class used to hold it;
    # unlike items, properties have no sitelinks.
    DATA_ATTRIBUTES = {
        'labels': LanguageDict,
        'descriptions': LanguageDict,
        'aliases': AliasesDict,
        'claims': ClaimCollection,
    }
def __init__(self, source, title=None, datatype=None):
"""
Initializer.
:param source: data repository property is on
:type source: pywikibot.site.DataSite
:param title: identifier of property, like "P##",
"-1" or None for an empty property.
:type title: str
:param datatype: Datatype for a new property.
:type datatype: str
"""
# Special case for new property.
if title is None or title == '-1':
if not datatype:
raise TypeError('"datatype" is required for new property.')
WikibasePage.__init__(self, source, '-1',
ns=source.property_namespace)
Property.__init__(self, source, '-1', datatype=datatype)
assert self.id == '-1'
else:
if not title:
raise InvalidTitleError(
"Property's title cannot be empty")
WikibasePage.__init__(self, source, title,
ns=source.property_namespace)
Property.__init__(self, source, self.id)
def get(self, force: bool = False, *args, **kwargs) -> dict:
"""
Fetch the property entity, and cache it.
:param force: override caching
:raise NotImplementedError: a value in args or kwargs
:return: actual data which entity holds
:note: dicts returned by this method are references to content of this
entity and their modifying may indirectly cause unwanted change to
the live content
"""
if args or kwargs:
raise NotImplementedError(
'PropertyPage.get only implements "force".')
data = WikibasePage.get(self, force)
if 'datatype' in self._content:
self._type = self._content['datatype']
data['datatype'] = self._type
return data
def newClaim(self, *args, **kwargs):
"""
Helper function to create a new claim object for this property.
:rtype: pywikibot.page.Claim
"""
# todo: raise when self.id is -1
return Claim(self.site, self.getID(), datatype=self.type,
*args, **kwargs)
def getID(self, numeric=False):
"""
Get the identifier of this property.
:param numeric: Strip the first letter and return an int
:type numeric: bool
"""
# enforce this parent's implementation
return WikibasePage.getID(self, numeric=numeric)
def get_data_for_new_entity(self):
"""Return data required for creation of new property."""
return {'datatype': self.type}
# PropertyPage is declared after Property, so the 'wikibase-property'
# datatype could not be registered inside Property.types at class-creation
# time; register it here once PropertyPage exists.
Property.types['wikibase-property'] = PropertyPage
class Claim(Property):

    """
    A Claim on a Wikibase entity.

    Claims are standard claims as well as references and qualifiers.
    """

    # Converters from the API JSON 'datavalue' value to the local Python
    # representation, keyed by property datatype.  Datatypes missing here
    # are passed through unchanged (see fromJSON).
    TARGET_CONVERTER = {
        'wikibase-item': lambda value, site:
            ItemPage(site, 'Q' + str(value['numeric-id'])),
        'wikibase-property': lambda value, site:
            PropertyPage(site, 'P' + str(value['numeric-id'])),
        'commonsMedia': lambda value, site:
            FilePage(pywikibot.Site('commons', 'commons'), value),  # T90492
        'globe-coordinate': pywikibot.Coordinate.fromWikibase,
        'geo-shape': pywikibot.WbGeoShape.fromWikibase,
        'tabular-data': pywikibot.WbTabularData.fromWikibase,
        'time': pywikibot.WbTime.fromWikibase,
        'quantity': pywikibot.WbQuantity.fromWikibase,
        'monolingualtext': lambda value, site:
            pywikibot.WbMonolingualText.fromWikibase(value)
    }

    SNAK_TYPES = ('value', 'somevalue', 'novalue')

    @deprecated_args(isReference='is_reference', isQualifier='is_qualifier')
    def __init__(self, site, pid, snak=None, hash=None, is_reference=False,
                 is_qualifier=False, rank='normal', **kwargs):
        """
        Initializer.

        Defined by the "snak" value, supplemented by site + pid

        :param site: repository the claim is on
        :type site: pywikibot.site.DataSite
        :param pid: property id, with "P" prefix
        :param snak: snak identifier for claim
        :param hash: hash identifier for references
        :param is_reference: whether specified claim is a reference
        :param is_qualifier: whether specified claim is a qualifier
        :param rank: rank for claim
        """
        Property.__init__(self, site, pid, **kwargs)
        self.snak = snak
        self.hash = hash
        self.rank = rank
        self.isReference = is_reference
        self.isQualifier = is_qualifier
        if self.isQualifier and self.isReference:
            raise ValueError('Claim cannot be both a qualifier and reference.')
        self.sources = []
        self.qualifiers = OrderedDict()
        self.target = None
        self.snaktype = 'value'
        self._on_item = None  # The item it's on

    @property
    def on_item(self):
        """Return item this claim is attached to."""
        return self._on_item

    @on_item.setter
    def on_item(self, item):
        # Attach the claim and propagate the owning item to every
        # qualifier and source claim as well.
        self._on_item = item
        for values in self.qualifiers.values():
            for qualifier in values:
                qualifier.on_item = item
        for source in self.sources:
            for values in source.values():
                for source in values:
                    source.on_item = item

    def __repr__(self):
        """Return the representation string."""
        return '{cls_name}.fromJSON({}, {})'.format(
            repr(self.repo), self.toJSON(), cls_name=type(self).__name__)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False

        return self.same_as(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    @staticmethod
    def _claim_mapping_same(this, other):
        # Compare two property-id -> [Claim, ...] mappings, treating the
        # claim lists as unordered collections.
        if len(this) != len(other):
            return False

        my_values = list(chain.from_iterable(this.values()))
        other_values = list(chain.from_iterable(other.values()))
        if len(my_values) != len(other_values):
            return False

        for val in my_values:
            if val not in other_values:
                return False
        for val in other_values:
            if val not in my_values:
                return False
        return True

    def same_as(self, other, ignore_rank=True, ignore_quals=False,
                ignore_refs=True):
        """Check if two claims are same."""
        if ignore_rank:
            attributes = ['id', 'snaktype', 'target']
        else:
            attributes = ['id', 'snaktype', 'rank', 'target']
        for attr in attributes:
            if getattr(self, attr) != getattr(other, attr):
                return False

        if not ignore_quals:
            if not self._claim_mapping_same(self.qualifiers, other.qualifiers):
                return False

        if not ignore_refs:
            if len(self.sources) != len(other.sources):
                return False
            # Each of our reference groups must match one of the other's.
            for source in self.sources:
                same = False
                for other_source in other.sources:
                    if self._claim_mapping_same(source, other_source):
                        same = True
                        break
                if not same:
                    return False

        return True

    def copy(self):
        """
        Create an independent copy of this object.

        :rtype: pywikibot.page.Claim
        """
        is_qualifier = self.isQualifier
        is_reference = self.isReference
        # Temporarily clear both flags so toJSON() emits the full
        # statement serialization rather than a bare snak.
        self.isQualifier = False
        self.isReference = False
        copy = self.fromJSON(self.repo, self.toJSON())
        for cl in (self, copy):
            cl.isQualifier = is_qualifier
            cl.isReference = is_reference
        # The copy is detached: it has no server-side identity yet.
        copy.hash = None
        copy.snak = None
        return copy

    @classmethod
    def fromJSON(cls, site, data):
        """
        Create a claim object from JSON returned in the API call.

        :param data: JSON containing claim data
        :type data: dict
        :rtype: pywikibot.page.Claim
        """
        claim = cls(site, data['mainsnak']['property'],
                    datatype=data['mainsnak'].get('datatype', None))
        if 'id' in data:
            claim.snak = data['id']
        elif 'hash' in data:
            claim.hash = data['hash']
        claim.snaktype = data['mainsnak']['snaktype']
        if claim.getSnakType() == 'value':
            value = data['mainsnak']['datavalue']['value']
            # The default covers string, url types
            if claim.type in cls.types or claim.type == 'wikibase-property':
                claim.target = cls.TARGET_CONVERTER.get(
                    claim.type, lambda value, site: value)(value, site)
            else:
                # Unknown datatype: keep the raw value wrapped so it can
                # round-trip back to the API unchanged.
                pywikibot.warning(
                    '{} datatype is not supported yet.'.format(claim.type))
                claim.target = pywikibot.WbUnknown.fromWikibase(value)
        if 'rank' in data:  # References/Qualifiers don't have ranks
            claim.rank = data['rank']
        if 'references' in data:
            for source in data['references']:
                claim.sources.append(cls.referenceFromJSON(site, source))
        if 'qualifiers' in data:
            for prop in data['qualifiers-order']:
                claim.qualifiers[prop] = [
                    cls.qualifierFromJSON(site, qualifier)
                    for qualifier in data['qualifiers'][prop]]
        return claim

    @classmethod
    def referenceFromJSON(cls, site, data) -> dict:
        """
        Create a dict of claims from reference JSON returned in the API call.

        Reference objects are represented a bit differently, and require
        some more handling.
        """
        source = OrderedDict()
        # Before #84516 Wikibase did not implement snaks-order.
        # https://gerrit.wikimedia.org/r/c/84516/
        if 'snaks-order' in data:
            prop_list = data['snaks-order']
        else:
            prop_list = data['snaks'].keys()

        for prop in prop_list:
            for claimsnak in data['snaks'][prop]:
                claim = cls.fromJSON(site, {'mainsnak': claimsnak,
                                            'hash': data.get('hash')})
                claim.isReference = True
                if claim.getID() not in source:
                    source[claim.getID()] = []
                source[claim.getID()].append(claim)
        return source

    @classmethod
    def qualifierFromJSON(cls, site, data):
        """
        Create a Claim for a qualifier from JSON.

        Qualifier objects are represented a bit
        differently like references, but I'm not
        sure if this even requires it's own function.

        :rtype: pywikibot.page.Claim
        """
        claim = cls.fromJSON(site, {'mainsnak': data,
                                    'hash': data.get('hash')})
        claim.isQualifier = True
        return claim

    def toJSON(self) -> dict:
        """Create dict suitable for the MediaWiki API."""
        data = {
            'mainsnak': {
                'snaktype': self.snaktype,
                'property': self.getID()
            },
            'type': 'statement'
        }
        if hasattr(self, 'snak') and self.snak is not None:
            data['id'] = self.snak
        if hasattr(self, 'rank') and self.rank is not None:
            data['rank'] = self.rank
        if self.getSnakType() == 'value':
            data['mainsnak']['datatype'] = self.type
            data['mainsnak']['datavalue'] = self._formatDataValue()
        if self.isQualifier or self.isReference:
            # Qualifiers and references serialize as a bare snak.
            data = data['mainsnak']
            if hasattr(self, 'hash') and self.hash is not None:
                data['hash'] = self.hash
        else:
            if self.qualifiers:
                data['qualifiers'] = {}
                data['qualifiers-order'] = list(self.qualifiers.keys())
                for prop, qualifiers in self.qualifiers.items():
                    for qualifier in qualifiers:
                        assert qualifier.isQualifier is True
                    data['qualifiers'][prop] = [
                        qualifier.toJSON() for qualifier in qualifiers]

            if self.sources:
                data['references'] = []
                for collection in self.sources:
                    reference = {
                        'snaks': {}, 'snaks-order': list(collection.keys())}
                    for prop, val in collection.items():
                        reference['snaks'][prop] = []
                        for source in val:
                            assert source.isReference is True
                            src_data = source.toJSON()
                            if 'hash' in src_data:
                                # The hash belongs to the whole reference,
                                # not to the individual snak.
                                reference.setdefault('hash', src_data['hash'])
                                del src_data['hash']
                            reference['snaks'][prop].append(src_data)
                    data['references'].append(reference)
        return data

    def setTarget(self, value):
        """
        Set the target value in the local object.

        :param value: The new target value.
        :type value: object
        :exception ValueError: if value is not of the type
            required for the Claim type.
        """
        value_class = self.types[self.type]
        if not isinstance(value, value_class):
            raise ValueError('{} is not type {}.'
                             .format(value, value_class))
        self.target = value

    def changeTarget(self, value=None, snaktype='value', **kwargs):
        """
        Set the target value in the data repository.

        :param value: The new target value.
        :type value: object
        :param snaktype: The new snak type.
        :type snaktype: str ('value', 'somevalue', or 'novalue')
        """
        if value:
            self.setTarget(value)

        data = self.repo.changeClaimTarget(self, snaktype=snaktype,
                                           **kwargs)
        # TODO: Re-create the entire item from JSON, not just id
        self.snak = data['claim']['id']
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']

    def getTarget(self):
        """
        Return the target value of this Claim.

        None is returned if no target is set

        :return: object
        """
        return self.target

    def getSnakType(self) -> str:
        """
        Return the type of snak.

        :return: str ('value', 'somevalue' or 'novalue')
        """
        return self.snaktype

    def setSnakType(self, value):
        """
        Set the type of snak.

        :param value: Type of snak
        :type value: str ('value', 'somevalue', or 'novalue')
        """
        if value in self.SNAK_TYPES:
            self.snaktype = value
        else:
            raise ValueError(
                "snaktype must be 'value', 'somevalue', or 'novalue'.")

    def getRank(self):
        """Return the rank of the Claim."""
        return self.rank

    def setRank(self, rank):
        """Set the rank of the Claim."""
        self.rank = rank

    def changeRank(self, rank, **kwargs):
        """Change the rank of the Claim and save."""
        self.rank = rank
        return self.repo.save_claim(self, **kwargs)

    def changeSnakType(self, value=None, **kwargs):
        """
        Save the new snak value.

        TODO: Is this function really needed?
        """
        if value:
            self.setSnakType(value)
        self.changeTarget(snaktype=self.getSnakType(), **kwargs)

    def getSources(self) -> list:
        """Return a list of sources, each being a list of Claims."""
        return self.sources

    def addSource(self, claim, **kwargs):
        """
        Add the claim as a source.

        :param claim: the claim to add
        :type claim: pywikibot.Claim
        """
        self.addSources([claim], **kwargs)

    def addSources(self, claims, **kwargs):
        """
        Add the claims as one source.

        :param claims: the claims to add
        :type claims: list of pywikibot.Claim
        """
        for claim in claims:
            if claim.on_item is not None:
                raise ValueError(
                    'The provided Claim instance is already used in an entity')
        if self.on_item is not None:
            # Already attached to an item: save on the server first.
            data = self.repo.editSource(self, claims, new=True, **kwargs)
            self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
            for claim in claims:
                claim.hash = data['reference']['hash']
                claim.on_item = self.on_item
        source = defaultdict(list)
        for claim in claims:
            claim.isReference = True
            source[claim.getID()].append(claim)
        self.sources.append(source)

    def removeSource(self, source, **kwargs):
        """
        Remove the source. Call removeSources().

        :param source: the source to remove
        :type source: pywikibot.Claim
        """
        self.removeSources([source], **kwargs)

    def removeSources(self, sources, **kwargs):
        """
        Remove the sources.

        :param sources: the sources to remove
        :type sources: list of pywikibot.Claim
        """
        data = self.repo.removeSources(self, sources, **kwargs)
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
        for source in sources:
            source_dict = defaultdict(list)
            source_dict[source.getID()].append(source)
            self.sources.remove(source_dict)

    def addQualifier(self, qualifier, **kwargs):
        """Add the given qualifier.

        :param qualifier: the qualifier to add
        :type qualifier: pywikibot.page.Claim
        """
        if qualifier.on_item is not None:
            raise ValueError(
                'The provided Claim instance is already used in an entity')
        if self.on_item is not None:
            # Already attached to an item: save on the server first.
            data = self.repo.editQualifier(self, qualifier, **kwargs)
            self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
            qualifier.on_item = self.on_item
        qualifier.isQualifier = True
        if qualifier.getID() in self.qualifiers:
            self.qualifiers[qualifier.getID()].append(qualifier)
        else:
            self.qualifiers[qualifier.getID()] = [qualifier]

    def removeQualifier(self, qualifier, **kwargs):
        """
        Remove the qualifier. Call removeQualifiers().

        :param qualifier: the qualifier to remove
        :type qualifier: pywikibot.page.Claim
        """
        self.removeQualifiers([qualifier], **kwargs)

    def removeQualifiers(self, qualifiers, **kwargs):
        """
        Remove the qualifiers.

        :param qualifiers: the qualifiers to remove
        :type qualifiers: list Claim
        """
        data = self.repo.remove_qualifiers(self, qualifiers, **kwargs)
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
        for qualifier in qualifiers:
            self.qualifiers[qualifier.getID()].remove(qualifier)
            qualifier.on_item = None

    def target_equals(self, value) -> bool:
        """
        Check whether the Claim's target is equal to specified value.

        The function checks for:

        - WikibasePage ID equality
        - WbTime year equality
        - Coordinate equality, regarding precision
        - WbMonolingualText text equality
        - direct equality

        :param value: the value to compare with
        :return: true if the Claim's target is equal to the value provided,
            false otherwise
        """
        if (isinstance(self.target, WikibasePage)
                and isinstance(value, str)):
            return self.target.id == value

        if (isinstance(self.target, pywikibot.WbTime)
                and not isinstance(value, pywikibot.WbTime)):
            return self.target.year == int(value)

        if (isinstance(self.target, pywikibot.Coordinate)
                and isinstance(value, str)):
            # value is expected as 'lat,lon' or 'lat,lon,precision'
            coord_args = [float(x) for x in value.split(',')]
            if len(coord_args) >= 3:
                precision = coord_args[2]
            else:
                precision = 0.0001  # Default value (~10 m at equator)
            with suppress(TypeError):
                if self.target.precision is not None:
                    precision = max(precision, self.target.precision)
            return (abs(self.target.lat - coord_args[0]) <= precision
                    and abs(self.target.lon - coord_args[1]) <= precision)

        if (isinstance(self.target, pywikibot.WbMonolingualText)
                and isinstance(value, str)):
            return self.target.text == value

        return self.target == value

    def has_qualifier(self, qualifier_id: str, target) -> bool:
        """
        Check whether Claim contains specified qualifier.

        :param qualifier_id: id of the qualifier
        :param target: qualifier target to check presence of
        :return: true if the qualifier was found, false otherwise
        """
        if self.isQualifier or self.isReference:
            raise ValueError('Qualifiers and references cannot have '
                             'qualifiers.')
        for qualifier in self.qualifiers.get(qualifier_id, []):
            if qualifier.target_equals(target):
                return True
        return False

    def _formatValue(self) -> dict:
        """
        Format the target into the proper JSON value that Wikibase wants.

        :return: JSON value
        """
        if self.type in ('wikibase-item', 'wikibase-property'):
            value = {'entity-type': self.getTarget().entity_type,
                     'numeric-id': self.getTarget().getID(numeric=True)}
        elif self.type in ('string', 'url', 'math', 'external-id',
                           'musical-notation'):
            value = self.getTarget()
        elif self.type == 'commonsMedia':
            value = self.getTarget().title(with_ns=False)
        elif self.type in ('globe-coordinate', 'time',
                           'quantity', 'monolingualtext',
                           'geo-shape', 'tabular-data'):
            value = self.getTarget().toWikibase()
        else:  # WbUnknown
            pywikibot.warning(
                '{} datatype is not supported yet.'.format(self.type))
            value = self.getTarget().toWikibase()
        return value

    def _formatDataValue(self) -> dict:
        """
        Format the target into the proper JSON datavalue that Wikibase wants.

        :return: Wikibase API representation with type and value.
        """
        return {
            'value': self._formatValue(),
            'type': self.value_types.get(self.type, self.type)
        }
class FileInfo:

    """
    A structure holding imageinfo of latest rev. of FilePage.

    All keys of the API imageinfo dictionary are mapped to FileInfo
    attributes; attributes can be read both as ``self['key']`` and
    ``self.key``.

    The following attributes will be returned:

    - timestamp, user, comment, url, size, sha1, mime, metadata
    - archivename (not for latest revision)

    See Site.loadimageinfo() for details.

    Note: timestamp will be casted to pywikibot.Timestamp.
    """

    def __init__(self, file_revision):
        """Initiate the class using the dict from L{APISite.loadimageinfo}."""
        vars(self).update(file_revision)
        # The API delivers an ISO timestamp string; expose a Timestamp.
        self.timestamp = pywikibot.Timestamp.fromISOformat(self.timestamp)

    def __getitem__(self, key):
        """Give access to class values by key."""
        return getattr(self, key)

    def __repr__(self):
        """Return a more complete string representation."""
        return repr(self.__dict__)

    def __eq__(self, other):
        """Test if two File_info objects are equal."""
        return self.__dict__ == other.__dict__
class BaseLink(ComparableMixin):

    """
    A MediaWiki link (local or interwiki).

    Has the following attributes:

    - title: The title of the page linked to (str); does not include
      namespace or section
    - namespace: The Namespace object of the page linked to
    - site: The Site object for the wiki linked to
    """

    # Components used for __repr__
    _items = ('title', 'namespace', '_sitekey')

    def __init__(self, title: str, namespace=None, site=None):
        """
        Initializer.

        :param title: the title of the page linked to (str); does not
            include namespace or section
        :param namespace: the namespace of the page linked to. Can be
            provided as either an int, a Namespace instance or a str,
            defaults to the MAIN namespace.
        :type namespace: int, pywikibot.Namespace or str
        :param site: the Site object for the wiki linked to. Can be
            provided as either a Site instance or a db key, defaults to
            pywikibot.Site().
        :type site: pywikibot.Site or str
        """
        wiki = site or pywikibot.Site()
        if isinstance(wiki, pywikibot.site.BaseSite):
            self._site = wiki
            self._sitekey = wiki.dbName()
        else:
            # a db key was given; resolve the Site lazily on access
            self._sitekey = wiki

        if isinstance(namespace, pywikibot.site.Namespace):
            self._namespace = namespace
        else:
            # int/str/None: postpone evaluation of namespace until needed
            self._nskey = namespace

        self.title = title

    def __repr__(self):
        """Return a more complete string representation."""
        assert isinstance(self._items, tuple)
        assert all(isinstance(item, (bytes, str)) for item in self._items)

        parts = [repr(getattr(self, name)) for name in self._items]
        return 'pywikibot.page.{}({})'.format(
            type(self).__name__, ', '.join(parts))

    def lookup_namespace(self):
        """
        Look up the namespace given the provided namespace id or name.

        :rtype: pywikibot.Namespace
        """
        fallback = Namespace.MAIN
        self._nskey = self._nskey or fallback

        if isinstance(self._nskey, str):
            # try to interpret the key as a namespace name first
            found = self.site.namespaces.lookup_name(self._nskey)
            if found:
                return found
            self._nskey = fallback

        if isinstance(self._nskey, int):
            try:
                return self.site.namespaces[self._nskey]
            except KeyError:
                return self.site.namespaces[fallback]

        raise TypeError(
            'Invalid type "{}" for Page._nskey. Must be int or str.'
            .format(type(self._nskey)))

    @property
    def site(self):
        """
        Return the site of the link.

        :rtype: pywikibot.Site
        """
        try:
            return self._site
        except AttributeError:
            self._site = pywikibot.site.APISite.fromDBName(self._sitekey)
            return self._site

    @property
    def namespace(self):
        """
        Return the namespace of the link.

        :rtype: pywikibot.Namespace
        """
        try:
            return self._namespace
        except AttributeError:
            self._namespace = self.lookup_namespace()
            return self._namespace

    def canonical_title(self):
        """Return full page title, including localized namespace."""
        # Avoid that ':' will be added to the title for Main ns.
        if self.namespace == Namespace.MAIN:
            return self.title
        return '{}:{}'.format(self.site.namespace(self.namespace),
                              self.title)

    def ns_title(self, onsite=None):
        """
        Return full page title, including namespace.

        :param onsite: site object
            if specified, present title using onsite local namespace,
            otherwise use self canonical namespace.

        :raise pywikibot.exceptions.Error: no corresponding namespace is
            found in onsite
        """
        if onsite is None:
            name = self.namespace.canonical_name
        else:
            # look for corresponding ns in onsite by name comparison
            for alias in self.namespace:
                match = onsite.namespaces.lookup_name(alias)
                if match is not None:
                    name = match.custom_name
                    break
            else:
                # not found
                raise Error(
                    'No corresponding namespace found for namespace {} on {}.'
                    .format(self.namespace, onsite))

        return ('{}:{}'.format(name, self.title)
                if self.namespace != Namespace.MAIN else self.title)

    def astext(self, onsite=None):
        """
        Return a text representation of the link.

        :param onsite: if specified, present as a (possibly interwiki) link
            from the given site; otherwise, present as an internal link on
            the site.
        """
        if onsite is None:
            onsite = self.site

        target = self.title
        if self.namespace != Namespace.MAIN:
            target = onsite.namespace(self.namespace) + ':' + target

        if onsite == self.site:
            return '[[{}]]'.format(target)
        # Same family, or single-site families like commons where the
        # code equals the family name: prefix with the site code.
        if (onsite.family == self.site.family
                or self.site.family.name == self.site.code):
            return '[[{}:{}]]'.format(self.site.code, target)
        return '[[{}:{}]]'.format(self.site.sitename, target)

    def _cmpkey(self):
        """
        Key for comparison of BaseLink objects.

        BaseLink objects are "equal" if and only if they are on the same
        site and have the same normalized title.

        BaseLink objects are sortable by site, then namespace, then title.
        """
        return self.site, self.namespace, self.title

    def __str__(self) -> str:
        """Return a str string representation."""
        return self.astext()

    def __hash__(self):
        """A stable identifier to be used as a key in hash-tables."""
        return hash((self.site.sitename, self.canonical_title()))

    @classmethod
    def fromPage(cls, page):
        """
        Create a BaseLink to a Page.

        :param page: target pywikibot.page.Page
        :type page: pywikibot.page.Page

        :rtype: pywikibot.page.BaseLink
        """
        page_title = page.title(with_ns=False,
                                allow_interwiki=False,
                                with_section=False)
        return cls(page_title, namespace=page.namespace(), site=page.site)
class Link(BaseLink):
"""
A MediaWiki wikitext link (local or interwiki).
Constructs a Link object based on a wikitext link and a source site.
Extends BaseLink by the following attributes:
- section: The section of the page linked to (str or None); this
contains any text following a '#' character in the title
- anchor: The anchor text (str or None); this contains any text
following a '|' character inside the link
"""
# Components used for __repr__
_items = ('title', 'site')

# Regex of character sequences that make a title illegal.
illegal_titles_pattern = re.compile(
    # Matching titles will be held as illegal.
    r'[\x00-\x1f\x23\x3c\x3e\x5b\x5d\x7b\x7c\x7d\x7f]'
    # URL percent encoding sequences interfere with the ability
    # to round-trip titles -- you can't link to them consistently.
    '|%[0-9A-Fa-f]{2}'
    # XML/HTML character references produce similar issues.
    '|&[A-Za-z0-9\x80-\xff]+;'
    '|&#[0-9]+;'
    '|&#x[0-9A-Fa-f]+;'
)
@deprecated_args(defaultNamespace='default_namespace')
def __init__(self, text, source=None, default_namespace=0):
    """
    Initializer.

    :param text: the link text (everything appearing between [[ and ]]
        on a wiki page)
    :type text: str
    :param source: the Site on which the link was found (not necessarily
        the site to which the link refers)
    :type source: Site or BasePage
    :param default_namespace: a namespace to use if the link does not
        contain one (defaults to 0)
    :type default_namespace: int

    :raises UnicodeError: text could not be converted to unicode.
    """
    source_is_page = isinstance(source, BasePage)

    if source_is_page:
        self._source = source.site
    else:
        self._source = source or pywikibot.Site()

    assert isinstance(self._source, pywikibot.site.BaseSite), \
        'source parameter should be either a Site or Page object'

    self._text = text
    # See bug T104864, default_namespace might have been deleted.
    try:
        self._defaultns = self._source.namespaces[default_namespace]
    except KeyError:
        self._defaultns = default_namespace

    # preprocess text (these changes aren't site-dependent)
    # First remove anchor, which is stored unchanged, if there is one
    if '|' in self._text:
        self._text, self._anchor = self._text.split('|', 1)
    else:
        self._anchor = None

    # Convert URL-encoded characters to unicode
    self._text = pywikibot.tools.chars.url2string(
        self._text, encodings=self._source.encodings())

    # Clean up the name, it can come from anywhere.
    # Convert HTML entities to unicode
    t = html2unicode(self._text)

    # Normalize unicode string to a NFC (composed) format to allow
    # proper string comparisons to strings output from MediaWiki API.
    t = unicodedata.normalize('NFC', t)

    # This code was adapted from Title.php : secureAndSplit()
    if '\ufffd' in t:
        raise InvalidTitleError(
            '{!r} contains illegal char {!r}'.format(t, '\ufffd'))

    # Cleanup whitespace: underscores and exotic spaces become one space
    t = re.sub(
        '[_ \xa0\u1680\u180E\u2000-\u200A\u2028\u2029\u202F\u205F\u3000]+',
        ' ', t)
    # Strip spaces at both ends
    t = t.strip()
    # Remove left-to-right and right-to-left markers.
    t = t.replace('\u200e', '').replace('\u200f', '')
    self._text = t

    if source_is_page:
        # A link found on a page: treat it as relative to that page.
        self._text = source.title(with_section=False) + self._text
def parse_site(self) -> tuple:
    """
    Parse only enough text to determine which site the link points to.

    This method does not parse anything after the first ":"; links
    with multiple interwiki prefixes (such as "wikt:fr:Parlais") need
    to be re-parsed on the first linked wiki to get the actual site.

    :return: The family name and site code for the linked site. If the
        site is not supported by the configured families it returns None
        instead of a str.
    """
    t = self._text
    fam = self._source.family
    code = self._source.code
    while ':' in t:
        # Initial colon
        if t.startswith(':'):
            # remove the colon but continue processing
            # remove any subsequent whitespace
            t = t.lstrip(':').lstrip(' ')
            continue
        prefix = t[:t.index(':')].lower()  # part of text before :
        ns = self._source.namespaces.lookup_name(prefix)
        if ns:
            # The prefix is a namespace in the source wiki
            return (fam.name, code)
        if prefix in fam.langs:
            # prefix is a language code within the source wiki family
            return (fam.name, prefix)
        try:
            newsite = self._source.interwiki(prefix)
        except KeyError:
            break  # text before : doesn't match any known prefix
        except SiteDefinitionError:
            # prefix is a known interwiki but its site is not configured
            return (None, None)
        else:
            return (newsite.family.name, newsite.code)
    return (fam.name, code)  # text before : doesn't match any known prefix
def parse(self):
    """
    Parse wikitext of the link.

    Called internally when accessing attributes.  Resolves interwiki and
    namespace prefixes, splits off the section (fragment) and validates
    the remaining title, storing the results in ``_site``, ``_namespace``,
    ``_section`` and ``_title``.
    """
    self._site = self._source
    self._namespace = self._defaultns
    self._is_interwiki = False
    ns_prefix = False

    # skip a leading ':' when searching for the first prefix separator
    old_position = int(self._text.startswith(':'))
    colon_position = self._text.find(':', old_position)
    first_other_site = None
    while colon_position >= 0:
        prefix = self._text[old_position:colon_position].lower()
        # All spaces after a prefix are discarded
        colon_position += 1
        while (len(self._text) > colon_position
                and self._text[colon_position] == ' '):
            colon_position += 1
        ns = self._site.namespaces.lookup_name(prefix)
        if ns:
            if len(self._text) <= colon_position:
                raise InvalidTitleError(
                    "'{}' has no title.".format(self._text))
            self._namespace = ns
            ns_prefix = True
            old_position = colon_position
            # a namespace prefix ends prefix processing
            break

        try:
            newsite = self._site.interwiki(prefix)
        except KeyError:
            break  # text before : doesn't match any known prefix
        except SiteDefinitionError as e:
            raise SiteDefinitionError(
                '{} is not a local page on {}, and the interwiki '
                'prefix {} is not supported by Pywikibot!\n{}'
                .format(self._text, self._site, prefix, e))
        else:
            if first_other_site:
                # chained interwiki prefixes must stay local
                if not self._site.local_interwiki(prefix):
                    raise InvalidTitleError(
                        '{} links to a non local site {} via an '
                        'interwiki link to {}.'.format(
                            self._text, newsite, first_other_site))
            elif newsite != self._source:
                first_other_site = newsite
            self._site = newsite
            self._is_interwiki = True
        old_position = colon_position
        colon_position = self._text.find(':', old_position)

    # Remove any namespaces/interwiki prefixes
    t = self._text[old_position:]

    if '#' in t:
        t, sec = t.split('#', 1)
        t, self._section = t.rstrip(), sec.lstrip()
    else:
        self._section = None

    if ns_prefix:
        # 'namespace:' is not a valid title
        if not t:
            raise InvalidTitleError(
                "'{}' has no title.".format(self._text))

        if ':' in t and self._namespace >= 0:  # < 0 don't have talk
            # the paired (non-)talk namespace of the resolved namespace
            other_ns = self._site.namespaces[self._namespace - 1
                                             if self._namespace % 2 else
                                             self._namespace + 1]
            if '' in other_ns:  # other namespace uses empty str as ns
                next_ns = t[:t.index(':')]
                if self._site.namespaces.lookup_name(next_ns):
                    raise InvalidTitleError(
                        "The (non-)talk page of '{}' is a valid title "
                        'in another namespace.'.format(self._text))

    # Reject illegal characters.
    m = Link.illegal_titles_pattern.search(t)
    if m:
        raise InvalidTitleError('{!r} contains illegal char(s) {!r}'
                                .format(t, m.group(0)))

    # Pages with "/./" or "/../" appearing in the URLs will
    # often be unreachable due to the way web browsers deal
    # * with 'relative' URLs. Forbid them explicitly.
    if '.' in t and (t in ('.', '..')
                     or t.startswith(('./', '../'))
                     or '/./' in t
                     or '/../' in t
                     or t.endswith(('/.', '/..'))):
        raise InvalidTitleError(
            "(contains . / combinations): '{}'"
            .format(self._text))

    # Magic tilde sequences? Nu-uh!
    if '~~~' in t:
        raise InvalidTitleError("(contains ~~~): '{}'"
                                .format(self._text))

    if self._namespace != -1 and len(t) > 255:
        raise InvalidTitleError("(over 255 bytes): '{}'".format(t))

    # "empty" local links can only be self-links
    # with a fragment identifier.
    if not t.strip(' ') and not self._is_interwiki:  # T197642
        raise InvalidTitleError(
            'The link [[{}]] does not contain a page title'
            .format(self._text))

    if self._site.namespaces[self._namespace].case == 'first-letter':
        t = first_upper(t)

    self._title = t
# define attributes, to be evaluated lazily
@property
def site(self):
    """
    Return the site of the link.

    :rtype: pywikibot.Site
    """
    try:
        return self._site
    except AttributeError:
        self.parse()
        return self._site
@property
def namespace(self):
    """
    Return the namespace of the link.

    :rtype: pywikibot.Namespace
    """
    try:
        return self._namespace
    except AttributeError:
        self.parse()
        return self._namespace
@property
def title(self) -> str:
    """Return the title of the link."""
    try:
        return self._title
    except AttributeError:
        self.parse()
        return self._title
@property
def section(self) -> str:
    """Return the section of the link."""
    try:
        return self._section
    except AttributeError:
        self.parse()
        return self._section
@property
def anchor(self) -> str:
    """Return the anchor of the link."""
    try:
        return self._anchor
    except AttributeError:
        self.parse()
        return self._anchor
def astext(self, onsite=None):
    """
    Return a text representation of the link.

    :param onsite: if specified, present as a (possibly interwiki) link
        from the given site; otherwise, present as an internal link on
        the source site.
    """
    if onsite is None:
        onsite = self._source
    rendered = super().astext(onsite)
    if not self.section:
        return rendered
    # splice the '#section' fragment inside the closing brackets
    return '{}#{}]]'.format(rendered.rstrip(']'), self.section)
def _cmpkey(self):
"""
Key for comparison of Link objects.
Link objects are "equal" if and only if they are on the same site
and have the same normalized title, including section if any.
Link objects are sortable by site, then namespace, then title.
"""
return (self.site, self.namespace, self.title)
@classmethod
def fromPage(cls, page, source=None):
    """
    Create a Link to a Page.

    :param page: target Page
    :type page: pywikibot.page.Page
    :param source: Link from site source
    :type source: Site
    :rtype: pywikibot.page.Link
    """
    base_link = BaseLink.fromPage(page)
    # Bypass __init__/parse(): all lazy fields are filled in directly
    # from the already-resolved BaseLink and Page.
    link = cls.__new__(cls)
    link._site = base_link.site
    link._title = base_link.title
    link._namespace = base_link.namespace
    link._section = page.section()
    link._anchor = None
    # default to the configured default site when no source is given
    link._source = source or pywikibot.Site()

    return link
@classmethod
def langlinkUnsafe(cls, lang, title, source):
    """
    Create a "lang:title" Link linked from source.

    Assumes that the lang & title come clean, no checks are made.

    :param lang: target site code (language)
    :type lang: str
    :param title: target Page
    :type title: str
    :param source: Link from site source
    :type source: Site
    :rtype: pywikibot.page.Link
    """
    # Bypass __init__/parse(); every lazy attribute is assigned here.
    link = cls.__new__(cls)
    if source.family.interwiki_forward:
        # family forwards interwiki links to another family
        link._site = pywikibot.Site(lang, source.family.interwiki_forward)
    else:
        link._site = pywikibot.Site(lang, source.family.name)
    link._section = None
    link._source = source

    # default to the main namespace unless a known prefix is found below
    link._namespace = link._site.namespaces[0]
    if ':' in title:
        ns, t = title.split(':', 1)
        ns = link._site.namespaces.lookup_name(ns)
        if ns:
            # prefix is a real namespace on the target site
            link._namespace = ns
            title = t
    if '#' in title:
        # split off the '#section' fragment, trimming surrounding space
        t, sec = title.split('#', 1)
        title, link._section = t.rstrip(), sec.lstrip()
    else:
        link._section = None
    link._title = title
    return link
@classmethod
def create_separated(cls, link, source, default_namespace=0, section=None,
                     label=None):
    """
    Create a new instance but overwrite section or label.

    The returned Link instance is already parsed.

    :param link: The original link text.
    :type link: str
    :param source: The source of the link.
    :type source: Site
    :param default_namespace: The namespace this link uses when no
        namespace is defined in the link text.
    :type default_namespace: int
    :param section: The new section replacing the one in link. If None
        (default) it doesn't replace it.
    :type section: None or str
    :param label: The new label replacing the one in link. If None
        (default) it doesn't replace it.
    """
    link = cls(link, source, default_namespace)
    link.parse()
    if section:
        link._section = section
    elif section is not None:
        # an empty string explicitly clears the parsed section
        link._section = None
    if label:
        link._anchor = label
    elif label is not None:
        # an empty string explicitly clears the anchor
        link._anchor = ''
    return link
class SiteLink(BaseLink):

    """
    A single sitelink in a Wikibase item.

    Extends BaseLink by the following attribute:

    - badges: Any badges associated with the sitelink

    *New in version 3.0.*
    """

    # Components used for __repr__ (consumed by the BaseLink machinery)
    _items = ('_sitekey', '_rawtitle', 'badges')

    def __init__(self, title, site=None, badges=None):
        """
        Initializer.

        :param title: the title of the linked page including namespace
        :type title: str
        :param site: the Site object for the wiki linked to. Can be provided as
            either a Site instance or a db key, defaults to pywikibot.Site().
        :type site: pywikibot.Site or str
        :param badges: list of badges
        :type badges: [pywikibot.ItemPage]
        """
        # split of namespace from title
        namespace = None
        self._rawtitle = title
        if ':' in title:
            site, namespace, title = SiteLink._parse_namespace(title, site)

        super().__init__(title, namespace, site)

        badges = badges or []
        self._badges = set(badges)

    @staticmethod
    def _parse_namespace(title, site=None):
        """
        Parse enough of a title with a ':' to determine the namespace.

        :param site: the Site object for the wiki linked to. Can be provided as
            either a Site instance or a db key, defaults to pywikibot.Site().
        :type site: pywikibot.Site or str
        :param title: the title of the linked page including namespace
        :type title: str

        :return: a (site, namespace, title) tuple
        :rtype: (pywikibot.Site, pywikibot.Namespace or None, str)
        """
        # need a Site instance to evaluate local namespaces
        site = site or pywikibot.Site()
        if not isinstance(site, pywikibot.site.BaseSite):
            # a db key was passed; resolve it to a site object
            site = pywikibot.site.APISite.fromDBName(site)

        prefix = title[:title.index(':')].lower()  # part of text before :
        ns = site.namespaces.lookup_name(prefix)
        if ns:  # The prefix is a namespace in the source wiki
            namespace, _, title = title.partition(':')
        else:  # The ':' is part of the actual title see e.g. Q3700510
            namespace = None

        return (site, namespace, title)

    @property
    def badges(self):
        """
        Return a list of all badges associated with the link.

        :rtype: [pywikibot.ItemPage]
        """
        # return a copy so callers cannot mutate the internal set
        return list(self._badges)

    @classmethod
    def fromJSON(cls, data: dict, site=None):
        """
        Create a SiteLink object from JSON returned in the API call.

        :param data: JSON containing SiteLink data
        :param site: The Wikibase site
        :type site: pywikibot.site.DataSite

        :rtype: pywikibot.page.SiteLink
        """
        sl = cls(data['title'], data['site'])
        # badges are item pages on the data repository of the linked site
        repo = site or sl.site.data_repository()
        for badge in data.get('badges', []):
            sl._badges.add(pywikibot.ItemPage(repo, badge))
        return sl

    def toJSON(self) -> dict:
        """
        Convert the SiteLink to a JSON object for the Wikibase API.

        :return: Wikibase JSON
        """
        json = {
            'site': self._sitekey,
            'title': self._rawtitle,
            'badges': [badge.title() for badge in self.badges]
        }
        return json
# Utility functions for parsing page titles

# This regular expression will match any decimal and hexadecimal entity and
# also entities that might be named entities.  Bound method kept so callers
# invoke it as _ENTITY_SUB(replacement_func, text).
_ENTITY_SUB = re.compile(
    r'&(#(?P<decimal>\d+)|#x(?P<hex>[0-9a-fA-F]+)|(?P<name>[A-Za-z]+));').sub
# These characters are Html-illegal, but sadly you *can* find some of
# these and converting them to chr(decimal) is unsuitable.  The mapping
# re-points each illegal codepoint to the unicode character usually meant.
_ILLEGAL_HTML_ENTITIES_MAPPING = {
    128: 8364,  # €
    130: 8218,  # ‚
    131: 402,   # ƒ
    132: 8222,  # „
    133: 8230,  # …
    134: 8224,  # †
    135: 8225,  # ‡
    136: 710,   # ˆ
    137: 8240,  # ‰
    138: 352,   # Š
    139: 8249,  # ‹
    140: 338,   # Œ
    142: 381,   # Ž
    145: 8216,  # ‘
    146: 8217,  # ’
    147: 8220,  # “
    148: 8221,  # ”
    149: 8226,  # •
    150: 8211,  # –
    151: 8212,  # —
    152: 732,   # ˜
    153: 8482,  # ™
    154: 353,   # š
    155: 8250,  # ›
    156: 339,   # œ
    158: 382,   # ž
    159: 376    # Ÿ
}
def html2unicode(text: str, ignore=None, exceptions=None) -> str:
    """
    Replace HTML entities with equivalent unicode.

    :param ignore: HTML entity codepoints to ignore
    :type ignore: list of int
    :param exceptions: tag names within which entities are left untouched
    """
    # ensure that illegal &#129;, &#141; and &#157;, which have no known
    # values, never get converted to chr(129), chr(141) or chr(157)
    ignore = {_ILLEGAL_HTML_ENTITIES_MAPPING.get(entity, entity)
              for entity in (ignore or [])} | {129, 141, 157}

    def handle_entity(match):
        # match.string stores original text so we do not need
        # to pass it to handle_entity, ♥ Python
        if textlib.isDisabled(match.string, match.start(), tags=exceptions):
            return match.group(0)

        if match.group('decimal'):
            unicode_codepoint = int(match.group('decimal'))
        elif match.group('hex'):
            unicode_codepoint = int(match.group('hex'), 16)
        elif match.group('name'):
            unicode_codepoint = name2codepoint.get(match.group('name'), False)

        unicode_codepoint = _ILLEGAL_HTML_ENTITIES_MAPPING.get(
            unicode_codepoint, unicode_codepoint)

        if unicode_codepoint and unicode_codepoint not in ignore:
            return chr(unicode_codepoint)
        # Leave the entity unchanged
        return match.group(0)

    return _ENTITY_SUB(handle_entity, text)
@deprecated_args(site='encodings')
@deprecated('pywikibot.tools.chars.url2string', since='6.2.0')
def url2unicode(title: str, encodings='utf-8') -> str:
    """
    DEPRECATED. Convert URL-encoded text to unicode using several encoding.

    Uses the first encoding that doesn't cause an error.

    :param title: URL-encoded character data to convert
    :param encodings: Encodings to attempt to use during conversion.
    :type encodings: str, list or Site
    :raise UnicodeError: Could not convert using any encoding.
    """
    if isinstance(encodings, pywikibot.site.BaseSite):
        # use all possible encodings from Site object
        encodings = encodings.encodings()
        issue_deprecation_warning(
            'Passing BaseSite object to encodings parameter',
            # fixed typo: the suggested replacement is the real method name
            # BaseSite.encodings(), not 'endcodings'
            'BaseSite.encodings()',
            depth=1,
            since='6.2.0'
        )

    return pywikibot.tools.chars.url2string(title, encodings)
# Redirect removed public names to their new homes, emitting a
# DeprecationWarning on attribute access.
wrapper = ModuleDeprecationWrapper(__name__)
wrapper.add_deprecated_attr(
    'UnicodeToAsciiHtml',
    replacement_name='pywikibot.tools.chars.string_to_ascii_html',
    since='6.2.0')
wrapper.add_deprecated_attr(
    'unicode2html',
    replacement_name='pywikibot.tools.chars.string2html',
    since='6.2.0')
| 38.53926 | 80 | 0.564353 |
a360f84302ee5efb13419c211018c149c6542c03 | 645 | py | Python | tests/test_line.py | firasm/pysketcher | ef0c25b11b739197e254d714c69c86e107059be3 | [
"MIT"
] | 27 | 2020-09-03T16:59:32.000Z | 2022-03-11T08:21:25.000Z | tests/test_line.py | ReblochonMasque/pysketcher-1 | 9e804f4855edc6962b68e92091f35c2e960df813 | [
"MIT"
] | 395 | 2020-09-05T06:32:54.000Z | 2022-03-31T12:06:55.000Z | tests/test_line.py | ReblochonMasque/pysketcher-1 | 9e804f4855edc6962b68e92091f35c2e960df813 | [
"MIT"
] | 4 | 2021-04-19T09:23:06.000Z | 2021-11-12T20:21:30.000Z | from hypothesis import assume, HealthCheck, settings
from pysketcher import Line, Point
from tests.utils import given_inferred
class TestLine:
@given_inferred
@settings(suppress_health_check=[HealthCheck.filter_too_much])
def test_start(self, a: Point, b: Point) -> None:
assume(a != b)
line = Line(a, b)
assert line.start == a
assert line.end == b
# def test_rotate(self, line: Line, center: Point, theta: float, expected: Line):
# result = line.rotate(theta, center)
# assert abs(result.start - expected.start) < 1e-14
# assert abs(result.end - expected.end) < 1e-14
| 32.25 | 85 | 0.662016 |
70c8e500b491cc61f9621bb13d52a07cec3e0f2b | 17,585 | py | Python | nnutils/train_utils.py | silviazuffi/smalst | f7871d6e53331938ac8e15491d88988f3645020c | [
"MIT"
] | 121 | 2019-09-11T18:26:06.000Z | 2022-03-09T07:25:12.000Z | nnutils/train_utils.py | silviazuffi/smalst | f7871d6e53331938ac8e15491d88988f3645020c | [
"MIT"
] | 27 | 2019-09-19T11:06:36.000Z | 2022-03-12T00:05:51.000Z | nnutils/train_utils.py | silviazuffi/smalst | f7871d6e53331938ac8e15491d88988f3645020c | [
"MIT"
] | 21 | 2019-09-12T00:36:05.000Z | 2021-11-16T18:01:12.000Z | """
Generic Training Utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import os
import os.path as osp
import time
import pdb
from absl import flags
import pickle as pkl
import scipy.misc
import numpy as np
from ..nnutils import geom_utils
import time
from ..utils.visualizer import Visualizer
from .smal_mesh_eval import smal_mesh_eval
#-------------- flags -------------#
#----------------------------------#
## Flags for training
curr_path = osp.dirname(osp.abspath(__file__))
cache_path = osp.join(curr_path, '..', 'cachedir')
flags.DEFINE_string('name', 'exp_name', 'Experiment Name')
# flags.DEFINE_string('cache_dir', cache_path, 'Cachedir') # Not used!
flags.DEFINE_integer('gpu_id', 0, 'Which gpu to use')
flags.DEFINE_integer('num_epochs', 1000, 'Number of epochs to train')
flags.DEFINE_integer('num_pretrain_epochs', 0, 'If >0, we will pretain from an existing saved model.')
flags.DEFINE_float('learning_rate', 0.0001, 'learning rate')
flags.DEFINE_float('beta1', 0.9, 'Momentum term of adam')
flags.DEFINE_bool('use_sgd', False, 'if true uses sgd instead of adam, beta1 is used as mmomentu')
flags.DEFINE_integer('batch_size', 8, 'Size of minibatches')
flags.DEFINE_integer('num_iter', 0, 'Number of training iterations. 0 -> Use epoch_iter')
flags.DEFINE_integer('new_dataset_freq', 2, 'at which epoch to get a new dataset')
## Flags for logging and snapshotting
flags.DEFINE_string('checkpoint_dir', osp.join(cache_path, 'snapshots'),
'Root directory for output files')
flags.DEFINE_integer('print_freq', 20, 'scalar logging frequency')
flags.DEFINE_integer('save_latest_freq', 10000, 'save latest model every x iterations')
flags.DEFINE_integer('save_epoch_freq', 25, 'save model every k epochs')
flags.DEFINE_bool('save_training_imgs', False, 'save mask and images for debugging')
## Flags for visualization
flags.DEFINE_integer('display_freq', 100, 'visuals logging frequency')
flags.DEFINE_boolean('display_visuals', False, 'whether to display images')
flags.DEFINE_boolean('print_scalars', True, 'whether to print scalars')
flags.DEFINE_boolean('plot_scalars', False, 'whether to plot scalars')
flags.DEFINE_boolean('is_train', True, 'Are we training ?')
flags.DEFINE_integer('display_id', 1, 'Display Id')
flags.DEFINE_integer('display_winsize', 256, 'Display Size')
flags.DEFINE_integer('display_port', 8097, 'Display port')
flags.DEFINE_integer('display_single_pane_ncols', 0, 'if positive, display all images in a single visdom web panel with certain number of images per row.')
flags.DEFINE_integer('num_train_epoch', 40, '')
flags.DEFINE_boolean('do_validation', False, 'compute on validation set at each epoch')
flags.DEFINE_boolean('is_var_opt', False, 'set to True to optimize over pose scale trans')
def set_bn_eval(m):
    """Switch 1-d/2-d batch-norm modules to eval mode; leave others alone.

    Intended for use with ``module.apply(set_bn_eval)`` so batch-norm
    statistics stay frozen while the rest of the network trains.
    """
    module_name = m.__class__.__name__
    if 'BatchNorm1d' in module_name or 'BatchNorm2d' in module_name:
        m.eval()
#-------- tranining class ---------#
#----------------------------------#
class Trainer():
    """Generic training-loop driver.

    Subclasses implement dataset/model/criterion setup and the forward
    pass; this base class owns the optimizer, logging, snapshotting and
    the epoch/batch loop.  When ``opts.is_optimization`` is set, the loop
    runs in per-image optimization mode: the network weights are frozen
    and only ``model.op_features`` are optimized against the texture loss.
    """

    def __init__(self, opts):
        # opts: parsed absl flags; also dumped to <save_dir>/opts.log
        self.opts = opts
        self.gpu_id = opts.gpu_id
        self.Tensor = torch.cuda.FloatTensor if (self.gpu_id is not None) else torch.Tensor
        self.invalid_batch = False #the trainer can optionally reset this every iteration during set_input call
        self.save_dir = os.path.join(opts.checkpoint_dir, opts.name)
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        log_file = os.path.join(self.save_dir, 'opts.log')
        with open(log_file, 'w') as f:
            for k in dir(opts):
                f.write('{}: {}\n'.format(k, opts.__getattr__(k)))

    # helper saving function that can be used by subclasses
    def save_network(self, network, network_label, epoch_label, gpu_id=None):
        """Save ``network`` to <save_dir>/<label>_net_<epoch>.pth (CPU tensors)."""
        save_filename = '{}_net_{}.pth'.format(network_label, epoch_label)
        save_path = os.path.join(self.save_dir, save_filename)
        torch.save(network.cpu().state_dict(), save_path)
        if gpu_id is not None and torch.cuda.is_available():
            # move the network back to the GPU after the CPU-side save
            network.cuda(device=gpu_id)
        return

    # helper loading function that can be used by subclasses
    def load_network(self, network, network_label, epoch_label, network_dir=None):
        """Load weights saved by :meth:`save_network` into ``network``."""
        save_filename = '{}_net_{}.pth'.format(network_label, epoch_label)
        if network_dir is None:
            network_dir = self.save_dir
        save_path = os.path.join(network_dir, save_filename)
        network.load_state_dict(torch.load(save_path))
        return

    def define_model(self):
        '''Should be implemented by the child class.'''
        raise NotImplementedError

    def init_dataset(self):
        '''Should be implemented by the child class.'''
        raise NotImplementedError

    def define_criterion(self):
        '''Should be implemented by the child class.'''
        raise NotImplementedError

    def set_input(self, batch):
        '''Should be implemented by the child class.'''
        raise NotImplementedError

    def forward(self):
        '''Should compute self.total_loss. To be implemented by the child class.'''
        raise NotImplementedError

    def save(self, epoch_prefix):
        '''Saves the model.'''
        self.save_network(self.model, 'pred', epoch_prefix, gpu_id=self.opts.gpu_id)
        return

    def get_current_visuals(self):
        '''Should be implemented by the child class.'''
        raise NotImplementedError

    def get_current_scalars(self):
        '''Should be implemented by the child class.'''
        raise NotImplementedError

    def get_current_points(self):
        '''Should be implemented by the child class.'''
        raise NotImplementedError

    def init_training(self):
        """Build dataset, model, criterion and the optimizer (Adam or SGD)."""
        opts = self.opts
        self.init_dataset()
        self.define_model()
        self.define_criterion()
        if opts.use_sgd:
            self.optimizer = torch.optim.SGD(
                self.model.parameters(), lr=opts.learning_rate, momentum=opts.beta1)
        else:
            self.optimizer = torch.optim.Adam(
                self.model.parameters(), lr=opts.learning_rate, betas=(opts.beta1, 0.999))

    def save_current(self, opts, initial_loss=0, final_loss=0, code='_'):
        """Dump current predictions (mask/texture images, pose/shape pickle).

        Output filenames are derived from ``opts.image_file_string`` by
        replacing '.png' with ``code`` + suffix.  NOTE: the integer
        defaults for the loss arguments are never used as-is — callers
        always pass tensors (``.data.detach()`` is applied below).
        """
        res_dict = {
            'final_loss': final_loss.data.detach().cpu().numpy(),
            'delta_v': self.delta_v.data.detach().cpu().numpy(),
            'kp_pred': self.kp_pred.data.detach().cpu().numpy(),
            'scale': self.scale_pred.data.detach().cpu().numpy(),
            'trans': self.trans_pred.data.detach().cpu().numpy(),
            'pose': self.pose_pred.data.detach().cpu().numpy(),
            'initial_loss': initial_loss.data.detach().cpu().numpy(),
        }
        scipy.misc.imsave(opts.image_file_string.replace('.png', code + 'mask.png'),
                          255*self.mask_pred.detach().cpu()[0,:,:])
        # sample the predicted texture through the predicted uv flow
        uv_flows = self.model.texture_predictor.uvimage_pred
        uv_flows = uv_flows.permute(0, 2, 3, 1)
        uv_images = torch.nn.functional.grid_sample(self.imgs, uv_flows)
        scipy.misc.imsave(opts.image_file_string.replace('.png', code + 'tex.png'),
                          255*np.transpose(uv_images.detach().cpu()[0,:,:,:], (1, 2, 0)))
        pkl.dump(res_dict, open(opts.image_file_string.replace('.png', code + 'res.pkl'), 'wb'))

    def train(self):
        """Run the main epoch/batch loop (training or per-image optimization)."""
        time_stamp = str(time.time())[:10]
        opts = self.opts
        self.smoothed_total_loss = 0
        self.visualizer = Visualizer(opts)
        visualizer = self.visualizer
        total_steps = 0
        dataset_size = len(self.dataloader)
        print('dataset_size '+str(dataset_size))
        v_log_file = os.path.join(self.save_dir, 'validation.log')
        curr_epoch_err = 1000
        if opts.is_optimization:
            # Freeze every network sub-module: in optimization mode only
            # model.op_features is updated (optimizer rebuilt below).
            self.model.eval()
            self.model.texture_predictor.eval()
            for param in self.model.texture_predictor.parameters():
                param.requires_grad = False
            self.model.encoder.eval()
            for param in self.model.encoder.parameters():
                param.requires_grad = False
            self.model.code_predictor.shape_predictor.pred_layer.eval()
            for param in self.model.code_predictor.shape_predictor.pred_layer.parameters():
                param.requires_grad = False
            self.model.code_predictor.shape_predictor.fc.eval()
            for param in self.model.code_predictor.shape_predictor.fc.parameters():
                param.requires_grad = False
            self.model.code_predictor.scale_predictor.pred_layer.eval()
            for param in self.model.code_predictor.scale_predictor.pred_layer.parameters():
                param.requires_grad = False
            self.model.code_predictor.trans_predictor.pred_layer_xy.eval()
            for param in self.model.code_predictor.trans_predictor.pred_layer_xy.parameters():
                param.requires_grad = False
            self.model.code_predictor.trans_predictor.pred_layer_z.eval()
            for param in self.model.code_predictor.trans_predictor.pred_layer_z.parameters():
                param.requires_grad = False
            self.model.code_predictor.pose_predictor.pred_layer.eval()
            for param in self.model.code_predictor.pose_predictor.pred_layer.parameters():
                param.requires_grad = False
            # keep batch-norm statistics frozen as well
            self.model.apply(set_bn_eval)

        if opts.is_optimization:
            code = osp.splitext(osp.basename(opts.image_file_string))[0]
            visualizer.print_message(code)
            # background_model_top doubles as a "first iteration" flag below
            self.background_model_top = None
            set_optimization_input = True
        if True:
            for epoch in range(opts.num_pretrain_epochs, opts.num_epochs):
                epoch_iter = 0
                for i, batch in enumerate(self.dataloader):
                    iter_start_time = time.time()
                    if not opts.is_optimization:
                        self.set_input(batch)
                    else:
                        # in optimization mode only the very first batch is read
                        if set_optimization_input:
                            self.set_input(batch)
                    if not self.invalid_batch:
                        self.optimizer.zero_grad()
                        self.forward()
                        if opts.is_optimization:
                            if set_optimization_input:
                                initial_loss = self.tex_loss
                                print("Initial loss")
                                print(initial_loss)
                                current_loss = initial_loss
                                opt_loss = current_loss
                                # Now the input should be the image prediction
                                self.set_optimization_input()
                                set_optimization_input = False
                                self.save_current(opts, initial_loss, current_loss, code='_init_')
                            else:
                                current_loss = self.tex_loss
                                if current_loss < opt_loss:
                                    # keep a snapshot of the best result so far
                                    opt_loss = current_loss
                                    self.save_current(opts, initial_loss, current_loss, code='_best_')
                                    visualizer.print_message('save current best ' + str(current_loss))

                        # self.background_model_top is not used but exloited as a flag
                        if opts.is_optimization and self.background_model_top is None:
                            # Create background model with current prediction:
                            # mean color of the non-object pixels, computed
                            # separately for the top and bottom image halves.
                            M = np.abs(self.mask_pred.cpu().detach().numpy()[0,:,:]-1)
                            I = np.transpose(self.imgs.cpu().detach().numpy()[0,:,:,:],(1,2,0))
                            N = 128
                            # Top half of the image
                            self.background_model_top = np.zeros((3))
                            n = np.sum(M[:N,:])
                            for c in range(3):
                                J = I[:,:,c] * M
                                self.background_model_top[c] = np.sum(J[:N,:])/n
                            self.background_model_bottom = np.zeros((3))
                            n = np.sum(M[N:,:])
                            for c in range(3):
                                J = I[:,:,c] * M
                                self.background_model_bottom[c] = np.sum(J[N:,:])/n
                            # rebuild the optimizer over op_features only
                            if opts.use_sgd:
                                self.optimizer = torch.optim.SGD(
                                    [self.model.op_features], lr=opts.learning_rate, momentum=opts.beta1)
                            else:
                                if opts.is_var_opt:
                                    self.optimizer = torch.optim.Adam(
                                        self.model.op_features, lr=opts.learning_rate, betas=(opts.beta1, 0.999))
                                else:
                                    self.optimizer = torch.optim.Adam(
                                        [self.model.op_features], lr=opts.learning_rate, betas=(opts.beta1, 0.999))

                        # exponential moving average of the loss for logging
                        self.smoothed_total_loss = self.smoothed_total_loss*0.99 + 0.01*self.total_loss.data
                        self.total_loss.backward()
                        self.optimizer.step()

                    total_steps += 1
                    epoch_iter += 1

                    if opts.display_visuals and (total_steps % opts.display_freq == 0):
                        iter_end_time = time.time()
                        print('time/itr %.2g' % ((iter_end_time - iter_start_time)/opts.display_freq))
                        visualizer.display_current_results(self.get_current_visuals(), epoch)
                        visualizer.plot_current_points(self.get_current_points())

                    if opts.print_scalars and (total_steps % opts.print_freq == 0):
                        scalars = self.get_current_scalars()
                        visualizer.print_current_scalars(epoch, epoch_iter, scalars)
                        if opts.plot_scalars:
                            visualizer.plot_current_scalars(epoch, float(epoch_iter)/dataset_size, opts, scalars)

                    if total_steps % opts.save_latest_freq == 0:
                        print('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch, total_steps))
                        self.save('latest')

                    if total_steps == opts.num_iter:
                        return

                if opts.do_validation:
                    # evaluate on the validation set; keep the best snapshot
                    self.save('100000')
                    epoch_err = smal_mesh_eval(num_train_epoch=100000)
                    if epoch_err <= curr_epoch_err:
                        print('update best model')
                        curr_epoch_err = epoch_err
                        self.save('best')
                    with open(v_log_file, 'a') as f:
                        f.write('{}: {}\n'.format(epoch, epoch_err))

                '''
                if opts.is_optimization and (epoch==(opts.num_epochs-1) or epoch==opts.num_pretrain_epochs):
                    img_pred = self.texture_pred*self.mask_pred + self.background_imgs*(torch.abs(self.mask_pred - 1.))
                    T = img_pred.cpu().detach().numpy()[0,:,:,:]
                    T = np.transpose(T,(1,2,0))
                    scipy.misc.imsave(code + '_img_pred_'+str(epoch)+'.png', T)
                    img_pred = self.texture_pred*self.mask_pred + self.imgs*(torch.abs(self.mask_pred - 1.))
                    T = img_pred.cpu().detach().numpy()[0,:,:,:]
                    T = np.transpose(T,(1,2,0))
                    scipy.misc.imsave(code + '_img_ol_'+str(epoch)+'.png', T)
                '''
                '''
                if opts.is_optimization and opts.save_training_imgs and np.mod(epoch,20)==0:
                    img_pred = self.texture_pred*self.mask_pred + self.background_imgs*(torch.abs(self.mask_pred - 1.))
                    T = img_pred.cpu().detach().numpy()[0,:,:,:]
                    T = np.transpose(T,(1,2,0))
                    scipy.misc.imsave(opts.name + '_img_pred_'+str(epoch)+'.png', T)
                    img_pred = self.texture_pred*self.mask_pred + self.imgs*(torch.abs(self.mask_pred - 1.))
                    T = img_pred.cpu().detach().numpy()[0,:,:,:]
                    T = np.transpose(T,(1,2,0))
                    scipy.misc.imsave(opts.name + '_img_ol_'+str(epoch)+'.png', T)
                '''
                '''
                if opts.is_optimization and epoch == opts.num_pretrain_epochs:
                    T = 255*self.imgs.cpu().detach().numpy()[0,:,:,:]
                    T = np.transpose(T,(1,2,0))
                    scipy.misc.imsave(code+'_img_gt.png', T)
                '''
                if (epoch+1) % opts.save_epoch_freq == 0:
                    print('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch, total_steps))
                    if opts.is_optimization:
                        self.save_current(opts, initial_loss, current_loss, code=None)
                    else:
                        self.save(epoch+1)
                        self.save('latest')

        if opts.is_optimization:
            if opt_loss < initial_loss:
                visualizer.print_message('updated')
c316914573b4e32e600a95968ae4101c7a8e7e69 | 15,222 | py | Python | salt/modules/portage_config.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 3 | 2016-09-03T06:26:42.000Z | 2019-06-30T13:04:53.000Z | salt/modules/portage_config.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | null | null | null | salt/modules/portage_config.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 1 | 2021-12-02T15:30:00.000Z | 2021-12-02T15:30:00.000Z | # -*- coding: utf-8 -*-
'''
Configure ``portage(5)``
'''
# Import python libs
from __future__ import absolute_import
import os
import shutil
# Import salt libs
import salt.utils
# Import third party libs
try:
import portage
HAS_PORTAGE = True
except ImportError:
HAS_PORTAGE = False
import sys
if os.path.isdir('/usr/lib/portage/pym'):
try:
# In a virtualenv, the portage python path needs to be manually
# added
sys.path.insert(0, '/usr/lib/portage/pym')
import portage
HAS_PORTAGE = True
except ImportError:
pass
# Template for the per-type config location, e.g. '/etc/portage/package.use'
BASE_PATH = '/etc/portage/package.{0}'
# package.* suffixes this module knows how to manage
SUPPORTED_CONFS = ('accept_keywords', 'env', 'license', 'mask', 'properties',
                   'unmask', 'use')
def __virtual__():
    '''
    Confirm this module is on a Gentoo based system.
    '''
    if not HAS_PORTAGE or __grains__['os'] != 'Gentoo':
        return False
    return 'portage_config'
def _porttree():
    '''
    Return the portage tree (porttree) dbapi wrapper for the active root.
    '''
    return portage.db[portage.root]['porttree']
def _p_to_cp(p):
    '''
    Convert a package name or a DEPEND atom to category/package format.
    Raises an exception if program name is ambiguous.

    Returns ``None`` when nothing in the tree matches ``p``.
    '''
    ret = _porttree().dbapi.xmatch("match-all", p)
    if ret:
        # first match's key is the canonical category/package name
        return portage.cpv_getkey(ret[0])
    return None
def enforce_nice_config():
    '''
    Enforce a nice tree structure for /etc/portage/package.* configuration
    files.

    .. seealso::
       :py:func:`salt.modules.ebuild.ex_mod_init`
          for information on automatically running this when pkg is used.


    CLI Example:

    .. code-block:: bash

        salt '*' portage_config.enforce_nice_config
    '''
    # First turn flat files into per-category directories, then move every
    # entry into the file matching its category/package.
    _convert_all_package_confs_to_dir()
    _order_all_package_confs()
def _convert_all_package_confs_to_dir():
    '''
    Convert all /etc/portage/package.* configuration files to directories.
    '''
    for conf_file in SUPPORTED_CONFS:
        _package_conf_file_to_dir(conf_file)
def _order_all_package_confs():
    '''
    Place all entries in /etc/portage/package.* config dirs in the correct
    file.
    '''
    for conf_file in SUPPORTED_CONFS:
        _package_conf_ordering(conf_file)
    # fold the legacy package.keywords file into package.accept_keywords
    _unify_keywords()
def _unify_keywords():
    '''
    Merge /etc/portage/package.keywords and
    /etc/portage/package.accept_keywords.

    The legacy location (file or directory) is read line by line, each
    non-comment entry is appended to package.accept_keywords, and the
    legacy location is removed afterwards.
    '''
    old_path = BASE_PATH.format('keywords')
    if os.path.exists(old_path):
        if os.path.isdir(old_path):
            for triplet in os.walk(old_path):
                for file_name in triplet[2]:
                    file_path = '{0}/{1}'.format(triplet[0], file_name)
                    with salt.utils.fopen(file_path) as fh_:
                        for line in fh_:
                            line = line.strip()
                            if line and not line.startswith('#'):
                                append_to_package_conf(
                                    'accept_keywords', string=line)
            shutil.rmtree(old_path)
        else:
            # legacy flat file
            with salt.utils.fopen(old_path) as fh_:
                for line in fh_:
                    line = line.strip()
                    if line and not line.startswith('#'):
                        append_to_package_conf('accept_keywords', string=line)
            os.remove(old_path)
def _package_conf_file_to_dir(file_name):
    '''
    Convert a config file to a config directory.

    Returns True when the path is (now) a directory, False when it already
    was one. Flat files are renamed aside, replayed entry by entry through
    append_to_package_conf(), then the temporary copy is deleted.
    '''
    if file_name in SUPPORTED_CONFS:
        path = BASE_PATH.format(file_name)
        if os.path.exists(path):
            if os.path.isdir(path):
                return False
            else:
                os.rename(path, path + '.tmpbak')
                os.mkdir(path, 0o755)
                with salt.utils.fopen(path + '.tmpbak') as fh_:
                    for line in fh_:
                        line = line.strip()
                        if line and not line.startswith('#'):
                            append_to_package_conf(file_name, string=line)
                os.remove(path + '.tmpbak')
                return True
        else:
            os.mkdir(path, 0o755)
            return True
def _package_conf_ordering(conf, clean=True, keep_backup=False):
    '''
    Move entries in the correct file.

    :param conf: one of SUPPORTED_CONFS ('use', 'mask', ...)
    :param clean: remove directories left empty by the reshuffle
    :param keep_backup: keep the per-file '.bak' copies made before editing
    '''
    if conf in SUPPORTED_CONFS:
        rearrange = []
        path = BASE_PATH.format(conf)

        backup_files = []

        for triplet in os.walk(path):
            for file_name in triplet[2]:
                file_path = '{0}/{1}'.format(triplet[0], file_name)
                cp = triplet[0][len(path) + 1:] + '/' + file_name

                shutil.copy(file_path, file_path + '.bak')
                backup_files.append(file_path + '.bak')

                # BUGFIX: the original compared the *list* cp.split('/') to
                # the int 2 (always True on Py2, TypeError on Py3); the
                # intent is "not a category/package path" -> re-file it.
                if cp[0] == '/' or len(cp.split('/')) > 2:
                    rearrange.extend(list(salt.utils.fopen(file_path)))
                    os.remove(file_path)
                else:
                    # keep lines whose atom belongs here; queue the rest
                    new_contents = ''
                    with salt.utils.fopen(file_path, 'r+') as file_handler:
                        for line in file_handler:
                            try:
                                atom = line.strip().split()[0]
                            except IndexError:
                                new_contents += line
                            else:
                                if atom[0] == '#' or \
                                        portage.dep_getkey(atom) == cp:
                                    new_contents += line
                                else:
                                    rearrange.append(line.strip())
                        if len(new_contents) != 0:
                            file_handler.seek(0)
                            file_handler.truncate(len(new_contents))
                            file_handler.write(new_contents)
                    if len(new_contents) == 0:
                        os.remove(file_path)

        # replay misplaced entries so they land in their proper files
        for line in rearrange:
            append_to_package_conf(conf, string=line)

        if not keep_backup:
            for bfile in backup_files:
                try:
                    os.remove(bfile)
                except OSError:
                    pass

        if clean:
            for triplet in os.walk(path):
                if len(triplet[1]) == 0 and len(triplet[2]) == 0 and \
                        triplet[0] != path:
                    shutil.rmtree(triplet[0])
def _merge_flags(*args):
    '''
    Merges multiple lists of flags removing duplicates and resolving conflicts
    giving priority to lasts lists.
    '''
    tmp = portage.flatten(args)
    flags = {}
    for flag in tmp:
        if flag[0] == '-':
            flags[flag[1:]] = False
        else:
            flags[flag] = True
    tmp = []
    for k, v in flags.items():
        if v:
            tmp.append(k)
        else:
            tmp.append('-' + k)
    # Next sort is just aesthetic, can be commented for a small performance
    # boost.  BUGFIX: list.sort(cmp=...) and the cmp builtin were removed in
    # Python 3; an equivalent key function is used instead.
    tmp.sort(key=lambda x: x.lstrip('-'))
    return tmp
def append_to_package_conf(conf, atom='', flags=None, string='', overwrite=False):
    '''
    Append a string or a list of flags for a given package or DEPEND atom to a
    given configuration file.

    :param conf: one of SUPPORTED_CONFS ('use', 'accept_keywords', ...)
    :param atom: package name or DEPEND atom (used when ``string`` is empty)
    :param flags: list of flags to merge for ``atom``
    :param string: full config line ('<atom> <flags...>'); overrides atom/flags
    :param overwrite: replace any existing flags instead of merging

    CLI Example:

    .. code-block:: bash

        salt '*' portage_config.append_to_package_conf use string="app-admin/salt ldap -libvirt"
        salt '*' portage_config.append_to_package_conf use atom=">=app-admin/salt-0.14.1" flags="['ldap', '-libvirt']"
    '''
    if flags is None:
        flags = []
    if conf in SUPPORTED_CONFS:
        if not string:
            if '/' not in atom:
                atom = _p_to_cp(atom)
                if not atom:
                    return
            string = '{0} {1}'.format(atom, ' '.join(flags))
            new_flags = list(flags)
        else:
            atom = string.strip().split()[0]
            new_flags = portage.dep.strip_empty(string.strip().split(' '))[1:]
            if '/' not in atom:
                atom = _p_to_cp(atom)
                string = '{0} {1}'.format(atom, ' '.join(new_flags))
                if not atom:
                    return

        to_delete_if_empty = []
        if conf == 'accept_keywords':
            # '-~ARCH' removes the testing keyword: drop a bare-atom line
            if '-~ARCH' in new_flags:
                new_flags.remove('-~ARCH')
                to_delete_if_empty.append(atom)
            # '~ARCH' is expressed as a bare-atom line in accept_keywords
            if '~ARCH' in new_flags:
                new_flags.remove('~ARCH')
                append_to_package_conf(conf, string=atom, overwrite=overwrite)
                if not new_flags:
                    return

        # Next sort is just aesthetic, can be commented for a small performance
        # boost.  BUGFIX: list.sort(cmp=...) was removed in Python 3; use an
        # equivalent key function.
        new_flags.sort(key=lambda x: x.lstrip('-'))

        package_file = _p_to_cp(atom)
        if not package_file:
            return
        psplit = package_file.split('/')
        if len(psplit) == 2:
            # make sure the category sub-directory exists
            pdir = BASE_PATH.format(conf) + '/' + psplit[0]
            if not os.path.exists(pdir):
                os.mkdir(pdir, 0o755)

        complete_file_path = BASE_PATH.format(conf) + '/' + package_file

        try:
            shutil.copy(complete_file_path, complete_file_path + '.bak')
        except IOError:
            pass

        try:
            file_handler = salt.utils.fopen(complete_file_path, 'r+')
        except IOError:
            file_handler = salt.utils.fopen(complete_file_path, 'w+')

        new_contents = ''
        added = False

        for l in file_handler:
            l_strip = l.strip()
            if l_strip == '':
                new_contents += '\n'
            elif l_strip[0] == '#':
                new_contents += l
            elif l_strip.split()[0] == atom:
                if l_strip in to_delete_if_empty:
                    continue
                if overwrite:
                    new_contents += string.strip() + '\n'
                    added = True
                else:
                    old_flags = portage.dep.strip_empty(l_strip.split(' '))[1:]
                    if conf == 'accept_keywords':
                        if not old_flags:
                            # keep the bare-atom ('~ARCH') line as-is
                            new_contents += l
                            if not new_flags:
                                added = True
                            continue
                        elif not new_flags:
                            continue
                    merged_flags = _merge_flags(old_flags, new_flags)
                    if merged_flags:
                        new_contents += '{0} {1}\n'.format(
                            atom, ' '.join(merged_flags))
                    else:
                        new_contents += '{0}\n'.format(atom)
                    added = True
            else:
                new_contents += l
        if not added:
            new_contents += string.strip() + '\n'

        file_handler.seek(0)
        file_handler.truncate(len(new_contents))
        file_handler.write(new_contents)
        file_handler.close()

        try:
            os.remove(complete_file_path + '.bak')
        except OSError:
            pass
def append_use_flags(atom, uses=None, overwrite=False):
    '''
    Append a list of use flags for a given package or DEPEND atom.

    atom -- package or DEPEND atom, optionally carrying a use-dep block such
            as "app-admin/salt[ldap,-libvirt]"
    uses -- explicit list of use flags; when falsy the flags are parsed out
            of the atom's [...] block via portage.dep.dep_getusedeps
    overwrite -- passed through to append_to_package_conf

    CLI Example:
    .. code-block:: bash
        salt '*' portage_config.append_use_flags "app-admin/salt[ldap, -libvirt]"
        salt '*' portage_config.append_use_flags ">=app-admin/salt-0.14.1" "['ldap', '-libvirt']"
    '''
    if not uses:
        uses = portage.dep.dep_getusedeps(atom)
    if len(uses) == 0:
        return
    # Strip the trailing use-dep block only when one is actually present.
    # The previous unconditional slice atom[:atom.rfind('[')] chopped the
    # atom's last character whenever rfind returned -1 (flags passed
    # explicitly with a bracket-less atom).
    bracket = atom.rfind('[')
    if bracket != -1:
        atom = atom[:bracket]
    append_to_package_conf('use', atom=atom, flags=uses, overwrite=overwrite)
def get_flags_from_package_conf(conf, atom):
    '''
    Get flags for a given package or DEPEND atom.
    Warning: This only works if the configuration files tree is in the correct
    format (the one enforced by enforce_nice_config)
    CLI Example:
    .. code-block:: bash
        salt '*' portage_config.get_flags_from_package_conf license salt
    '''
    if conf in SUPPORTED_CONFS:
        package_file = '{0}/{1}'.format(BASE_PATH.format(conf), _p_to_cp(atom))
        if '/' not in atom:
            atom = _p_to_cp(atom)
        try:
            match_list = set(_porttree().dbapi.xmatch("match-all", atom))
        except AttributeError:
            return []
        flags = []
        try:
            file_handler = salt.utils.fopen(package_file)
        except IOError:
            return []
        else:
            for line in file_handler:
                line = line.strip()
                # Skip blank lines and comments: calling split()[0] on an
                # empty line raised IndexError in the previous version.
                if not line or line.startswith('#'):
                    continue
                line_package = line.split()[0]
                line_list = _porttree().dbapi.xmatch("match-all", line_package)
                if match_list.issubset(line_list):
                    f_tmp = portage.dep.strip_empty(line.split()[1:])
                    if f_tmp:
                        flags.extend(f_tmp)
                    else:
                        # A matching atom with no flags means keyworded ~ARCH.
                        flags.append('~ARCH')
            # Close the handle; the previous version leaked it.
            file_handler.close()
            return _merge_flags(flags)
def has_flag(conf, atom, flag):
    '''
    Verify if the given package or DEPEND atom has the given flag.
    Warning: This only works if the configuration files tree is in the correct
    format (the one enforced by enforce_nice_config)
    CLI Example:
    .. code-block:: bash
        salt '*' portage_config.has_flag license salt Apache-2.0
    '''
    # Membership test directly yields the boolean the old if/return pair built.
    return flag in get_flags_from_package_conf(conf, atom)
def get_missing_flags(conf, atom, flags):
    '''
    Find out which of the given flags are currently not set.
    CLI Example:
    .. code-block:: bash
        salt '*' portage_config.get_missing_flags use salt "['ldap', '-libvirt', 'openssl']"
    '''
    # Keep only the flags that are not already configured for the atom.
    return [candidate for candidate in flags
            if not has_flag(conf, atom, candidate)]
def has_use(atom, use):
    '''
    Verify if the given package or DEPEND atom has the given use flag.
    This is a thin convenience wrapper around has_flag with conf fixed
    to 'use'.
    Warning: This only works if the configuration files tree is in the correct
    format (the one enforced by enforce_nice_config)
    CLI Example:
    .. code-block:: bash
        salt '*' portage_config.has_use salt libvirt
    '''
    return has_flag('use', atom, use)
def is_present(conf, atom):
    '''
    Tell if a given package or DEPEND atom is present in the configuration
    files tree.
    Warning: This only works if the configuration files tree is in the correct
    format (the one enforced by enforce_nice_config)
    CLI Example:
    .. code-block:: bash
        salt '*' portage_config.is_present unmask salt
    '''
    if conf not in SUPPORTED_CONFS:
        # The previous version fell off the end and returned None for an
        # unsupported conf; make the boolean contract explicit.
        return False
    package_file = '{0}/{1}'.format(BASE_PATH.format(conf), _p_to_cp(atom))
    match_list = set(_porttree().dbapi.xmatch("match-all", atom))
    try:
        file_handler = salt.utils.fopen(package_file)
    except IOError:
        return False
    for line in file_handler:
        line = line.strip()
        # Skip blank lines and comments: split()[0] on an empty line
        # raised IndexError in the previous version.
        if not line or line.startswith('#'):
            continue
        line_package = line.split()[0]
        line_list = _porttree().dbapi.xmatch("match-all", line_package)
        if match_list.issubset(line_list):
            file_handler.close()
            return True
    file_handler.close()
    return False
| 31.450413 | 120 | 0.535278 |
d8519b84b147ec619c546d717ba55459594c7a0d | 738 | py | Python | .history/myblog/models_20200416093106.py | abhinavmarwaha/demo-django-blog | c80a7d825e44d7e1589d9272c3583764562a2515 | [
"MIT"
] | null | null | null | .history/myblog/models_20200416093106.py | abhinavmarwaha/demo-django-blog | c80a7d825e44d7e1589d9272c3583764562a2515 | [
"MIT"
] | null | null | null | .history/myblog/models_20200416093106.py | abhinavmarwaha/demo-django-blog | c80a7d825e44d7e1589d9272c3583764562a2515 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
# Publication states for blog posts: 0 = unpublished draft, 1 = publicly visible.
STATUS = (
    (0,"Draft"),
    (1,"Publish")
)
class Post(models.Model):
    # A single blog entry. ``title`` and ``slug`` are both unique, so no two
    # posts can collide; the slug is the URL-safe identifier.
    title = models.CharField(max_length=200, unique=True)
    slug = models.SlugField(max_length=200, unique=True)
    # Deleting a user cascades to their posts; reverse accessor is
    # ``user.blog_posts``.
    author = models.ForeignKey(User, on_delete= models.CASCADE,related_name='blog_posts')
    # auto_now stamps every save; auto_now_add stamps creation only.
    updated_on = models.DateTimeField(auto_now= True)
    content = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    # See the module-level STATUS tuple: 0 = Draft, 1 = Publish.
    status = models.IntegerField(choices=STATUS, default=0)
    class Meta:
        # Newest posts first by default.
        ordering = ['-created_on']
    def __str__(self):
        return self.title
class Comment(models.Model):
Ppost - models.ForeignKey(Post, on_delete)
| 27.333333 | 89 | 0.707317 |
4ffc19cf4a2fdd3801fbc7431bca3b425cdb9842 | 42,393 | py | Python | app.py | hkeseyan/uh-edc | 41c663661f2dd03cea004a08902226cc5a4579ca | [
"MIT"
] | null | null | null | app.py | hkeseyan/uh-edc | 41c663661f2dd03cea004a08902226cc5a4579ca | [
"MIT"
] | null | null | null | app.py | hkeseyan/uh-edc | 41c663661f2dd03cea004a08902226cc5a4579ca | [
"MIT"
] | null | null | null | # if the access file has its columns reorganized the load_Metadata_Page() method needs to have
# the elements renumbered appropriate
from flask import Flask, render_template, request, flash, session, url_for, redirect
import flask
import os
from flask_uploads import UploadSet, configure_uploads, IMAGES
from datetime import datetime
from flask_wtf import form
import pypyodbc
import re
from static.FieldName import Field, DataForm, SearchHit
app = Flask(__name__)
# Mutable per-process state used throughout the app (NOT Flask's real
# request-scoped g object — this module rebinds flask.g to a plain list):
# flask.g[0] = (string) | MetadataID, flask.g[1] = (boolean) | was update clicked?
flask.g = [None, None]
# Single shared ODBC connection/cursor to the local Access database.
# NOTE(review): the DBQ path is machine-specific — confirm before deploying.
connect = pypyodbc.connect(r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
                           r'DBQ=C:\Users\AR42\PycharmProjects\Data Hub\Metadata.accdb;')
cursor = connect.cursor()
# One-off maintenance statements kept for reference (schema tweaks / purges):
# cursor.execute('CREATE TABLE `Organizations` (`id` INTEGER NOT NULL, `name` VARCHAR(200)NOT NULL, primary key(`id`));')
# sql = "ALTER TABLE [Metadata] ADD [CodeLibrary] VARCHAR(255);"
# cursor.execute(sql).commit()
# sql = "ALTER TABLE [Metadata] ADD [OwnerEmail] VARCHAR(100);"
# cursor.execute(sql).commit()
# sql = "DELETE * FROM Metadata"
# sql = "DELETE * FROM FieldNames"
# cursor.execute(sql).commit()
# sql = "DELETE * FROM Tags"
# cursor.execute(sql).commit()
# rows = cur.execute('SELECT `name` FROM `t3` WHERE `id` = 3')
# Image uploads land in static/img (one image per dataset, see profile_page).
photos = UploadSet('photos', IMAGES)
app.config['UPLOADED_PHOTOS_DEST'] = 'static/img'
configure_uploads(app, photos)
# for use in debugging the jinja code. Use {{ debug(string) }} in the html to call function
def debug(text):
    """Jinja helper: log *text* on the server console, render nothing.

    Returning the empty string keeps the template output unchanged when
    called as ``{{ debug(string) }}``.
    """
    print(text)
    return ""
def type_jinja(object):
    """Jinja helper: name the exact form-object class so templates can branch.

    Returns 'DataForm', 'Field', or 'None' for anything else. Uses an exact
    type comparison (subclasses intentionally fall through to 'None').
    """
    kind = type(object)
    if kind == DataForm:
        return 'DataForm'
    if kind == Field:
        return 'Field'
    return 'None'
# places the function to be used in jinja
# Expose the helper functions to every Jinja template.
app.jinja_env.globals.update(debug=debug)
app.jinja_env.globals.update(type_jinja=type_jinja)
# Flask-WTF secret keys for session signing / CSRF protection.
# NOTE(review): hard-coded secrets should come from environment or config
# before any real deployment — confirm.
app.config.update(dict(
    SECRET_KEY="dfsdfdfsecret1234567890key",
    WTF_CSRF_SECRET_KEY="what is a csrf key doing here?"
))
# displays the home page with buttons for metadata form and profile page
# with the option of going to a specific profile page based on metadata id number
# (Button) : (Description)
# New Metadata Form : redirects to a blank metadata form page that can be filled out
# Blank Profile Page : redirects to the profile page and loads no data
@app.route('/', methods=['POST', 'GET'])
def home_page():
    """Landing page: dispatch to the metadata form, a blank profile, or search.

    Resets the module-level dataset state (flask.g) on every visit so stale
    IDs from a previous form session do not leak into a new one.
    """
    data_form = DataForm()
    data_form.multipleFields.clear()
    data_form.organizations = load_organization_list()
    # [MetadataID, update-clicked?] — cleared for a fresh session.
    flask.g = [None, None]
    error = None
    if request.method == 'POST':
        clicked = request.form['Button']
        if clicked == 'New Metadata Form':
            return redirect(url_for("metadata_page", data_form=data_form))
        if clicked == 'Blank Profile Page':
            flask.g[0] = None
            return redirect(url_for('profile_page', id=" "))
        if clicked == 'search':
            hits = load_search_results(search(request.form['search']))
            return render_template("Home.html", results=hits, searching=True)
    return render_template("Home.html", searching=False)
# profile_page( string| id)
# will load the id into flask.g as D### to be used to find the metadata
# (Button) : (Description)
# Back : redirects to the homepage
# Update : sets flask.g[1] to True and redirects to the Metadata page that will have the data for the ID in flask.g[0]
# Upload Image: will save the img in the file input bar the the static/img folder then save the filename to the
# Metadata row and redirects so the image will be displayed in the profile.
# only one image will be saved at a time and it will delete the previous image in the static/img folder
@app.route('/Profile/<string:id>', methods=['POST', 'GET'])
def profile_page(id):
    """Display one dataset's profile; supports Back, Update and image upload.

    *id* is the numeric MetadataID as a string ('' or ' ' means a blank
    profile). Internally ids are carried in flask.g[0] as 'D<number>'.
    """
    flask.g[1] = False
    if id != ' ' and id != '':
        # Arm the module state: dataset 'D<id>' with update mode on.
        flask.g = [('D' + id), True]
    if flask.g[0] is not None and flask.g[0] != '':
        data_form = load_Metadata_Page(flask.g[0])
    else:
        data_form = DataForm()
    if request.method == 'POST':
        if request.form['Button'] == 'Back':
            flask.g[0] = None
            return redirect(url_for('home_page'))
        elif request.form['Button'] == 'Update':
            # Flag the metadata form to run in update (not create) mode.
            flask.g[1] = True
            return redirect(url_for("metadata_page", data_form=data_form))
        elif request.form['Button'] == 'Upload Image' and 'photo' in request.files:
            filename = photos.save(request.files['photo'])
            # Only one image per dataset: delete the previous file on disk
            # before recording the new filename.
            # NOTE(review): the SQL below is built by string concatenation —
            # id comes from the URL; confirm it cannot carry SQL text.
            sql_select = "SELECT Image FROM Metadata WHERE MetadataID = " + flask.g[0][1:] + ";"
            old_img = cursor.execute(sql_select).fetchone()[0]
            if old_img is not None:
                print('old_img is not none')
                print("old_img = /static/img/" + old_img)
                os.remove("static/img/" + old_img)
            sql_update = "UPDATE MetaData SET Image = ? WHERE MetadataID = " + flask.g[0][1:] + ";"
            cursor.execute(sql_update, [filename]).commit()
            # Redirect so the page reloads and shows the new image.
            return redirect('/Profile/' + id)
    return render_template('Profile.html', data_form=data_form)
# metadata_page()
# the metadata form page provides functionality to the input buttons on the page
# (Button) : (Description)
# Field Data : saves the metadata entered in the text boxes into the metadata table and creates the field names in the
# field names table
# Cancel : will delete all data associated with the current dataset in flask.g and redirect to the home page
# or redirect to the profile page if in update
@app.route('/metadataForm', methods=['POST', 'GET'])
def metadata_page():
    """Metadata entry form: validate, then insert or update the dataset row.

    'Field Data' saves the form (creating the row when flask.g[0] is None,
    updating it otherwise) and moves on to the field-names page. 'Cancel'
    either returns to the profile (update mode), wipes the partial dataset
    (create mode), or goes home.
    """
    data_form = DataForm()
    data_form.organizations = load_organization_list()
    if request.method == 'POST':
        if request.form['Button'] == 'Field Data':
            # Validation regexes for owner phone / email fields.
            pattern_phone = re.compile("^[\+]?[(]?[0-9]{3}[)]?[-\s\.]?[0-9]{3}[-\s\.]?[0-9]{4,6}$")
            pattern_email = re.compile("^(([^<>()\[\]\.,;:\s@\"]+(\.[^<>()\[\]\.,;:\s@\"]+)*)|(\".+\"))@(([^<>()[\]\.,;:\s@\"]+\.)+[^<>()[\]\.,;:\s@\"]{2,})$")
            checkbox = False
            if 'accessControl' in request.form:
                checkbox = True
            # 'Other' ingest rate uses the free-text companion field.
            frequency = ''
            if request.form['frequencyIngestRate'] == 'Other':
                frequency = request.form['frequencyOther']
            else:
                frequency = request.form['frequencyIngestRate']
            organization = request.form['OrganizationSearchAdd']
            organizations_list_insert(organization)
            # Positional tuple matching insert_Metadata/update_Metadata's
            # placeholder order — keep the numbering comments in sync.
            params = (request.form['dataOwnerName'], #0
                      organization, #1
                      request.form['analystName'], #2
                      request.form['interviewerName'], #3
                      request.form['guidanceInAcquiring'], #4
                      request.form['format'], #5
                      request.form['dataSize'] + request.form['dataSizeType'],#6
                      frequency, #7
                      request.form['sourceOfData'], #8
                      request.form['versionControl'], #9
                      request.form['archivingRules'], #10
                      request.form['headerInformation'], #11
                      request.form['metricsCollected'], #12
                      request.form['metricsToCollect'], #13
                      request.form['dataDependencies'], #14
                      request.form['actionsTaken'], #15
                      checkbox, #16
                      request.form['verificationOfData'], #17
                      request.form['concerns'], #18
                      request.form['SCG'], #19
                      request.form['distributionStatement'], #20
                      request.form['tags'], #21
                      request.form['fieldNames'], #22
                      request.form['dataSetName'], #23
                      request.form['dataOwnerPhone'], #24
                      request.form['dataOwnerEmail'], #25
                      request.form['codeLibrary']) #26
            # Validation failures re-render the form with the entered data
            # and an error code the template translates into a message.
            if request.form['dataSetName'] == '':
                error = u'dataSetNameNonexistent'
                data_form = convert_to_data_form(params)
                flash(error, 'error')
                return render_template("MetadataForm.html", error=error, data_form=data_form)
            elif in_table('Metadata', 'DataSetName', "'" + request.form['dataSetName'] + "'") and not flask.g[1]:
                # Duplicate names only block creation; updates keep the name.
                error = u'dataSetNameAlreadyExists'
                data_form = convert_to_data_form(params)
                flash(error, 'error')
                return render_template("MetadataForm.html", error=error, data_form=data_form)
            elif pattern_phone.match(request.form['dataOwnerPhone']) is None and request.form['dataOwnerPhone'] != '':
                error = u'InvalidPhone'
                data_form = convert_to_data_form(params)
                flash(error, 'error')
                return render_template("MetadataForm.html", error=error, data_form=data_form)
            elif pattern_email.match(request.form['dataOwnerEmail']) is None and request.form['dataOwnerEmail'] != '':
                # NOTE(review): 'InvaildEmail' is misspelled but templates
                # likely match on this exact key — confirm before fixing.
                error = u'InvaildEmail'
                data_form = convert_to_data_form(params)
                flash(error, 'error')
                return render_template("MetadataForm.html", error=error, data_form=data_form)
            else:
                if flask.g[0] is None:
                    # Create: also sets flask.g[0] to the new 'D<id>'.
                    insert_Metadata(params)
                else:
                    # Update: drop tags removed from the text box first.
                    check_tags_delete_missing(request.form['tags'], 'Metadata', 'MetadataID', flask.g[0])
                    update_Metadata(params)
                data_form = insert_fields_from_textarea(data_form)
                data_form = load_Metadata_Page(flask.g[0])
                # Index tags both by whole ';'-separated entry and by the
                # individual space-separated words inside each entry.
                tag_list = request.form['tags'].split(';')
                for tags in tag_list:
                    insert_tag(tags, flask.g[0])
                    more_tags = tags.split(' ')
                    for tag_from_space in more_tags:
                        if tag_from_space != '':
                            insert_tag(tag_from_space, flask.g[0])
                update_metadata_fields_string()
                return render_template('FieldForm.html', data_form=data_form)
        elif request.form['Button'] == 'Cancel':
            if flask.g[0] is not None:
                if flask.g[1]:
                    # Update mode: nothing to discard, back to the profile.
                    return redirect('/Profile/' + flask.g[0][1:])
                else:
                    # Create mode: wipe the partially-saved dataset.
                    return cancel_form()
            else:
                return redirect(url_for("home_page"))
    if flask.g[0] is None:
        return render_template("MetadataForm.html", data_form=data_form)
    elif flask.g[0] is not None:
        update_metadata_fields_string()
        return render_template("MetadataForm.html", data_form=load_Metadata_Page(flask.g[0]))
# field_page()
# displays the field names page and add functionality to input buttons on the page.
# must be accessed through metadata form page
# (Button) : (Description)
# Add : inserts a Field name with entered data into the FieldNames table and adds it the list of current Field Names
# Delete Field : finds the field to be deleted based on data in hidden input then removes it from the list
# and FieldNames table
# Submit : saves the data in the field form to the FieldNames page and creates a date stamp for the date created.
# it will then display the success page to show completion
# Back : saves field names data to FieldNames table and returns to the metadata form page
@app.route('/FieldForm', methods=['POST', 'GET'])
def field_page():
    """Field-names page for the current dataset (must arrive via the
    metadata form so flask.g[0] is set).

    Buttons: 'Add' inserts one new field row, 'Delete Field' removes one,
    'Submit' saves all fields and date-stamps the dataset, 'Back' saves and
    returns to the metadata form.
    """
    data_form = DataForm()
    if request.form['Button'] == 'Add':
        if request.form['addFieldName'] == '':
            error = u'addFieldNameNonexistent'
            flash(error, 'error')
            return render_template("FieldForm.html", error=error, data_form=data_form)
        # Build the Field view object; the *ID attributes are the HTML
        # element ids the template renders and later posts back.
        new_field = Field()
        new_field.name = request.form['addFieldName']
        new_field.description = request.form['addFieldDescription']
        new_field.units = request.form['addFieldUnits']
        new_field.tags = request.form['addFieldTags']
        new_field.collapseButtonTargetID = "button" + request.form['addFieldName'] + \
                                           str(len(data_form.multipleFields) - 1)
        new_field.collapseButtonID = "buttonID" + request.form['addFieldName'] + str(len(data_form.multipleFields) - 1)
        new_field.nameID = request.form['addFieldName'] + str(len(data_form.multipleFields) - 1)
        new_field.descriptionID = "description" + request.form['addFieldName'] + str(len(data_form.multipleFields) - 1)
        new_field.unitsID = "units" + request.form['addFieldName'] + str(len(data_form.multipleFields) - 1)
        new_field.tagsID = "tags" + request.form['addFieldName'] + str(len(data_form.multipleFields) - 1)
        params = [flask.g[0], new_field.name, new_field.description, new_field.units, new_field.tags]
        new_field.fieldID = insert_FieldNames(params)
        data_form.multipleFields.append(new_field)
        update_metadata_fields_string()
        return render_template("FieldForm.html", data_form=data_form)
    elif request.form['Button'] == 'Delete Field':
        # NOTE(review): this removes elements from the same lists it is
        # iterating (multipleFields, field_strings) — mutation while
        # iterating can skip entries; confirm behavior before relying on it.
        for fields in data_form.multipleFields:
            if fields.nameID == request.form['deleteField']:
                data_form.multipleFields.remove(fields)
                sql_select = "SELECT ID FROM FieldNames WHERE DataSet = '" + flask.g[0] + "' AND FieldName = '" + fields.name + "';"
                field_id = cursor.execute(sql_select).fetchone()[0]
                # Drop the field's tags before deleting the row itself.
                for tag in fields.tags.split(';'):
                    delete_tag(tag,field_id)
                sql_delete = "DELETE FROM FieldNames WHERE DataSet = ? AND FieldName = ?;"
                cursor.execute(sql_delete, [flask.g[0], fields.name]).commit()
                # Rebuild the semicolon-joined Fields string without the
                # deleted name.
                sql_select = "SELECT Fields FROM Metadata WHERE MetadataID = " + flask.g[0][1:] + ";"
                field_strings = cursor.execute(sql_select).fetchone()[0].split(';')
                newString = ''
                for value in field_strings:
                    if value == fields.name:
                        field_strings.remove(value)
                for values in field_strings:
                    newString += (values + ';')
                sql_update = "UPDATE Metadata SET Fields = '" + newString[:-1] + "' WHERE MetadataID = " + flask.g[0][
                                                                                                          1:] + ";"
                cursor.execute(sql_update).commit()
        return render_template("FieldForm.html", data_form=data_form)
    elif request.form['Button'] == 'Submit':
        for field in data_form.multipleFields:
            if request.form[field.nameID] == '':
                # Re-render with the offending panel expanded.
                error = field.nameID
                collapseID = field.collapseButtonTargetID
                flash(error, 'error')
                return render_template("FieldForm.html", error=error, data_form=data_form, collapseID=collapseID)
            params = [flask.g[0], request.form[field.nameID], request.form[field.descriptionID],
                      request.form[field.unitsID], request.form[field.tagsID]]
            save_fields(params, field.fieldID)
        update_metadata_fields_string()
        # Stamp DateCreated on first save, DateUpdated afterwards.
        date_stamp()
        return redirect(url_for("form_success"))
    elif request.form['Button'] == 'Back':
        # Save what was entered, then return to the metadata form.
        for field in data_form.multipleFields:
            params = [flask.g[0], request.form[field.nameID], request.form[field.descriptionID],
                      request.form[field.unitsID], request.form[field.tagsID]]
            save_fields(params, field.fieldID)
        update_metadata_fields_string()
        return redirect(url_for("metadata_page", data_form=data_form))
    error = ''
    return render_template("FieldForm.html", data_form=load_Metadata_Page(flask.g[0]), error=error)
# form_success()
# a page to show that all the data was successfully submitted
@app.route('/FormSuccess', methods=['POST', 'GET'])
def form_success():
    """Confirmation page shown after the field form submits successfully."""
    if request.method != 'POST':
        return render_template("FormSuccess.html")
    # Update flow returns to that dataset's profile; create flow goes home.
    if flask.g[1]:
        return redirect('/Profile/' + flask.g[0][1:])
    return redirect(url_for("home_page"))
# date_stamp()
# checks if metadata row has a created date stamp and if it doesn't it will save to the DateCreated column
# otherwise it will save the date stamp to the DateUpdated column
def date_stamp():
    """Record today's date on the current dataset row.

    The first ever save fills DateCreated; every later save writes
    DateUpdated instead.
    """
    row_id = flask.g[0][1:]
    sql_select = "SELECT DateCreated FROM Metadata WHERE MetadataID = " + row_id + ";"
    created = cursor.execute(sql_select).fetchone()[0]
    # Never stamped before -> DateCreated, otherwise DateUpdated.
    column = 'DateCreated' if created is None or created == '' else 'DateUpdated'
    today = str(datetime.now().date())
    sql_update = ("UPDATE Metadata SET " + column + " = '" + today +
                  "' WHERE MetadataID = " + row_id + ";")
    cursor.execute(sql_update).commit()
# clear_single_data( string| table, string| column)
# clear a single data point, parameters are the table and column of the data point
def clear_single_data(table, column):
    """Blank out one *column* of the current dataset's row in *table*.

    Does nothing when no dataset is active (flask.g[0] is None).
    NOTE(review): the UPDATE is never committed here — confirm the
    connection autocommits or callers commit later.
    """
    if flask.g[0] is None:
        return
    sql_update = ("UPDATE " + table + " SET " + column +
                  " = '' WHERE MetadataID = " + flask.g[0][1:] + ";")
    cursor.execute(sql_update)
# save_fields( list[string]| params, string| field_id)
# cleans the field name and determines if the field name is to be inserted or updated
def save_fields(params, field_id):
    """Upsert one field-name row: insert when *field_id* is unknown, update otherwise.

    params -- [dataset id, field name, description, units, tags]; the field
    name is sanitized in place. field_id is in 'F<number>' form.
    """
    params[1] = clean_field_name(params[1])
    lookup = "SELECT * FROM FieldNames WHERE ID =" + field_id[1:] + ";"
    existing = cursor.execute(lookup).fetchone()
    if existing is None:
        insert_FieldNames(params)
    else:
        update_FieldNames(params, field_id)
# load_Metadata_Page( string | MetadataID)
# loads all the data for the Metadata form page MetadataID is in format (D123)
def load_Metadata_Page(MetadataID):
    """Build a fully-populated DataForm for the metadata form page.

    MetadataID is in 'D<number>' form. The data[0][n] indexes follow the
    Access table's column order — see the note at the top of this file:
    reorganizing the columns requires renumbering these assignments.
    """
    Form = DataForm()
    id_number = MetadataID[1:]
    sql_select = "SELECT * FROM Metadata WHERE MetadataID= " + id_number + ";"
    data = cursor.execute(sql_select).fetchall()
    Form.DataSetID = data[0][0]
    Form.DataOwnerName = data[0][1]
    Form.DataOwnerOrganization = data[0][2]
    Form.AnalystName = data[0][3]
    Form.InterviewerName = data[0][4]
    Form.GuidanceInAcquiringData = data[0][5]
    Form.Format = data[0][6]
    Form.TotalSize = data[0][7]
    Form.ArchivingRules = data[0][11]
    Form.Frequency = data[0][8]
    Form.Source = data[0][9]
    Form.Version = data[0][10]
    Form.Header = data[0][12]
    Form.Tags = data[0][22]
    Form.ActionsTaken = data[0][16]
    Form.MetricsCollected = data[0][13]
    Form.MetricsToCollect = data[0][14]
    Form.DataDependencies = data[0][15]
    Form.VerificationOfData = data[0][18]
    Form.SecurityConcerns = data[0][19]
    Form.SCG = data[0][20]
    Form.DistributionStatement = data[0][21]
    Form.FieldNames = data[0][23]
    Form.DataSetName = data[0][24]
    # Date columns may be NULL; .date() drops the time component.
    if data[0][25] is not None:
        Form.Created = data[0][25].date()
    if data[0][26] is not None:
        Form.LastUpdated = data[0][26].date()
    Form.DataOwnerPhone = data[0][27]
    Form.DataOwnerEmail = data[0][28]
    Form.CodeLibrary = data[0][29]
    Form.Image = data[0][30]
    # Rebuild the per-field view objects from the FieldNames table.
    Form.multipleFields.clear()
    sql_select = "SELECT * FROM FieldNames WHERE Dataset = '" + MetadataID + "';"
    if cursor.execute(sql_select).fetchall() != []:
        sql_select = "SELECT ID FROM FieldNames WHERE Dataset = '" + MetadataID + "';"
        fields_ids = cursor.execute(sql_select).fetchall()
        fields_ids_list = []
        for ids in fields_ids:
            fields_ids_list += [ids[0]]
        for ids in fields_ids_list:
            sql_select = "SELECT * FROM FieldNames WHERE ID= " + str(ids) + ";"
            field_data = cursor.execute(sql_select).fetchall()
            new_field = Field()
            new_field.fieldID = 'F' + str(ids)
            new_field.name = field_data[0][2]
            new_field.description = field_data[0][3]
            new_field.units = field_data[0][4]
            new_field.tags = field_data[0][5]
            # The *ID attributes become HTML element ids in the template.
            new_field.collapseButtonTargetID = "button" + new_field.name + str(len(Form.multipleFields) - 1)
            new_field.collapseButtonID = "buttonID" + new_field.name + str(len(Form.multipleFields) - 1)
            new_field.nameID = new_field.name + str(len(Form.multipleFields) - 1)
            new_field.descriptionID = "description" + new_field.name + str(len(Form.multipleFields) - 1)
            new_field.unitsID = "units" + new_field.name + str(len(Form.multipleFields) - 1)
            new_field.tagsID = "tags" + new_field.name + str(len(Form.multipleFields) - 1)
            Form.multipleFields.append(new_field)
    # Pick up any field names that only exist in the Fields text string.
    Form = insert_fields_from_textarea(Form)
    Form.organizations = load_organization_list()
    return Form
# load_Metadata_searched(string | MetadataID)
# Uses the metadata ID to find the data in the database and loads it into
# a DataForm class object and returns it to be added into the results list
def load_Metadata_searched(MetadataID):
    """Load one dataset row into a DataForm for the search-results list.

    Same column mapping as load_Metadata_Page but without the per-field
    objects or organization list — the results view does not need them.
    MetadataID is in 'D<number>' form.
    """
    Form = DataForm()
    id_number = MetadataID[1:]
    sql_select = "SELECT * FROM Metadata WHERE MetadataID= " + id_number + ";"
    data = cursor.execute(sql_select).fetchall()
    # data[0][n] indexes follow the Access table's column order — see the
    # note at the top of this file before reorganizing columns.
    Form.DataSetID = data[0][0]
    Form.DataOwnerName = data[0][1]
    Form.DataOwnerOrganization = data[0][2]
    Form.AnalystName = data[0][3]
    Form.InterviewerName = data[0][4]
    Form.GuidanceInAcquiringData = data[0][5]
    Form.Format = data[0][6]
    Form.TotalSize = data[0][7]
    Form.ArchivingRules = data[0][11]
    Form.Frequency = data[0][8]
    Form.Source = data[0][9]
    Form.Version = data[0][10]
    Form.Header = data[0][12]
    Form.Tags = data[0][22]
    Form.ActionsTaken = data[0][16]
    Form.MetricsCollected = data[0][13]
    Form.MetricsToCollect = data[0][14]
    Form.DataDependencies = data[0][15]
    Form.VerificationOfData = data[0][18]
    Form.SecurityConcerns = data[0][19]
    Form.SCG = data[0][20]
    Form.DistributionStatement = data[0][21]
    Form.FieldNames = data[0][23]
    Form.DataSetName = data[0][24]
    # Date columns may be NULL; .date() drops the time component.
    if data[0][25] is not None:
        Form.Created = data[0][25].date()
    if data[0][26] is not None:
        Form.LastUpdated = data[0][26].date()
    Form.DataOwnerPhone = data[0][27]
    Form.DataOwnerEmail = data[0][28]
    Form.CodeLibrary = data[0][29]
    Form.Image = data[0][30]
    return Form
# load_organization_list()
# add data_owner_organizations to a list that will be loaded into the Metadata page
def load_organization_list():
    """Return every known data-owner organization name as a flat list."""
    sql_select = "SELECT Organizations FROM Organizations "
    rows = cursor.execute(sql_select).fetchall()
    return [row[0] for row in rows]
# insert_fields_from_textarea( DataForm| Form)
# creates and inserts the new field names that were entered in the text field
def insert_fields_from_textarea(Form):
    """Sync the dataset's semicolon-separated Fields string into *Form*.

    Creates FieldNames rows for names that only exist in the text string,
    then appends a Field view object for every name not already present in
    Form.multipleFields. Returns the (mutated) Form.
    """
    sql_select = "SELECT Fields FROM Metadata WHERE MetadataID= " + flask.g[0][1:] + ";"
    data = cursor.execute(sql_select).fetchone()[0]
    if data != '':
        # Sanitize every name once up front.
        fields_list = ''
        for field_name in data.split(';'):
            fields_list += clean_field_name(field_name) + ';'
        # Create FieldNames rows for names new to the database.
        for field_name in fields_list.split(';'):
            if field_name != '':
                sql_select = "SELECT * FROM FieldNames WHERE Dataset=? AND fieldName=?;"
                if cursor.execute(sql_select, params=[flask.g[0], field_name]).fetchone() is None:
                    insert_FieldNames([flask.g[0], field_name, '', '', ''])
        element_num = 0
        for field_name in fields_list.split(';'):
            if field_name != '':
                # BUG FIX: the "already in form" flag must be recomputed for
                # every name. Previously a single in_Form variable stuck True
                # after the first match, so all later names were skipped.
                in_Form = any(field_name == names.name for names in Form.multipleFields)
                if not in_Form:
                    sql_select = "SELECT ID FROM FieldNames WHERE DataSet='" + flask.g[0] + "' AND FieldName='" + field_name + "';"
                    field_id = cursor.execute(sql_select).fetchone()[0]
                    new_field = Field()
                    new_field.fieldID = 'F' + str(field_id)
                    # The *ID attributes become HTML element ids.
                    new_field.collapseButtonTargetID = "button" + field_name + str(element_num)
                    new_field.collapseButtonID = "buttonID" + field_name + str(element_num)
                    new_field.nameID = field_name + str(element_num)
                    new_field.descriptionID = "description" + field_name + str(element_num)
                    new_field.unitsID = "units" + field_name + str(element_num)
                    new_field.tagsID = "tags" + field_name + str(element_num)
                    new_field.name = field_name
                    element_num += 1
                    Form.multipleFields.append(new_field)
    return Form
# insert_Metadata( list[string]| params)
# inserts a new row for the metadata that is entered
def insert_Metadata(params):
    """Insert a new Metadata row and record its id in flask.g[0] as 'D<id>'.

    *params* is the 27-element tuple built by metadata_page, in the exact
    order of the column list below.
    """
    sql_insert = "INSERT INTO Metadata (OwnerName, OwnerOrganization, AnalystName, InterviewerName , " \
                 "GuidanceInAcquiring, Format, TotalSize, Frequency, CollectionMethod, Version, ArchivingRules, " \
                 "Header, CurrentMetrics, MetricsToCollect, Dependencies, ActionsTaken, RequiresRequest, Validation, " \
                 "Concerns, SCG, DistributionStatement, Tags, Fields, DataSetName, OwnerPhone, OwnerEmail, CodeLibrary) " \
                 "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"
    cursor.execute(sql_insert, params).commit()
    # Recover the autonumber id of the row just inserted: @@IDENTITY comes
    # back wrapped in a tuple, so the digits are regex-extracted from its
    # string form and concatenated onto the 'D' prefix.
    sql_id = "SELECT @@IDENTITY AS id;"
    pat = re.compile(r"""\d""")
    flask.g[0] = 'D'
    for ints in pat.findall(str(cursor.execute(sql_id).fetchone())):
        flask.g[0] += ints
# update_Metadata( list[string]| params)
# updates the data for existing Metadata row
def update_Metadata(params):
    """Overwrite every editable column of the current dataset's row.

    *params* is the same 27-element tuple metadata_page builds for
    insert_Metadata; placeholder order must match the SET list below.
    """
    id = flask.g[0][1:]
    sql_update = "UPDATE Metadata SET OwnerName = ?, OwnerOrganization = ?, AnalystName = ?, InterviewerName = ?, " \
                 "GuidanceInAcquiring = ?, Format = ?, TotalSize = ?, Frequency = ?, CollectionMethod = ?," \
                 " Version = ?, ArchivingRules = ?, Header = ?, CurrentMetrics = ?, MetricsToCollect = ?," \
                 " Dependencies = ?, ActionsTaken = ?, RequiresRequest = ?, Validation = ?, Concerns = ?, " \
                 "SCG = ?, DistributionStatement = ?, Tags = ?, Fields = ?, DataSetName = ?, OwnerPhone = ?, " \
                 "OwnerEmail = ?, CodeLibrary = ? WHERE MetadataID = " + id + " ;"
    cursor.execute(sql_update, params).commit()
# insert_FieldNames( list[string]| params)
# inserts the field name where the id equals the field ID and inserts the params
# params = (DataSet it belongs to, the field name, Description of the field, the tags associated with it)
def insert_FieldNames(params):
    """Insert one field-name row and index its tags; return the new 'F<id>'.

    params -- [dataset key 'D<id>', field name, description, units,
    ';'-separated tag string].
    """
    sql_insert = "INSERT INTO FieldNames (DataSet, FieldName, Description, Units, Tags) VALUES (?,?,?,?,?);"
    cursor.execute(sql_insert, params).commit()
    # Look the row back up by (dataset, name) and regex-extract the digits
    # from the fetched tuple's string form to build the 'F<id>' key.
    sql_id = "SELECT ID FROM FieldNames WHERE DataSet ='" + flask.g[0] + "' AND FieldName ='" + params[1] + "';"
    pat = re.compile(r"""\d""")
    field_id = 'F'
    for ints in pat.findall(str(cursor.execute(sql_id).fetchone())):
        field_id += ints
    if params[4] != '':
        # Drop stale tag links, then index both whole ';'-entries and the
        # space-separated words inside each entry.
        check_tags_delete_missing(params[4], 'FieldNames', 'ID', field_id)
        tags = params[4].split(';')
        for tag in tags:
            insert_tag(tag, field_id)
            more_tags = tag.split(' ')
            for tag_from_space in more_tags:
                if tag_from_space != '':
                    insert_tag(tag_from_space, field_id)
    return field_id
# update_FieldNames( list[string]| params, string| id)
# updates the field name where the id equals the field ID and updates the params
# params = (DataSet it belongs to, the field name, Description of the field, the tags associated with it)
def update_FieldNames(params, id):
    """Update the field-name row with key *id* ('F<number>') and re-index tags.

    params -- [dataset key, field name, description, units, ';'-separated
    tag string], matching the SET list below.
    """
    sql_update = "UPDATE FieldNames SET DataSet = ?, FieldName = ?, Description = ?, Units = ?, Tags = ? WHERE ID =" + id[
        1:] + ";"
    cursor.execute(sql_update, params).commit()
    # Normalize the incoming id into canonical 'F<digits>' form.
    pat = re.compile(r"""\d""")
    field_id = 'F'
    for ints in pat.findall(id):
        field_id += ints
    if params[4] != '':
        # Drop stale tag links, then index whole ';'-entries and the
        # space-separated words inside each entry (same scheme as insert).
        check_tags_delete_missing(params[4], 'FieldNames', 'ID', field_id)
        tags = params[4].split(';')
        for tag in tags:
            insert_tag(tag, field_id)
            more_tags = tag.split(' ')
            for tag_from_space in more_tags:
                if tag_from_space != '':
                    insert_tag(tag_from_space, field_id)
# insert_tag( string| tag, string| data_key)
# inserts tag if it doesn't exists or inserts the key for the dataset or field name if tag exists
def insert_tag(tag, data_key):
    """Link *data_key* ('D<id>' or 'F<id>') to *tag* in the Tags table.

    Creates the tag row when the word is new; otherwise appends the key to
    the tag's ';'-separated FieldsDataSets list (kept sorted, no duplicates).
    """
    if tag != '':
        tag = clean_field_name(tag)
        sql_check = "SELECT * FROM Tags WHERE Word='" + tag + "';"
        check = cursor.execute(sql_check, params=None).fetchone()
        if check is None:
            sql_insert = "INSERT INTO Tags(Word, FieldsDataSets) VALUES(?, ?)"
            cursor.execute(sql_insert, params=[tag, data_key]).commit()
        else:
            # check[2] is the existing ';'-separated key list for this word.
            tag_data_keys = check[2].split(';')
            new_data_keys = ''
            if not in_data_keys(data_key, tag_data_keys):
                tag_data_keys += [data_key]
                tag_data_keys.sort()
                for values in tag_data_keys:
                    new_data_keys += (values + ";")
                # Trailing ';' is sliced off before writing back.
                sql_update = "UPDATE Tags SET FieldsDataSets = ? WHERE Word = ?;"
                cursor.execute(sql_update, params=[new_data_keys[:-1], tag]).commit()
# delete_tag( string| tag, string| id)
# when a tag is removed from the text area this function will determine which tag and from which ID it is being removed
# from. It will then remove that ID from the tag and if there are no more IDs in the tag it will remove the tag
# from the database
def delete_tag(tag, id):
    """Unlink *id* ('D<id>' or 'F<id>') from *tag*; drop the tag when empty.

    NOTE(review): the DELETE/UPDATE at the bottom are never committed here —
    confirm a later commit on this connection persists them. Also, removing
    a key leaves an empty segment (';;') in the rebuilt list rather than
    collapsing it — confirm downstream readers tolerate that.
    """
    sql_select = "SELECT FieldsDataSets FROM Tags WHERE Word = '" + tag + "';"
    fields_datasets = cursor.execute(sql_select).fetchone()
    new_string = ''
    if fields_datasets is None:
        # Unknown tag: nothing to unlink.
        return
    else:
        fields_datasets = fields_datasets[0]
    # Rebuild the key list with the target id blanked out.
    for ids in fields_datasets.split(';'):
        if ids == id:
            ids = ''
        new_string += ids + ';'
    new_string = new_string[:-1]
    if new_string == '':
        # No keys left: the tag itself is removed.
        sql_delete = "DELETE FROM Tags WHERE Word = '" + tag + "';"
        cursor.execute(sql_delete)
    else:
        sql_update = "UPDATE Tags SET FieldsDataSets = ? WHERE Word = '" + tag + "';"
        cursor.execute(sql_update, params=[new_string])
# id in format (D342) or (F343)
def check_tags_delete_missing(new_tag_string, table, id_column, id):
    """Unlink tags the user removed from a tag text box.

    Compares the stored Tags string of row *id* ('D123' or 'F123') in
    *table* against *new_tag_string*; every old tag (whole ';'-entry or
    space-separated word) missing from the new string is unlinked via
    delete_tag.
    """
    sql_select = "SELECT Tags FROM " + table + " WHERE " + id_column + " = " + id[1:] + ";"
    old_string = cursor.execute(sql_select).fetchone()[0]
    if new_tag_string == old_string:
        # Unchanged string: nothing was removed.
        return
    else:
        new_string = new_tag_string.split(';')
        for tag in old_string.split(';'):
            if tag not in new_string:
                delete_tag(tag, id)
            # Words inside each entry were indexed separately on insert,
            # so they must be checked (split on ';' or whitespace) too.
            for white_split_tag in tag.split(' '):
                if white_split_tag not in re.split(";|\s", new_tag_string):
                    delete_tag(white_split_tag, id)
# updates the string of field names in the metadata table
def update_metadata_fields_string():
    """Rebuild the dataset's ';'-joined Fields string from the FieldNames table.

    NOTE(review): the UPDATE is never committed here — confirm the
    connection autocommits or a caller commits later.
    """
    sql_select = "SELECT FieldName FROM FieldNames WHERE DataSet = '" + flask.g[0] + "';"
    field_list = cursor.execute(sql_select).fetchall()
    field_name_list = []
    new_string = ''
    # Flatten the fetched 1-tuples into plain names.
    for values in field_list:
        field_name_list += [values[0]]
    for field_name in field_name_list:
        field_name = clean_field_name(field_name)
        new_string += field_name + ';'
    # Trailing ';' is sliced off before writing back.
    sql_update = "UPDATE MetaData SET Fields = ? WHERE MetadataID = " + flask.g[0][1:] + ";"
    cursor.execute(sql_update, params=[new_string[:-1]])
# if organization does not exist in list than insert it into organizations table
def organizations_list_insert(organization):
    """Insert *organization* into the Organizations table if it is not already there."""
    # Parameterized so a quote in the organization name cannot break the query
    # (the INSERT below was already parameterized; the SELECT was not).
    # NOTE(review): assumes cursor.execute accepts positional parameters for
    # SELECTs as it does for INSERTs - verify against the DB wrapper.
    sql_select = "SELECT * FROM Organizations WHERE Organizations = ?;"
    if cursor.execute(sql_select, [organization]).fetchone() is None:
        sql_insert = "INSERT INTO Organizations (Organizations) VALUES (?);"
        cursor.execute(sql_insert, [organization])
# Ensures that the field names entered don't contain characters that would cause glitches in the code
def clean_field_name(field_name):
    """Return *field_name* with single quotes, double quotes and backslashes removed."""
    # str.translate strips all three characters in a single C-level pass and
    # avoids shadowing the built-in name ``list`` like the old loop did.
    return field_name.translate(str.maketrans('', '', '\'"\\'))
# checks the data keys in the tag to see if the key already exists in the list
def in_data_keys(data_key, tag_data_keys):
    """Return True if *data_key* is already present in *tag_data_keys*."""
    # ``in`` performs the same element-by-element equality scan the old loop did.
    return data_key in tag_data_keys
# If data is saved to access database, it will delete all data associated with the dataset id that is saved to flask.g
def cancel_form():
    """Delete all data for the in-progress dataset (flask.g[0]) and return home."""
    sql_select = "SELECT Tags FROM Metadata WHERE MetadataID = " + flask.g[0][1:] + ";"
    tags = cursor.execute(sql_select).fetchone()[0]
    sql_select = "SELECT ID FROM FieldNames WHERE DataSet ='" + flask.g[0] + "';"
    two_d_list = cursor.execute(sql_select).fetchall()
    field_ids = []
    for elements in two_d_list:
        # append(), not +=: ``field_ids += 'F123'`` extended the list with the
        # single characters 'F', '1', '2', '3' instead of one ID string, so the
        # field-ID comparison below could never match.
        field_ids.append('F' + str(elements[0]))
    for tag in tags.split(';'):
        sql_check = "SELECT * FROM Tags WHERE Word='" + tag + "';"
        check = cursor.execute(sql_check, params=None).fetchone()
        if check is not None:
            sql_select = "SELECT FieldsDataSets FROM Tags WHERE word='" + tag + "';"
            fields_and_datasets = cursor.execute(sql_select).fetchone()[0].split(';')
            # Iterate over a copy: the old code removed items from the list it
            # was iterating, which skips elements.
            # NOTE(review): this cleanup only edits the local list; the result
            # is never written back to the Tags table.  Confirm whether an
            # UPDATE / delete_tag() call is missing here.
            for values in list(fields_and_datasets):
                if values == '' or values == flask.g[0] or values in field_ids:
                    fields_and_datasets.remove(values)
    sql_delete = "DELETE FROM FieldNames WHERE DataSet = '" + flask.g[0] + "';"
    cursor.execute(sql_delete).commit()
    sql_delete = "DELETE FROM Metadata WHERE MetadataID = " + flask.g[0][1:] + ";"
    cursor.execute(sql_delete).commit()
    flask.g[0] = None
    return redirect(url_for("home_page"))
# takes in a list of params and creates a DataForm object that it puts the data into and sends back the object
def convert_to_data_form(params):
    """Populate a DataForm from an ordered parameter list and return it."""
    # Attribute name -> index into ``params``.  The indices are not sequential
    # because the form-field order differs from the stored column order
    # (index 16 is intentionally unused, matching the original mapping).
    field_index = {
        'DataOwnerName': 0, 'DataOwnerOrganization': 1, 'AnalystName': 2,
        'InterviewerName': 3, 'GuidanceInAcquiringData': 4, 'Format': 5,
        'TotalSize': 6, 'Frequency': 7, 'Source': 8, 'Version': 9,
        'ArchivingRules': 10, 'Header': 11, 'MetricsCollected': 12,
        'MetricsToCollect': 13, 'DataDependencies': 14, 'ActionsTaken': 15,
        'VerificationOfData': 17, 'SecurityConcerns': 18, 'SCG': 19,
        'DistributionStatement': 20, 'Tags': 21, 'FieldNames': 22,
        'DataSetName': 23, 'DataOwnerPhone': 24, 'DataOwnerEmail': 25,
        'CodeLibrary': 26,
    }
    form = DataForm()
    for attribute, index in field_index.items():
        setattr(form, attribute, params[index])
    return form
# parameters (string:(name of the table), string:(the column to search), string:(the value to look for))
def in_table(table, data_column, data):
    """Return True if *table* has a row where *data_column* equals *data*."""
    sql_select = "SELECT * FROM " + table + " WHERE " + data_column + " = " + data + ";"
    # ``is not None`` instead of ``!= None``; return the comparison directly.
    return cursor.execute(sql_select).fetchone() is not None
# performs the search with the string in the parameters
# search order
# 1: Data set Name
# 2: Owner Name
# 3: Owner Organization
# 4: Field Name
# 5: Tags with multiple words in quotes
# 6: Tags with each word in the string
def search(search_string):
    """Return a list of SearchHit objects ranked by relevance to *search_string*."""
    search_list = []
    # Exact-match lookups against the Metadata columns.  Each query is now
    # executed once and the fetched row reused (the old code ran every query
    # twice: once for the None-check and once for the value).
    for column in ("DatasetName", "OwnerName", "OwnerOrganization"):
        sql_select = "SELECT MetadataID FROM Metadata WHERE " + column + " = '" + search_string + "';"
        row = cursor.execute(sql_select).fetchone()
        if row is not None:
            add_hit('D' + str(row[0]), search_list)
    sql_select = "SELECT ID FROM FieldNames WHERE FieldName = '" + search_string + "';"
    row = cursor.execute(sql_select).fetchone()
    if row is not None:
        add_hit('F' + str(row[0]), search_list)
    # Quoted phrases are matched as whole multi-word tags.
    for words in re.findall(r'"([^"]*)"', search_string):
        sql_select = "SELECT FieldsDataSets FROM Tags WHERE Word = '" + words + "';"
        row = cursor.execute(sql_select).fetchone()
        # The old code called fetchone()[0] unguarded here and crashed
        # whenever a quoted phrase had no matching tag.
        if row is not None:
            for id_numbers in row[0].split(';'):
                add_hit(id_numbers, search_list)
    # Every individual word is also matched against single-word tags.
    for words in re.findall(r'\w+', search_string):
        sql_select = "SELECT FieldsDataSets FROM Tags WHERE Word = '" + words + "';"
        row = cursor.execute(sql_select).fetchone()
        if row is not None:
            for id_numbers in row[0].split(';'):
                add_hit(id_numbers, search_list)
    return search_list
# called from search to add a hit or increase in integer value to the
# data set or field that has been found to have a match to the search_string
# all fields found will also give an additional hit to the data set it belongs to
def add_hit(id_number, search_list):
    """Record one hit for *id_number* in *search_list* and keep the list sorted."""
    for existing in search_list:
        if existing.IDNumber == id_number:
            existing.Hit += 1
            break
    else:
        # First hit for this ID: create a fresh entry.
        fresh_hit = SearchHit()
        fresh_hit.IDNumber = id_number
        fresh_hit.Hit += 1
        search_list.append(fresh_hit)
    # A matching field also counts as a hit for the data set that owns it.
    if id_number[0] == 'F':
        sql_select = "SELECT DataSet FROM FieldNames WHERE ID = " + id_number[1:] + ";"
        dataset = cursor.execute(sql_select).fetchone()[0]
        add_hit(dataset, search_list)
    selection_sort_search_list(search_list)
    return search_list
# simple selection sort to place the object with the most hits in front
def selection_sort_search_list(search_list):
    """In-place selection sort of *search_list*, descending by ``Hit``."""
    # Each pass finds the entry with the fewest hits in the unsorted prefix
    # and moves it to the end, leaving the highest counts at the front.
    for slot in range(len(search_list) - 1, 0, -1):
        smallest = 0
        for candidate in range(1, slot + 1):
            if search_list[candidate].Hit < search_list[smallest].Hit:
                smallest = candidate
        search_list[slot], search_list[smallest] = search_list[smallest], search_list[slot]
# takes the search_list[data set ID,Field ID] and turns the IDs into Metadata objects
# or Field objects so that the data can be displayed on the home page
def load_search_results(search_list):
    """Convert ranked SearchHit IDs into displayable Metadata/Field objects."""
    # 'D'-prefixed IDs are data sets; everything else is a field.
    return [
        load_Metadata_searched(hit.IDNumber) if hit.IDNumber[0] == 'D'
        else load_Field_Data(hit.IDNumber)
        for hit in search_list
    ]
# takes the field name ID number and creates a Field class object
# inputting the data from the database into the object and it gets
# added to the results list
def load_Field_Data(FieldID):
    """Load one FieldNames row into a Field object; None when the ID is unknown."""
    sql_select = "SELECT * FROM FieldNames WHERE ID = " + FieldID[1:] + ";"
    row = cursor.execute(sql_select).fetchone()
    if row is None:
        return None
    field = Field()
    field.fieldID = 'F' + str(row[0])
    field.dataset = row[1]
    field.name = row[2]
    field.description = row[3]
    field.units = row[4]
    field.tags = row[5]
    return field
# Run the Flask development server when this module is executed directly.
if __name__ == "__main__":
    app.run()
133877b2579beca2dcb435755912f35a4880bbe4 | 29 | py | Python | project_euler/051-100/97.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | [
"MIT"
] | null | null | null | project_euler/051-100/97.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | [
"MIT"
] | null | null | null | project_euler/051-100/97.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | [
"MIT"
] | null | null | null | print 28433 * 2**7830457 + 1
| 14.5 | 28 | 0.655172 |
a8b294b5ff4cc1c317a1f439815e58ceb07b3538 | 7,048 | py | Python | nTD/n_expected_Sarsa.py | Tony-Tan/Reinforcement_Leanring_An_Introduction | 285b93b0108bb3401623149129fad4ec863db0d5 | [
"MIT"
] | 5 | 2020-11-09T11:40:10.000Z | 2021-06-10T12:06:12.000Z | nTD/n_expected_Sarsa.py | Tony-Tan/Reinforcement_Leanring_An_Introduction | 285b93b0108bb3401623149129fad4ec863db0d5 | [
"MIT"
] | 1 | 2020-11-10T02:38:06.000Z | 2020-11-10T10:49:44.000Z | nTD/n_expected_Sarsa.py | Tony-Tan/Reinforcement_Leanring_An_Introduction | 285b93b0108bb3401623149129fad4ec863db0d5 | [
"MIT"
] | null | null | null | import collections
import matplotlib.pyplot as plt
import numpy as np
from environment.random_walk_19_states import RandomWalk
def constant_factory(n):
    """Return a zero-argument callable producing a uniform distribution over n actions."""
    weights = np.ones(n)
    # A fresh array is built on every call, so each defaultdict entry gets its
    # own policy vector that can be mutated independently.
    return lambda: weights / np.sum(weights)
class Agent:
    """Tabular n-step expected-Sarsa agent with an epsilon-greedy policy."""
    def __init__(self, environment_, n):
        # environment_: gym-like env exposing reset()/step() and
        #               action_space.n / state_space.n.
        # n: number of steps used for the n-step return.
        self.env = environment_
        self.n = n
        # Per-state action-probability arrays, initialised uniformly.
        self.policies = collections.defaultdict(constant_factory(self.env.action_space.n))
        # Tabular Q-values keyed by (state, action), defaulting to 0.
        self.value_of_state_action = collections.defaultdict(lambda: 0)
    def select_action(self, state):
        """Sample one action for *state* from the current stochastic policy."""
        probability_distribution = self.policies[state]
        action = np.random.choice(self.env.action_space.n, 1, p=probability_distribution)
        return action[0]
    def estimating(self, iteration_times, alpha=0.3, gamma=0.9, epsilon=0.1):
        """Run *iteration_times* episodes of n-step expected-Sarsa control.

        alpha: step size; gamma: discount factor; epsilon: exploration rate
        used when the policy is made epsilon-greedy after each episode.
        """
        for iteration_time in range(iteration_times):
            current_stat = self.env.reset()
            action = self.select_action(current_stat)
            new_state, reward, is_done, _ = self.env.step(action)
            # the doc of deque can be found: https://docs.python.org/3/library/collections.html#collections.deque
            n_queue = collections.deque()
            n_queue.append([current_stat, action, reward])
            while True:
                if is_done:
                    # Episode over: flush every remaining transition with a
                    # plain (non-bootstrapped) discounted return.
                    while len(n_queue) != 0:
                        state_updated, action_updated, reward = n_queue.popleft()
                        gamma_temp = gamma
                        g_value = reward
                        for iter_n in n_queue:
                            # iter_n[2] is the reward in the queue
                            g_value += gamma_temp * iter_n[2]
                            gamma_temp *= gamma
                        self.value_of_state_action[(state_updated, action_updated)] += \
                            (alpha * (g_value - self.value_of_state_action[(state_updated, action_updated)]))
                        # update policy
                        # value_of_action_list = []
                        # for action_iter in range(self.env.action_space.n):
                        #     value_of_action_list.append(self.value_of_state_action[(state_updated, action_iter)])
                        # value_of_action_list = np.array(value_of_action_list)
                        # optimal_action = np.random.choice(np.flatnonzero(value_of_action_list ==
                        #                                                  value_of_action_list.max()))
                        # for action_iter in range(self.env.action_space.n):
                        #     if action_iter == optimal_action:
                        #         self.policies[state_updated][action_iter] = \
                        #             1 - epsilon + epsilon / self.env.action_space.n
                        #     else:
                        #         self.policies[state_updated][action_iter] = \
                        #             epsilon / self.env.action_space.n
                    break
                else:
                    # Enough transitions buffered: update the oldest one with
                    # an n-step return bootstrapped from the newest state.
                    if len(n_queue) == self.n + 1:
                        state_updated, action_updated, reward = n_queue.popleft()
                        gamma_temp = gamma
                        g_value = reward
                        for iter_n in n_queue:
                            g_value += gamma_temp * iter_n[2]
                            gamma_temp *= gamma
                        # new
                        current_stat = new_state
                        action = self.select_action(current_stat)
                        new_state, reward, is_done, _ = self.env.step(action)
                        n_queue.append([current_stat, action, reward])
                        # Expected-Sarsa bootstrap: expectation of Q over the
                        # current policy instead of the sampled action's Q.
                        expected_g_state_value = 0
                        for action_iter in range(self.env.action_space.n):
                            expected_g_state_value += self.value_of_state_action[(current_stat, action_iter)] * \
                                                      self.policies[current_stat][action_iter]
                        g_value += expected_g_state_value * gamma_temp
                        self.value_of_state_action[(state_updated, action_updated)] += \
                            (alpha * (g_value - self.value_of_state_action[(state_updated, action_updated)]))
                        # update policy
                        # value_of_action_list = []
                        # for action_iter in range(self.env.action_space.n):
                        #     value_of_action_list.append(self.value_of_state_action[(state_updated, action_iter)])
                        # value_of_action_list = np.array(value_of_action_list)
                        # optimal_action = np.random.choice(np.flatnonzero(value_of_action_list ==
                        #                                                  value_of_action_list.max()))
                        # for action_iter in range(self.env.action_space.n):
                        #     if action_iter == optimal_action:
                        #         self.policies[state_updated][action_iter] = \
                        #             1 - epsilon + epsilon / self.env.action_space.n
                        #     else:
                        #         self.policies[state_updated][action_iter] = \
                        #             epsilon / self.env.action_space.n
                    else:
                        # Queue not full yet: keep interacting and buffering.
                        current_stat = new_state
                        action = self.select_action(current_stat)
                        new_state, reward, is_done, _ = self.env.step(action)
                        n_queue.append([current_stat, action, reward])
            # update policy
            # Make the policy epsilon-greedy with respect to the updated
            # Q-values once per episode (ties broken uniformly at random).
            for state_iter in range(self.env.state_space.n):
                value_of_action_list = []
                for action_iter in range(self.env.action_space.n):
                    value_of_action_list.append(self.value_of_state_action[(state_iter, action_iter)])
                value_of_action_list = np.array(value_of_action_list)
                optimal_action = np.random.choice(
                    np.flatnonzero(value_of_action_list == value_of_action_list.max()))
                for action_iter in range(self.env.action_space.n):
                    if action_iter == optimal_action:
                        self.policies[state_iter][
                            action_iter] = 1 - epsilon + epsilon / self.env.action_space.n
                    else:
                        self.policies[state_iter][action_iter] = epsilon / self.env.action_space.n
if __name__ == '__main__':
    env = RandomWalk(19)
    # Analytic state values of the 19-state random walk: linear from -1 to +1.
    ground_truth = [-1 + i / 9 for i in range(0, 19)]
    agent = Agent(env, 4)
    agent.estimating(10000)
    # State value = expectation of the action values under the learned policy.
    estimating_value = np.zeros(19)
    for state in range(env.state_space.n):
        for act in range(env.action_space.n):
            estimating_value[state] += agent.value_of_state_action[(state, act)] * agent.policies[state][act]
    print(estimating_value)
    plt.figure(0)
    plt.plot(estimating_value[1:-1])
    plt.plot(ground_truth[1:-1])
    plt.show()
| 53.801527 | 115 | 0.532775 |
ebaf8b1c467ceafe16f008467043a4aa0f19de54 | 3,650 | py | Python | adafruit_epd/mcp_sram.py | makermelissa/Adafruit_CircuitPython_EPD | d738c779a65ea91a7f3d897214b0dba035a65212 | [
"Unlicense",
"MIT-0",
"MIT"
] | 26 | 2018-09-12T21:08:38.000Z | 2022-03-18T03:21:06.000Z | adafruit_epd/mcp_sram.py | makermelissa/Adafruit_CircuitPython_EPD | d738c779a65ea91a7f3d897214b0dba035a65212 | [
"Unlicense",
"MIT-0",
"MIT"
] | 35 | 2018-07-18T17:52:03.000Z | 2021-07-05T21:55:17.000Z | adafruit_epd/mcp_sram.py | makermelissa/Adafruit_CircuitPython_EPD | d738c779a65ea91a7f3d897214b0dba035a65212 | [
"Unlicense",
"MIT-0",
"MIT"
] | 17 | 2018-08-04T15:51:21.000Z | 2022-03-18T15:04:51.000Z | # SPDX-FileCopyrightText: 2018 Dean Miller for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_epd.mcp_sram` - Adafruit MCP SRAM - sram driver
====================================================================================
CircuitPython driver for Microchip SRAM chips
* Author(s): Dean Miller
"""
from micropython import const
from adafruit_bus_device import spi_device
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_EPD.git"
# MODE register value selecting sequential access mode (bit 6 set).
SRAM_SEQUENTIAL_MODE = const(1 << 6)
class Adafruit_MCP_SRAM_View:
    """Byte-addressable, memoryview-like window into an SRAM chip at a fixed offset."""

    def __init__(self, sram, offset):
        self._sram = sram
        self._offset = offset
        # Reusable single-byte write buffer to avoid an allocation per write.
        self._buf = [0]

    def __getitem__(self, index):
        # Read one byte at offset + index and unwrap it from the buffer.
        return self._sram.read(self._offset + index, 1)[0]

    def __setitem__(self, index, value):
        self._buf[0] = value
        self._sram.write(self._offset + index, self._buf)
class Adafruit_MCP_SRAM:
    """supporting class for communicating with
    Microchip SRAM chips"""

    # SPI command opcodes for the SRAM chip.
    SRAM_READ = 0x03
    SRAM_WRITE = 0x02
    SRAM_RDSR = 0x05
    SRAM_WRSR = 0x01

    def __init__(self, cs_pin, spi):
        # Handle hardware SPI
        self._spi = spi_device.SPIDevice(spi, cs_pin, baudrate=8000000)
        self.spi_device = spi
        self.cs_pin = cs_pin
        # Reusable 3-byte command buffer: [opcode, addr-high, addr-low].
        self._buf = bytearray(3)
        # Configure the chip by writing its status register.
        self._buf[0] = Adafruit_MCP_SRAM.SRAM_WRSR
        self._buf[1] = 0x43
        with self._spi as spidev:
            spidev.write(self._buf, end=2)  # pylint: disable=no-member

    def get_view(self, offset):
        """Create an object that can be used as a memoryview, with a given offset"""
        return Adafruit_MCP_SRAM_View(self, offset)

    def write(self, addr, buf, reg=SRAM_WRITE):
        """write the passed buffer to the passed address"""
        self._buf[0] = reg
        self._buf[1] = (addr >> 8) & 0xFF
        self._buf[2] = addr & 0xFF
        with self._spi as spi:
            spi.write(self._buf, end=3)  # pylint: disable=no-member
            spi.write(bytearray(buf))  # pylint: disable=no-member

    def read(self, addr, length, reg=SRAM_READ):
        """read passed number of bytes at the passed address"""
        self._buf[0] = reg
        self._buf[1] = (addr >> 8) & 0xFF
        self._buf[2] = addr & 0xFF
        buf = bytearray(length)
        with self._spi as spi:
            spi.write(self._buf, end=3)  # pylint: disable=no-member
            spi.readinto(buf)  # pylint: disable=no-member
        return buf

    def read8(self, addr, reg=SRAM_READ):
        """read a single byte at the passed address"""
        return self.read(addr, 1, reg)[0]

    def read16(self, addr, reg=SRAM_READ):
        """read 2 bytes at the passed address (big-endian)"""
        buf = self.read(addr, 2, reg)
        return buf[0] << 8 | buf[1]

    def write8(self, addr, value, reg=SRAM_WRITE):
        """write a single byte at the passed address"""
        # Mask to 8 bits: bytearray() raises ValueError for values > 0xFF.
        self.write(addr, [value & 0xFF], reg)

    def write16(self, addr, value, reg=SRAM_WRITE):
        """write 2 bytes at the passed address (big-endian)"""
        # Mask both bytes: the old code passed the raw 16-bit value as the low
        # byte, so any value > 0xFF crashed inside bytearray().
        self.write(addr, [(value >> 8) & 0xFF, value & 0xFF], reg)

    def erase(self, addr, length, value):
        """erase the passed number of bytes starting at the passed address"""
        self._buf[0] = Adafruit_MCP_SRAM.SRAM_WRITE
        self._buf[1] = (addr >> 8) & 0xFF
        self._buf[2] = addr & 0xFF
        # Mask the fill byte for the same bytearray() > 0xFF reason as above.
        fill = bytearray([value & 0xFF])
        with self._spi as spi:
            spi.write(self._buf, end=3)  # pylint: disable=no-member
            for _ in range(length):
                spi.write(fill)  # pylint: disable=no-member
| 33.181818 | 84 | 0.606575 |
325885920991d1c927ae063598bc6e880aa4ecab | 11,046 | py | Python | pytracking/util_scripts/download_results.py | sehomi/pyCFTrackers | 4dbd550fbac78f4e7e35fdb4a1761b5b0cf9b096 | [
"MIT"
] | null | null | null | pytracking/util_scripts/download_results.py | sehomi/pyCFTrackers | 4dbd550fbac78f4e7e35fdb4a1761b5b0cf9b096 | [
"MIT"
] | null | null | null | pytracking/util_scripts/download_results.py | sehomi/pyCFTrackers | 4dbd550fbac78f4e7e35fdb4a1761b5b0cf9b096 | [
"MIT"
] | null | null | null | import os
import sys
import gdown
import re
import shutil
import argparse
import tempfile
# Make the repository root importable when this script is run standalone.
env_path = os.path.join(os.path.dirname(__file__), '../..')
if env_path not in sys.path:
    sys.path.append(env_path)
from pytracking.evaluation.environment import env_settings
# Google Drive file IDs for zipped raw results of PyTracking-based trackers,
# keyed by tracker name, then by "<parameter>_<run>.zip" archive name.
pytracking_results_link_dict = {
    "dimp": {
        "prdimp50_003.zip": "1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc",
        "prdimp50_002.zip": "1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz",
        "prdimp50_001.zip": "17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS",
        "prdimp50_000.zip": "1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6",
        "prdimp18_004.zip": "1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM",
        "prdimp18_003.zip": "1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO",
        "prdimp18_002.zip": "17lMllYhygCqgE81DoHX4BZar3xc3auzM",
        "prdimp18_001.zip": "1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G",
        "prdimp18_000.zip": "1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z",
        "prdimp50_004.zip": "1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO",
        "dimp50_004.zip": "1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa",
        "dimp50_000.zip": "1LCgf5sg453Z4bY37A_W5mbXeG68U1fET",
        "dimp18_000.zip": "17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g",
        "dimp18_001.zip": "1AsiliVgISyDTouYOQYVOXA0srj3YskhJ",
        "dimp50_got_001.zip": "1EE5FcPXqMBkv_0ghfzytCMmbKxWxy04p",
        "dimp18_002.zip": "1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y",
        "dimp50_got_002.zip": "1ALXzVkn58GZ1E0I22vrbXkEXwy5u0xOc",
        "dimp18_got_000.zip": "1BxowlgGEonnuaVXwiDwiYr7VV7BRWLvr",
        "dimp50_001.zip": "1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K",
        "dimp18_got_002.zip": "1awqXQnFRr5NwjLfI-Ngtt3zT7XmQIwzs",
        "dimp18_got_001.zip": "1rr2J6NuuYJ5E4wDUw-PrxaNKjIsfgAyk",
        "dimp50_got_000.zip": "1ruP8XJOu0woq-bvKdHJ9_Y9RceHDrDjm",
        "dimp18_004.zip": "1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg",
        "dimp18_003.zip": "1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8",
        "dimp50_003.zip": "1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy",
        "dimp50_002.zip": "1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4",
        "super_dimp_000.zip": "1f3Qk7vVVaPGb1jhSmLRbc6lTeUQfrCoF",
        "super_dimp_001.zip": "1T4JlTaKekWpJtDiA0H3cwEAoJ2Oibt_3",
        "super_dimp_002.zip": "1n5a1VlVYwpQPwpeBcQ-MZRE2QuDqhJ6A",
        "super_dimp_003.zip": "1iS9uTaufNUk_9sK9euZwyYwkCzISH-aO",
        "super_dimp_004.zip": "15xCdZB7S8fo5rXePFfr3unqHewYBiD9q",
        "super_dimp_simple_000.zip": "13m7YNC_VD3yJ1tLfacPsEk0tX6zQ2lub",
        "super_dimp_simple_001.zip": "10vUt1ji6W2h_rVDfl28tZoJAj_BMk25S",
        "super_dimp_simple_002.zip": "1k0Ti9l9JACpNkqBixKu9MrZi2v47eQGx",
        "super_dimp_simple_003.zip": "1SdIbXnYeFxTJa6QFn1HbsB662OFoLswl",
        "super_dimp_simple_004.zip": "1PaCepfqrSQIXyMwuwk_sYaaSOLpQvm27",
    },
    "atom": {
        "default_004.zip": "1BapnQh_8iRM44DXj862eOZV4q8zQLdmT",
        "default_003.zip": "1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5",
        "default_got_000.zip": "1uJnC0PPQhavwRbAL7VQ2Zow8YdLVzeCb",
        "default_got_001.zip": "1YzJm0H31veDW-lMxwy8MYNpMULgsYHKf",
        "default_000.zip": "1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5",
        "default_002.zip": "1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC",
        "default_001.zip": "1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt",
        "default_got_002.zip": "1qGtArxdAy0uWSd-HqFT5zmXpR6TCm4Vc",
    },
    "kys": {
        "default_004.zip": "1QdfkA3d4MzKwdDiBOM1ZhDJWk9NmALxD",
        "default_000.zip": "1SCs79_ePTc8zxPDzRAgAmbbRlnmE89SN",
        "default_003.zip": "1TCzq38QW4YiMrgU5VR6NAEefJ85gwzfT",
        "default_002.zip": "1_9u1ybCFxHu0yJmW5ZzDR4-isJMEUsDf",
        "default_001.zip": "1utJhdosNj6vlI75dfzUxGM3Vy8OjWslT",
    },
    "keep_track": {
        "default_000.zip": "1spUXwzQkoAj1D94OxcLTx8JPBc--0X4y",
        "default_001.zip": "1kI0y-xQxH2IL2CZwftqGCcR_PjPdQb66",
        "default_002.zip": "1eAzTNV6Ott4dEuunh_e1OvCAaH0lwUYv",
        "default_003.zip": "1G4n40ynquPWm7MeHj7WxIWPd7p1YJ75d",
        "default_004.zip": "17K_R_OEe-UZc_9GqaH5yMU17jKqSK2dF",
        "default_fast_000.zip": "1hoveozlXdXpirXfHTmaatogsaidaiGuM",
        "default_fast_001.zip": "1mphAhtl32PzYiQVNCMVFYnZbJXYt2sO4",
        "default_fast_002.zip": "1mqQ1bar2vhJ8Bwok4xiDYxhzYx-RY92j",
        "default_fast_003.zip": "1WGbicdfC5jYyIuhZrTzW44_zrZTB5dTV",
        "default_fast_004.zip": "1McQ7YPeo33SJU5EEqCCbLziVsL3VW36D",
    }
}
# Google Drive file IDs for results of external (non-PyTracking) trackers.
external_results_link_dict = {
    "UPDT": {
        "default_009.zip": "11zrgjiRL-gyRFsx5ZzycBpHb3a9rknA4",
        "default_008.zip": "1397XtEgFY68Ska6qo-cnGnwU4Dj6F-yE",
        "default_007.zip": "1fi4w32daXwfhbmLKao7esRw9JPXpKnwE",
        "default_006.zip": "14XxeykT-o1Ovj9KJocertk5zt6UaYB57",
        "default_005.zip": "1k5Mm4WNKUlWPTdsHyjxpJwFsUD8zmNo4",
        "default_004.zip": "1E3HcRScBf7_wyzS_4jH6fBbRUw9HJ3Tb",
        "default_003.zip": "1FdYCYp1KxGb2cwwvGG8tGk1-odY6a7T-",
        "default_002.zip": "19V3JaO7bNnhxIYD_OFdvUDGua9Oq3b7H",
        "default_001.zip": "18ie7rpLMxVRm-P4JmiS_VLBaibW6YQMz",
        "default_000.zip": "1i4mBqcNWxx9b0t-0hH_k65c-XkkGnZfi",
    },
    "SiamRPN++": {
        "default.zip": "1a_hBzAeTojEy_zueDxjwh2egv-EIfHnv",
    },
    "MDNet": {
        "default.zip": "18J0-5QrYbyGlnXXLNeF8g3mzgayxmEM1",
    },
    "ECO": {
        "default_hc.zip": "1M5fpL_b9KHjaHe-eMnfFYhNuTzdvzngb",
        "default_deep.zip": "1548ZhXdplOBFFxRG-kSNctSQsoOWHMrp",
    },
    "DaSiamRPN": {
        "default.zip": "1ckxL3nt4es6SfpAEUzVLhRrPYVsGK3Oi",
    },
    "CCOT": {
        "default.zip": "1yt_KpURIHQthwls5w3mcPsi9ia7fNih9",
    },
}
def _download_file(file_id, path):
    """Download the Google Drive file identified by *file_id* to *path*."""
    gdown.download('https://drive.google.com/uc?id={}'.format(file_id), path, quiet=True)
def download_results(download_path, trackers='pytracking'):
    """
    Script to automatically download tracker results for PyTracking.

    args:
        download_path - Directory where the zipped results are downloaded
        trackers - Tracker results which are to be downloaded.
                   If set to 'pytracking', results for all pytracking based trackers will be downloaded.
                   If set to 'external', results for available external trackers will be downloaded.
                   If set to 'all', all available results are downloaded.
                   If set to a name of a tracker (e.g. atom), all results for that tracker are downloaded.
                   Otherwise, it can be set to a dict, where the keys are the names of the trackers for which results are
                   downloaded. The value can be set to either 'all', in which case all available results for the
                   tracker are downloaded. Else the value should be a list of parameter file names.
    """
    # The old message used '' doubling (an SQL-style escape) which in Python is
    # just adjacent-string concatenation, so the intended quotes never printed.
    print("Using download path '{}'".format(download_path))

    os.makedirs(download_path, exist_ok=True)

    # Normalize the ``trackers`` argument into {tracker_name: runfiles}.
    if isinstance(trackers, str):
        if trackers == 'all':
            all_trackers = list(pytracking_results_link_dict.keys()) + list(external_results_link_dict.keys())
            trackers = {k: 'all' for k in all_trackers}
        elif trackers == 'pytracking':
            trackers = {k: 'all' for k in pytracking_results_link_dict.keys()}
        elif trackers == 'external':
            trackers = {k: 'all' for k in external_results_link_dict.keys()}
        elif trackers in pytracking_results_link_dict or trackers in external_results_link_dict:
            trackers = {trackers: 'all'}
        else:
            raise Exception("tracker_list must be set to 'all', a tracker name, or be a dict")
    elif not isinstance(trackers, dict):
        raise Exception("tracker_list must be set to 'all', or be a dict")

    # Merge into a fresh dict: the old code updated pytracking_results_link_dict
    # in place, permanently mutating the module-level constant.
    common_link_dict = dict(pytracking_results_link_dict)
    common_link_dict.update(external_results_link_dict)

    for trk, runfiles in trackers.items():
        trk_path = os.path.join(download_path, trk)
        os.makedirs(trk_path, exist_ok=True)

        if runfiles == 'all':
            for params, fileid in common_link_dict[trk].items():
                print('Downloading: {}/{}'.format(trk, params))
                _download_file(fileid, os.path.join(trk_path, params))
        elif isinstance(runfiles, (list, tuple)):
            for p in runfiles:
                for params, fileid in common_link_dict[trk].items():
                    # Accept "<p>.zip" as well as numbered runs "<p>_NNN.zip".
                    if re.match(r'{}(|_(\d\d\d)).zip'.format(p), params) is not None:
                        print('Downloading: {}/{}'.format(trk, params))
                        _download_file(fileid, os.path.join(trk_path, params))
        else:
            raise Exception("tracker_list values must either be set to 'all', or be a list of param names")
def unpack_tracking_results(download_path, output_path=None):
    """
    Unpacks zipped benchmark results. The directory 'download_path' should have the following structure
    - root
        - tracker1
            - param1.zip
            - param2.zip
            .
            .
        - tracker2
            - param1.zip
            - param2.zip
            .
            .

    args:
        download_path - Path to the directory where the zipped results are stored
        output_path - Path to the directory where the results will be unpacked. Set to env_settings().results_path
                      by default
    """
    if output_path is None:
        output_path = env_settings().results_path

    os.makedirs(output_path, exist_ok=True)

    # One sub-directory per tracker, one zip per parameter run.
    for tracker in os.listdir(download_path):
        for archive in os.listdir(os.path.join(download_path, tracker)):
            save_path = os.path.join(output_path, tracker)
            os.makedirs(save_path, exist_ok=True)
            # archive[:-4] strips the '.zip' extension for the target folder.
            shutil.unpack_archive(os.path.join(download_path, tracker, archive),
                                  os.path.join(save_path, archive[:-4]), 'zip')
def main():
    """Parse command-line options, then download and/or unpack tracker results."""
    parser = argparse.ArgumentParser(description='Download and unpack zipped results')
    parser.add_argument('--tracker', type=str, default='pytracking',
                        help='Name of tracker results to download, or "pytracking" (downloads results for PyTracking'
                             ' based trackers, or "external" (downloads results for external trackers) or "all"')
    parser.add_argument('--output_path', type=str, default=None,
                        help='Path to the directory where the results will be unpacked.')
    parser.add_argument('--temp_download_path', type=str, default=None,
                        help='Temporary path used for downloading the Zip files.')
    # argparse's type=bool treats any non-empty string (including "False") as
    # True, so the flag value is parsed explicitly.
    parser.add_argument('--download', type=lambda v: v.lower() not in ('false', '0', 'no'),
                        default=True,
                        help='Whether to download results or unpack existing downloaded files.')
    args = parser.parse_args()

    download_path = args.temp_download_path
    if download_path is None:
        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())

    if args.download:
        download_results(download_path, args.tracker)

    unpack_tracking_results(download_path, args.output_path)
# Command-line entry point.
if __name__ == '__main__':
    main()
| 44.720648 | 121 | 0.671917 |
dbf110b73649848ec3416806df4824703faecffc | 431 | py | Python | apps/clients/signals.py | swelanauguste/notices1 | 3e05746ad5c3af62dd2fa94beb9b3d3447f2086d | [
"MIT"
] | null | null | null | apps/clients/signals.py | swelanauguste/notices1 | 3e05746ad5c3af62dd2fa94beb9b3d3447f2086d | [
"MIT"
] | null | null | null | apps/clients/signals.py | swelanauguste/notices1 | 3e05746ad5c3af62dd2fa94beb9b3d3447f2086d | [
"MIT"
] | null | null | null | from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from .models import Profile
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
    """Create a Profile row for every newly created User."""
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Keep the related Profile row in sync whenever its User is saved.
    instance.profile.save()
6b559a50061487e99ead4aedbe0586eb5eeb9c44 | 1,741 | py | Python | azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models/application_insights_component_quota_status.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models/application_insights_component_quota_status.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models/application_insights_component_quota_status.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationInsightsComponentQuotaStatus(Model):
    """An Application Insights component daily data volume cap status.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar app_id: The Application ID for the Application Insights component.
    :vartype app_id: str
    :ivar should_be_throttled: The daily data volume cap is met, and data
     ingestion will be stopped.
    :vartype should_be_throttled: bool
    :ivar expiration_time: Date and time when the daily data volume cap will
     be reset, and data ingestion will resume.
    :vartype expiration_time: str
    """

    # All three fields are server-populated and therefore marked read-only.
    _validation = {
        'app_id': {'readonly': True},
        'should_be_throttled': {'readonly': True},
        'expiration_time': {'readonly': True},
    }

    # Maps Python attribute names to their REST/JSON keys and wire types.
    _attribute_map = {
        'app_id': {'key': 'AppId', 'type': 'str'},
        'should_be_throttled': {'key': 'ShouldBeThrottled', 'type': 'bool'},
        'expiration_time': {'key': 'ExpirationTime', 'type': 'str'},
    }

    def __init__(self):
        super(ApplicationInsightsComponentQuotaStatus, self).__init__()
        # Initialised to None client-side; values are filled in by the service.
        self.app_id = None
        self.should_be_throttled = None
        self.expiration_time = None
| 36.270833 | 76 | 0.630672 |
2f2d98ce2e1e290120d8abb5fae6659817d7cc76 | 1,121 | py | Python | tests/to_jsonschema/test_numeric_types.py | pglass/py-openapi-jsonschema-converter | ca84a49fc1e218ae4686d4ca6ee89781ac26c213 | [
"MIT"
] | 12 | 2018-12-10T19:11:47.000Z | 2021-08-02T13:55:04.000Z | tests/to_jsonschema/test_numeric_types.py | pglass/py-openapi-jsonschema-converter | ca84a49fc1e218ae4686d4ca6ee89781ac26c213 | [
"MIT"
] | 6 | 2018-07-06T19:36:47.000Z | 2020-07-27T18:43:50.000Z | tests/to_jsonschema/test_numeric_types.py | pglass/py-openapi-jsonschema-converter | ca84a49fc1e218ae4686d4ca6ee89781ac26c213 | [
"MIT"
] | 3 | 2018-07-05T23:06:18.000Z | 2020-07-11T03:32:07.000Z | from openapi_schema_to_json_schema import to_json_schema as convert
def test_handles_integer_types():
    """Converting an OpenAPI integer schema just adds the draft-04 $schema key."""
    cases = [
        {"type": 'integer'},
        {"type": 'integer', "format": 'int32'},
    ]
    for schema in cases:
        expected = dict(schema)
        expected["$schema"] = 'http://json-schema.org/draft-04/schema#'
        assert convert(schema) == expected
def test_handles_number_types():
    """Converting an OpenAPI number schema just adds the draft-04 $schema key."""
    cases = [
        {"type": 'number'},
        {"type": 'number', "format": 'float'},
    ]
    for schema in cases:
        expected = dict(schema)
        expected["$schema"] = 'http://json-schema.org/draft-04/schema#'
        assert convert(schema) == expected
| 22.42 | 67 | 0.537913 |
5dc13c32d5145aaa335dd450f44ebc648246494d | 1,391 | py | Python | src/app/rdfer.py | epoz/shmarql | 16fde653a7e8fdf81d3ecbf7427bf2428932013f | [
"Unlicense"
] | null | null | null | src/app/rdfer.py | epoz/shmarql | 16fde653a7e8fdf81d3ecbf7427bf2428932013f | [
"Unlicense"
] | null | null | null | src/app/rdfer.py | epoz/shmarql | 16fde653a7e8fdf81d3ecbf7427bf2428932013f | [
"Unlicense"
] | null | null | null | import json, logging
from .config import PREFIXES_FILEPATH, DEFAULT_PREFIXES
try:
if PREFIXES_FILEPATH:
PREFIXES = json.load(open(PREFIXES_FILEPATH))
else:
PREFIXES = DEFAULT_PREFIXES
except:
logging.exception(f"Problem binding PREFIXES from {PREFIXES_FILEPATH}")
def prefixes(value):
vv = value.lower()
for uri, prefix in PREFIXES.items():
replaced = vv.replace(uri, prefix)
if replaced != vv:
return replaced
replaced = vv.replace(prefix, uri)
if replaced != vv:
return replaced
return value
class RDFer:
def __init__(self, results):
self.data = {}
for row in results:
field = row.get("p")
value = row.get("o")
self.data.setdefault(field["value"], []).append(value)
self.data.setdefault(prefixes(field["value"]), []).append(value)
def get(self, key, default=None):
tmp = self.__getitem__(key)
if not tmp:
return default
return tmp
def __getitem__(self, key):
tmp = self.data.get(key)
if not tmp:
tmp = self.data.get(prefixes(key))
return tmp
def __call__(self, field, whole=False):
tmp = self.__getitem__(field)
if tmp:
if whole:
return tmp[0]
return prefixes(tmp[0]["value"])
| 26.75 | 76 | 0.580158 |
8ad7ac9d12fa58cd3c381c3b6d62e7c67f8173e0 | 9,277 | py | Python | docs/source/conf.py | mousepawgames/ratscript | fcd5faf89aee52935626d8a628bb410ad7da208f | [
"BSD-3-Clause"
] | 2 | 2019-10-07T02:57:36.000Z | 2020-01-10T20:02:55.000Z | docs/source/conf.py | mousepawgames/ratscript | fcd5faf89aee52935626d8a628bb410ad7da208f | [
"BSD-3-Clause"
] | null | null | null | docs/source/conf.py | mousepawgames/ratscript | fcd5faf89aee52935626d8a628bb410ad7da208f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Ratscript documentation build configuration file, created by
# sphinx-quickstart on Wed May 25 19:52:16 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Ratscript'
copyright = '2021 MousePaw Media'
author = 'MousePaw Media'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "ratscript_weblogo.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ratscriptdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Ratscript.tex', 'Ratscript Documentation',
'MousePaw Media', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ratscript', 'Ratscript Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Ratscript', 'Ratscript Documentation',
author, 'Ratscript', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.211806 | 80 | 0.71715 |
7ca300f1d3d1f9b919c8fc99aaa51d478a120c15 | 13,935 | py | Python | assets/src/ba_data/python/bastd/ui/settings/keyboard.py | booklin/ballistica | c7835388313e2d100a27568aec46a57d938d6839 | [
"MIT"
] | 1 | 2020-04-04T01:32:29.000Z | 2020-04-04T01:32:29.000Z | assets/src/ba_data/python/bastd/ui/settings/keyboard.py | Awesome-Logic/ballistica | 233a4a4f7840c9c666a1809626b6993a4b145349 | [
"MIT"
] | null | null | null | assets/src/ba_data/python/bastd/ui/settings/keyboard.py | Awesome-Logic/ballistica | 233a4a4f7840c9c666a1809626b6993a4b145349 | [
"MIT"
] | null | null | null | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Keyboard settings related UI functionality."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
if TYPE_CHECKING:
from typing import Dict, Tuple, Any, Optional
class ConfigKeyboardWindow(ba.Window):
"""Window for configuring keyboards."""
def __init__(self, c: ba.InputDevice, transition: str = 'in_right'):
self._r = 'configKeyboardWindow'
self._input = c
self._name = self._input.name
self._unique_id = self._input.unique_identifier
dname_raw = self._name
if self._unique_id != '#1':
dname_raw += ' ' + self._unique_id.replace('#', 'P')
self._displayname = ba.Lstr(translate=('inputDeviceNames', dname_raw))
self._width = 700
if self._unique_id != '#1':
self._height = 450
else:
self._height = 345
self._spacing = 40
super().__init__(root_widget=ba.containerwidget(
size=(self._width, self._height),
scale=(1.6 if ba.app.small_ui else 1.3 if ba.app.med_ui else 1.0),
stack_offset=(0, -10) if ba.app.small_ui else (0, 0),
transition=transition))
# don't ask to config joysticks while we're in here..
self._rebuild_ui()
def _rebuild_ui(self) -> None:
from ba.internal import get_device_value
for widget in self._root_widget.get_children():
widget.delete()
# fill our temp config with present values
self._settings: Dict[str, int] = {}
for button in [
'buttonJump', 'buttonPunch', 'buttonBomb', 'buttonPickUp',
'buttonStart', 'buttonStart2', 'buttonUp', 'buttonDown',
'buttonLeft', 'buttonRight'
]:
self._settings[button] = get_device_value(self._input, button)
cancel_button = ba.buttonwidget(parent=self._root_widget,
autoselect=True,
position=(38, self._height - 65),
size=(170, 60),
label=ba.Lstr(resource='cancelText'),
scale=0.9,
on_activate_call=self._cancel)
save_button = ba.buttonwidget(parent=self._root_widget,
autoselect=True,
position=(self._width - 190,
self._height - 65),
size=(180, 60),
label=ba.Lstr(resource='makeItSoText'),
scale=0.9,
text_scale=0.9,
on_activate_call=self._save)
ba.containerwidget(edit=self._root_widget,
cancel_button=cancel_button,
start_button=save_button)
ba.widget(edit=cancel_button, right_widget=save_button)
ba.widget(edit=save_button, left_widget=cancel_button)
v = self._height - 54.0
ba.textwidget(parent=self._root_widget,
position=(self._width * 0.5, v + 15),
size=(0, 0),
text=ba.Lstr(resource=self._r + '.configuringText',
subs=[('${DEVICE}', self._displayname)]),
color=ba.app.title_color,
h_align='center',
v_align='center',
maxwidth=270,
scale=0.83)
v -= 20
if self._unique_id != '#1':
v -= 20
v -= self._spacing
ba.textwidget(parent=self._root_widget,
position=(0, v + 19),
size=(self._width, 50),
text=ba.Lstr(resource=self._r +
'.keyboard2NoteText'),
scale=0.7,
maxwidth=self._width * 0.75,
max_height=110,
color=ba.app.infotextcolor,
h_align='center',
v_align='top')
v -= 45
v -= 10
v -= self._spacing * 2.2
v += 25
v -= 42
h_offs = 160
dist = 70
d_color = (0.4, 0.4, 0.8)
self._capture_button(pos=(h_offs, v + 0.95 * dist),
color=d_color,
button='buttonUp',
texture=ba.gettexture('upButton'),
scale=1.0)
self._capture_button(pos=(h_offs - 1.2 * dist, v),
color=d_color,
button='buttonLeft',
texture=ba.gettexture('leftButton'),
scale=1.0)
self._capture_button(pos=(h_offs + 1.2 * dist, v),
color=d_color,
button='buttonRight',
texture=ba.gettexture('rightButton'),
scale=1.0)
self._capture_button(pos=(h_offs, v - 0.95 * dist),
color=d_color,
button='buttonDown',
texture=ba.gettexture('downButton'),
scale=1.0)
if self._unique_id == '#2':
self._capture_button(pos=(self._width * 0.5, v + 0.1 * dist),
color=(0.4, 0.4, 0.6),
button='buttonStart',
texture=ba.gettexture('startButton'),
scale=0.8)
h_offs = self._width - 160
self._capture_button(pos=(h_offs, v + 0.95 * dist),
color=(0.6, 0.4, 0.8),
button='buttonPickUp',
texture=ba.gettexture('buttonPickUp'),
scale=1.0)
self._capture_button(pos=(h_offs - 1.2 * dist, v),
color=(0.7, 0.5, 0.1),
button='buttonPunch',
texture=ba.gettexture('buttonPunch'),
scale=1.0)
self._capture_button(pos=(h_offs + 1.2 * dist, v),
color=(0.5, 0.2, 0.1),
button='buttonBomb',
texture=ba.gettexture('buttonBomb'),
scale=1.0)
self._capture_button(pos=(h_offs, v - 0.95 * dist),
color=(0.2, 0.5, 0.2),
button='buttonJump',
texture=ba.gettexture('buttonJump'),
scale=1.0)
def _capture_button(self,
pos: Tuple[float, float],
color: Tuple[float, float, float],
texture: ba.Texture,
button: str,
scale: float = 1.0) -> None:
base_size = 79
btn = ba.buttonwidget(parent=self._root_widget,
autoselect=True,
position=(pos[0] - base_size * 0.5 * scale,
pos[1] - base_size * 0.5 * scale),
size=(base_size * scale, base_size * scale),
texture=texture,
label='',
color=color)
# do this deferred so it shows up on top of other buttons
def doit() -> None:
uiscale = 0.66 * scale * 2.0
maxwidth = 76.0 * scale
txt = ba.textwidget(parent=self._root_widget,
position=(pos[0] + 0.0 * scale,
pos[1] - (57.0 - 18.0) * scale),
color=(1, 1, 1, 0.3),
size=(0, 0),
h_align='center',
v_align='top',
scale=uiscale,
maxwidth=maxwidth,
text=self._input.get_button_name(
self._settings[button]))
ba.buttonwidget(edit=btn,
autoselect=True,
on_activate_call=ba.Call(AwaitKeyboardInputWindow,
button, txt,
self._settings))
ba.pushcall(doit)
def _cancel(self) -> None:
from bastd.ui.settings.controls import ControlsSettingsWindow
ba.containerwidget(edit=self._root_widget, transition='out_right')
ba.app.main_menu_window = (ControlsSettingsWindow(
transition='in_left').get_root_widget())
def _save(self) -> None:
from bastd.ui.settings.controls import ControlsSettingsWindow
from ba.internal import (get_input_device_config,
should_submit_debug_info, serverput)
ba.containerwidget(edit=self._root_widget, transition='out_right')
ba.playsound(ba.getsound('gunCocking'))
dst = get_input_device_config(self._input, default=False)
dst2: Dict[str, Any] = dst[0][dst[1]]
dst2.clear()
# Store any values that aren't -1.
for key, val in list(self._settings.items()):
if val != -1:
dst2[key] = val
# If we're allowed to phone home, send this config so we can generate
# more defaults in the future.
if should_submit_debug_info():
serverput(
'controllerConfig', {
'ua': ba.app.user_agent_string,
'name': self._name,
'b': ba.app.build_number,
'config': dst2,
'v': 2
})
ba.app.config.apply_and_commit()
ba.app.main_menu_window = (ControlsSettingsWindow(
transition='in_left').get_root_widget())
class AwaitKeyboardInputWindow(ba.Window):
"""Window for capturing a keypress."""
def __init__(self, button: str, ui: ba.Widget, settings: Dict[str, Any]):
self._capture_button = button
self._capture_key_ui = ui
self._settings = settings
width = 400
height = 150
super().__init__(root_widget=ba.containerwidget(
size=(width, height),
transition='in_right',
scale=2.0 if ba.app.small_ui else 1.5 if ba.app.med_ui else 1.0))
ba.textwidget(parent=self._root_widget,
position=(0, height - 60),
size=(width, 25),
text=ba.Lstr(resource='pressAnyKeyText'),
h_align='center',
v_align='top')
self._counter = 5
self._count_down_text = ba.textwidget(parent=self._root_widget,
h_align='center',
position=(0, height - 110),
size=(width, 25),
color=(1, 1, 1, 0.3),
text=str(self._counter))
self._decrement_timer: Optional[ba.Timer] = ba.Timer(
1.0,
ba.Call(self._decrement),
repeat=True,
timetype=ba.TimeType.REAL)
_ba.capture_keyboard_input(ba.WeakCall(self._button_callback))
def __del__(self) -> None:
_ba.release_keyboard_input()
def _die(self) -> None:
# this strong-refs us; killing it allow us to die now
self._decrement_timer = None
if self._root_widget:
ba.containerwidget(edit=self._root_widget, transition='out_left')
def _button_callback(self, event: Dict[str, Any]) -> None:
self._settings[self._capture_button] = event['button']
if event['type'] == 'BUTTONDOWN':
bname = event['input_device'].get_button_name(event['button'])
ba.textwidget(edit=self._capture_key_ui, text=bname)
ba.playsound(ba.getsound('gunCocking'))
self._die()
def _decrement(self) -> None:
self._counter -= 1
if self._counter >= 1:
ba.textwidget(edit=self._count_down_text, text=str(self._counter))
else:
self._die()
| 43.546875 | 79 | 0.485684 |
5bf652148ef9303522bf45ed8274b945a849e2f3 | 3,041 | py | Python | vunit/test/unit/test_database.py | imd1/vunit | c5693af6a46547da117f45d03e492ad17385b23f | [
"Artistic-2.0"
] | 2 | 2021-05-13T19:57:55.000Z | 2021-05-21T13:28:39.000Z | vunit/test/unit/test_database.py | imd1/vunit | c5693af6a46547da117f45d03e492ad17385b23f | [
"Artistic-2.0"
] | 1 | 2018-06-17T18:01:25.000Z | 2018-06-22T11:07:33.000Z | vunit/test/unit/test_database.py | imd1/vunit | c5693af6a46547da117f45d03e492ad17385b23f | [
"Artistic-2.0"
] | 3 | 2020-01-17T08:27:53.000Z | 2020-01-30T22:03:20.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015-2016, Lars Asplund lars.anders.asplund@gmail.com
"""
Test the database related classes
"""
import unittest
from os.path import join, dirname
from vunit.database import DataBase, PickledDataBase
from vunit.ostools import renew_path
class TestDataBase(unittest.TestCase):
"""
The the byte based database
"""
key1 = b"key2"
key2 = b"key1"
value1 = b"value1"
value2 = b"value2"
def setUp(self):
self.output_path = join(dirname(__file__), "test_database_out")
renew_path(self.output_path)
def create_database(self, new=False):
return DataBase(join(self.output_path, "database"), new=new)
def test_add_items(self):
database = self.create_database()
self.assertTrue(self.key1 not in database)
self.assertTrue(self.key2 not in database)
database[self.key1] = self.value1
self.assertTrue(self.key1 in database)
self.assertTrue(self.key2 not in database)
self.assertEqual(database[self.key1], self.value1)
database[self.key2] = self.value2
self.assertTrue(self.key1 in database)
self.assertTrue(self.key2 in database)
self.assertEqual(database[self.key1], self.value1)
self.assertEqual(database[self.key2], self.value2)
def test_is_persistent(self):
self.test_add_items()
database = self.create_database()
self.assertEqual(database[self.key1], self.value1)
self.assertEqual(database[self.key2], self.value2)
def test_new_database_is_created(self):
self.test_add_items()
database = self.create_database(new=True)
self.assertTrue(self.key1 not in database)
self.assertTrue(self.key2 not in database)
def test_missing_key_raises_keyerror(self):
database = self.create_database()
self.assertRaises(KeyError, lambda: database[self.key1])
def test_can_overwrite_key(self):
database = self.create_database()
database[self.key1] = self.value1
database[self.key2] = self.value2
self.assertEqual(database[self.key1], self.value1)
self.assertEqual(database[self.key2], self.value2)
database[self.key1] = self.value2
self.assertEqual(database[self.key1], self.value2)
self.assertEqual(database[self.key2], self.value2)
database[self.key2] = self.value1
self.assertEqual(database[self.key1], self.value2)
self.assertEqual(database[self.key2], self.value1)
class TestPickedDataBase(TestDataBase):
"""
Test the picked database
Re-uses test from TestDataBase class
"""
value1 = (1, "foo", set([1, 2, 3]))
value2 = (3, 4, 5, ("foo", "bar"))
def create_database(self, new=False):
return PickledDataBase(TestDataBase.create_database(self, new))
| 31.677083 | 75 | 0.679382 |
70c8b930094ade0127354d3df22fe5c320409c5d | 38,991 | py | Python | src/transformers/models/roformer/modeling_flax_roformer.py | bhavika/transformers | 65cf33e7e53cd46313f3655f274b3f6ca0fd679d | [
"Apache-2.0"
] | 31 | 2022-02-02T13:13:41.000Z | 2022-03-29T08:37:20.000Z | src/transformers/models/roformer/modeling_flax_roformer.py | guang7400613/transformers | 28e091430eea9e0d40839e56fd0d57aec262f5f9 | [
"Apache-2.0"
] | 2 | 2022-03-14T10:13:16.000Z | 2022-03-14T11:50:27.000Z | src/transformers/models/roformer/modeling_flax_roformer.py | guang7400613/transformers | 28e091430eea9e0d40839e56fd0d57aec262f5f9 | [
"Apache-2.0"
] | 2 | 2022-03-21T04:32:39.000Z | 2022-03-22T01:02:49.000Z | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax RoFormer model."""
from typing import Callable, Optional, Tuple
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxMaskedLMOutput,
FlaxMultipleChoiceModelOutput,
FlaxQuestionAnsweringModelOutput,
FlaxSequenceClassifierOutput,
FlaxTokenClassifierOutput,
)
from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
from ...utils import logging
from .configuration_roformer import RoFormerConfig
logger = logging.get_logger(__name__)

# Names referenced by the docstring code-example helpers imported above
# (e.g. `append_call_sample_docstring`) when generating usage examples.
_CHECKPOINT_FOR_DOC = "junnyu/roformer_chinese_base"
_CONFIG_FOR_DOC = "RoFormerConfig"
_TOKENIZER_FOR_DOC = "RoFormerTokenizer"

# Hub checkpoint identifiers known to be compatible with this architecture.
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "junnyu/roformer_chinese_small",
    "junnyu/roformer_chinese_base",
    "junnyu/roformer_chinese_char_small",
    "junnyu/roformer_chinese_char_base",
    "junnyu/roformer_small_discriminator",
    "junnyu/roformer_small_generator"
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
]
ROFORMER_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`RoFormerConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
ROFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`numpy.ndarray` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`RoFormerTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
        head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
# Adapted (vectorized) from transformers.models.marian.modeling_flax_marian.create_sinusoidal_positions
def create_sinusoidal_positions(n_pos, dim):
    """Build a fixed (non-learned) table of sinusoidal position encodings.

    Args:
        n_pos: number of positions (rows) to generate.
        dim: embedding dimension (columns) of each position vector.

    Returns:
        A `jnp.ndarray` of shape `(n_pos, dim)` whose first `ceil(dim / 2)`
        columns hold the sine components and whose remaining columns hold the
        cosine components (the split layout used by Marian/RoFormer, not the
        interleaved layout of the original Transformer paper).
    """
    position = np.arange(n_pos)[:, np.newaxis]  # (n_pos, 1)
    dim_index = np.arange(dim)[np.newaxis, :]  # (1, dim)
    # Same per-element angle as the original double loop:
    # pos / 10000 ** (2 * (j // 2) / dim), computed via numpy broadcasting.
    position_enc = position / np.power(10000, 2 * (dim_index // 2) / dim)
    sentinel = dim // 2 + dim % 2  # sin/cos split point; handles odd `dim`
    out = np.zeros_like(position_enc)
    out[:, :sentinel] = np.sin(position_enc[:, 0::2])
    out[:, sentinel:] = np.cos(position_enc[:, 1::2])
    return jnp.array(out)
class FlaxRoFormerEmbeddings(nn.Module):
    """Input embeddings for RoFormer: word + token-type, then LayerNorm and dropout.

    There is no learned position embedding here; positional information is
    injected later by the rotary embeddings applied inside the attention
    layers of this model.
    """

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # Both embedding tables share the same normal initializer.
        embed_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
        self.word_embeddings = nn.Embed(
            self.config.vocab_size,
            self.config.hidden_size,
            embedding_init=embed_init,
        )
        self.token_type_embeddings = nn.Embed(
            self.config.type_vocab_size,
            self.config.hidden_size,
            embedding_init=embed_init,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, input_ids, token_type_ids, attention_mask, deterministic: bool = True):
        # NOTE(review): `attention_mask` is accepted for signature parity with
        # callers but is not used in this computation.
        word_embeds = self.word_embeddings(input_ids.astype("i4"))
        type_embeds = self.token_type_embeddings(token_type_ids.astype("i4"))

        # Combine the embeddings, then normalize and regularize.
        embeddings = self.LayerNorm(word_embeds + type_embeds)
        return self.dropout(embeddings, deterministic=deterministic)
class FlaxRoFormerSelfAttention(nn.Module):
    """Multi-head self-attention with RoFormer rotary position embeddings.

    Rotary position embeddings are applied to the query and key projections
    (and, when ``config.rotary_value`` is set, to the value projection as
    well) before the scaled dot-product attention.
    """

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        if self.config.hidden_size % self.config.num_attention_heads != 0:
            # Bug fix: the original message was missing the ``f`` prefix, so the
            # ``{...}`` placeholders were emitted verbatim instead of being
            # interpolated with the config values.
            raise ValueError(
                f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of "
                f"`config.num_attention_heads`: {self.config.num_attention_heads}"
            )

        self.query = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.key = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.value = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

        self.rotary_value = self.config.rotary_value

    def __call__(
        self,
        hidden_states,
        attention_mask,
        sinusoidal_pos,
        layer_head_mask,
        deterministic=True,
        output_attentions: bool = False,
    ):
        head_dim = self.config.hidden_size // self.config.num_attention_heads

        # Project and split into (batch, seq, num_heads, head_dim).
        query_states = self.query(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )
        value_states = self.value(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )
        key_states = self.key(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )

        if sinusoidal_pos is not None:
            if self.rotary_value:
                query_states, key_states, value_states = self.apply_rotary_position_embeddings(
                    sinusoidal_pos, query_states, key_states, value_states
                )
            else:
                query_states, key_states = self.apply_rotary_position_embeddings(
                    sinusoidal_pos, query_states, key_states
                )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, -1e10).astype(self.dtype),
            )
        else:
            attention_bias = None

        # Attention dropout needs its own PRNG stream when active.
        dropout_rng = None
        if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.config.attention_probs_dropout_prob,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        # Merge the heads back into a single hidden dimension.
        attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))

        outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
        return outputs

    @staticmethod
    def apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer, value_layer=None):
        """Rotate query/key (and optionally value) layers by the sinusoidal position table.

        Returns a 2-tuple (query, key) when ``value_layer`` is None, else a 3-tuple.
        """
        sin, cos = sinusoidal_pos.split(2, axis=-1)
        # Duplicate each angle so consecutive (even, odd) feature pairs share it.
        sin_pos = jnp.stack([sin, sin], axis=-1).reshape(sinusoidal_pos.shape)
        cos_pos = jnp.stack([cos, cos], axis=-1).reshape(sinusoidal_pos.shape)

        def rotate_layer(layer, sin_pos, cos_pos):
            # Pair-wise rotation: (x_even, x_odd) -> (-x_odd, x_even).
            rotate_half_layer = jnp.stack([-layer[..., 1::2], layer[..., ::2]], axis=-1).reshape(layer.shape)
            rotary_matrix_cos = jnp.einsum("bslh,...sh->bslh", layer, cos_pos)
            rotary_matrix_sin = jnp.einsum("bslh,...sh->bslh", rotate_half_layer, sin_pos)
            return rotary_matrix_cos + rotary_matrix_sin

        query_layer = rotate_layer(query_layer, sin_pos, cos_pos)
        key_layer = rotate_layer(key_layer, sin_pos, cos_pos)
        if value_layer is not None:
            value_layer = rotate_layer(value_layer, sin_pos, cos_pos)
            return query_layer, key_layer, value_layer

        return query_layer, key_layer
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->RoFormer
class FlaxRoFormerSelfOutput(nn.Module):
    """Dense projection of the attention output, then dropout and residual LayerNorm."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        # Residual connection: add the sub-layer input back before normalizing.
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class FlaxRoFormerAttention(nn.Module):
    """Full attention sub-layer: self-attention followed by the residual output block."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.self = FlaxRoFormerSelfAttention(self.config, dtype=self.dtype)
        self.output = FlaxRoFormerSelfOutput(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        sinusoidal_pos,
        layer_head_mask,
        deterministic=True,
        output_attentions: bool = False,
    ):
        # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
        # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
        # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            sinusoidal_pos,
            layer_head_mask=layer_head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        # Project back to hidden size and add the residual connection.
        attention_output = self.output(self_outputs[0], hidden_states, deterministic=deterministic)
        if output_attentions:
            return (attention_output, self_outputs[1])
        return (attention_output,)
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->RoFormer
class FlaxRoFormerIntermediate(nn.Module):
    """Feed-forward expansion to ``intermediate_size`` followed by the configured activation."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.intermediate_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        # Activation function is selected by name from the shared ACT2FN registry.
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->RoFormer
class FlaxRoFormerOutput(nn.Module):
    """Feed-forward contraction back to ``hidden_size`` with dropout and residual LayerNorm."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states, attention_output, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        # Residual connection: add the attention output back before normalizing.
        hidden_states = self.LayerNorm(hidden_states + attention_output)
        return hidden_states
class FlaxRoFormerLayer(nn.Module):
    """One encoder block: self-attention followed by the feed-forward sub-layer."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.attention = FlaxRoFormerAttention(self.config, dtype=self.dtype)
        self.intermediate = FlaxRoFormerIntermediate(self.config, dtype=self.dtype)
        self.output = FlaxRoFormerOutput(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        # NOTE(review): "sinusiodal_pos" is a misspelling of "sinusoidal_pos",
        # kept as-is to avoid breaking any keyword-argument callers.
        sinusiodal_pos,
        layer_head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            sinusiodal_pos,
            layer_head_mask=layer_head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attention_output = attention_outputs[0]
        # Feed-forward sub-layer; ``self.output`` adds the residual + LayerNorm.
        hidden_states = self.intermediate(attention_output)
        hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attention_outputs[1],)
        return outputs
class FlaxRoFormerLayerCollection(nn.Module):
    """The stack of ``num_hidden_layers`` RoFormer encoder layers."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxRoFormerLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        attention_mask,
        sinusoidal_pos,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        # Check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.shape[0] != (len(self.layers)):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
                    f"{head_mask.shape[0]}."
                )

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = layer(
                hidden_states,
                attention_mask,
                sinusoidal_pos,
                layer_head_mask=head_mask[i] if head_mask is not None else None,
                deterministic=deterministic,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        # Bug fix: include the collected hidden states and attentions in the
        # tuple return path. Previously ``outputs`` only held the last hidden
        # state, so ``output_hidden_states``/``output_attentions`` were silently
        # dropped when ``return_dict=False`` (upstream Bert returns all three).
        outputs = (hidden_states, all_hidden_states, all_attentions)

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
class FlaxRoFormerEncoder(nn.Module):
    """Encoder stack sharing one precomputed table of sinusoidal rotary positions."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        head_dim = self.config.hidden_size // self.config.num_attention_heads
        self.embed_positions = create_sinusoidal_positions(
            self.config.max_position_embeddings, head_dim
        )
        self.layer = FlaxRoFormerLayerCollection(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Slice the precomputed position table down to the actual sequence length.
        seq_length = hidden_states.shape[1]
        sinusoidal_pos = self.embed_positions[:seq_length, :]
        return self.layer(
            hidden_states,
            attention_mask,
            sinusoidal_pos,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPredictionHeadTransform with Bert->RoFormer
class FlaxRoFormerPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied before the MLM decoder."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
        self.activation = ACT2FN[self.config.hidden_act]
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return self.LayerNorm(hidden_states)
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLMPredictionHead with Bert->RoFormer
class FlaxRoFormerLMPredictionHead(nn.Module):
    """Masked-LM prediction head: transform, vocabulary projection, and bias."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32
    bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros

    def setup(self):
        self.transform = FlaxRoFormerPredictionHeadTransform(self.config, dtype=self.dtype)
        # Bias is kept as a separate parameter so the decoder kernel can be
        # swapped for the (transposed) input embedding matrix when tied.
        self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
        self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))

    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.transform(hidden_states)

        if shared_embedding is not None:
            # Tied-weights path: reuse the word-embedding matrix as the kernel.
            hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            hidden_states = self.decoder(hidden_states)

        bias = jnp.asarray(self.bias, self.dtype)
        hidden_states += bias
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyMLMHead with Bert->RoFormer
class FlaxRoFormerOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.predictions = FlaxRoFormerLMPredictionHead(self.config, dtype=self.dtype)

    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding)
        return hidden_states
class FlaxRoFormerClassificationHead(nn.Module):
    """Sentence-level classification head operating on the first token."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
        self.dense = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=kernel_init,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.out_proj = nn.Dense(
            self.config.num_labels,
            dtype=self.dtype,
            kernel_init=kernel_init,
        )

    def __call__(self, hidden_states, deterministic=True):
        x = hidden_states[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x, deterministic=deterministic)
        x = nn.tanh(self.dense(x))
        x = self.dropout(x, deterministic=deterministic)
        return self.out_proj(x)
class FlaxRoFormerPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RoFormerConfig
    base_model_prefix = "roformer"
    # Set by each concrete subclass to the nn.Module implementing the model.
    module_class: nn.Module = None

    def __init__(
        self,
        config: RoFormerConfig,
        input_shape: Tuple = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        **kwargs
    ):
        # Instantiate the concrete Flax module; the base class drives
        # parameter initialization/loading via ``init_weights``.
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
        """Initialize module parameters by tracing the module with dummy inputs."""
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        token_type_ids = jnp.zeros_like(input_ids)
        attention_mask = jnp.ones_like(input_ids)
        head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))

        # Separate PRNG streams for parameter init and dropout.
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.module.init(rngs, input_ids, attention_mask, token_type_ids, head_mask, return_dict=False)[
            "params"
        ]

    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # Fall back to config defaults for the output options.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # init input tensors if not passed
        if token_type_ids is None:
            token_type_ids = jnp.zeros_like(input_ids)

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        if head_mask is None:
            head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(token_type_ids, dtype="i4"),
            jnp.array(head_mask, dtype="i4"),
            not train,  # deterministic (disables dropout) unless training
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )
class FlaxRoFormerModule(nn.Module):
    """Bare RoFormer transformer: embeddings followed by the encoder stack."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.embeddings = FlaxRoFormerEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxRoFormerEncoder(self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        embedding_output = self.embeddings(
            input_ids, token_type_ids, attention_mask, deterministic=deterministic
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            head_mask=head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return FlaxBaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.",
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerModel(FlaxRoFormerPreTrainedModel):
    # Concrete model class; behavior is implemented by FlaxRoFormerModule.
    module_class = FlaxRoFormerModule


# Attach a usage example to FlaxRoFormerModel.__call__'s docstring.
append_call_sample_docstring(
    FlaxRoFormerModel, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC
)
class FlaxRoFormerForMaskedLMModule(nn.Module):
    """RoFormer backbone topped with the masked-language-modeling head."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.cls = FlaxRoFormerOnlyMLMHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Run the transformer backbone.
        outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # When embeddings are tied, reuse the input embedding matrix as the
        # decoder kernel; otherwise the head owns its own projection.
        shared_embedding = None
        if self.config.tie_word_embeddings:
            shared_embedding = self.roformer.variables["params"]["embeddings"]["word_embeddings"]["embedding"]

        # Compute the prediction scores over the vocabulary.
        logits = self.cls(outputs[0], shared_embedding=shared_embedding)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxMaskedLMOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""RoFormer Model with a `language modeling` head on top.""", ROFORMER_START_DOCSTRING)
class FlaxRoFormerForMaskedLM(FlaxRoFormerPreTrainedModel):
    # Concrete model class; behavior is implemented by FlaxRoFormerForMaskedLMModule.
    module_class = FlaxRoFormerForMaskedLMModule


# Attach a usage example to FlaxRoFormerForMaskedLM.__call__'s docstring.
append_call_sample_docstring(
    FlaxRoFormerForMaskedLM,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxMaskedLMOutput,
    _CONFIG_FOR_DOC,
    mask="<mask>",
)
class FlaxRoFormerForSequenceClassificationModule(nn.Module):
    """RoFormer backbone topped with the sequence-classification head."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.classifier = FlaxRoFormerClassificationHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Run the transformer backbone.
        backbone_outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the last hidden state (head pools the first token).
        logits = self.classifier(backbone_outputs[0], deterministic=deterministic)

        if not return_dict:
            return (logits,) + backbone_outputs[1:]

        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
        )
@add_start_docstrings(
    """
    RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerForSequenceClassification(FlaxRoFormerPreTrainedModel):
    # Concrete model class; behavior is implemented by the module below.
    module_class = FlaxRoFormerForSequenceClassificationModule


# Attach a usage example to FlaxRoFormerForSequenceClassification.__call__'s docstring.
append_call_sample_docstring(
    FlaxRoFormerForSequenceClassification,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)
class FlaxRoFormerForMultipleChoiceModule(nn.Module):
    """RoFormer backbone with a single-logit head for multiple-choice tasks."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(1, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        num_choices = input_ids.shape[1]
        # Fold the choices dimension into the batch dimension.
        input_ids = input_ids.reshape(-1, input_ids.shape[-1])
        attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1])
        token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1])

        # Model
        outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Equivalent to sequence_summary call in the PyTorch implementation:
        # pool by taking the last token of each (flattened) sequence.
        hidden_states = outputs[0]
        pooled_output = hidden_states[:, -1]
        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.reshape(-1, num_choices)

        if not return_dict:
            # Bug fix: slice from index 1, not 2. Unlike BERT, the RoFormer
            # backbone returns no pooled output, so index 1 already holds the
            # optional hidden states and must not be dropped (matches the
            # other RoFormer heads in this file and the PyTorch head).
            return (reshaped_logits,) + outputs[1:]

        return FlaxMultipleChoiceModelOutput(
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerForMultipleChoice(FlaxRoFormerPreTrainedModel):
    # Concrete model class; behavior is implemented by the module below.
    module_class = FlaxRoFormerForMultipleChoiceModule


# Multiple-choice inputs carry an extra num_choices dimension, so the
# generic input docstring is replaced before the sample is appended.
overwrite_call_docstring(
    FlaxRoFormerForMultipleChoice, ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
append_call_sample_docstring(
    FlaxRoFormerForMultipleChoice,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxMultipleChoiceModelOutput,
    _CONFIG_FOR_DOC,
)
class FlaxRoFormerForTokenClassificationModule(nn.Module):
    """RoFormer backbone with dropout and a per-token linear classifier."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Run the transformer backbone.
        outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token logits from the (dropped-out) last hidden state.
        sequence_output = self.dropout(outputs[0], deterministic=deterministic)
        logits = self.classifier(sequence_output)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxTokenClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerForTokenClassification(FlaxRoFormerPreTrainedModel):
    # Concrete model class; behavior is implemented by the module below.
    module_class = FlaxRoFormerForTokenClassificationModule


# Attach a usage example to FlaxRoFormerForTokenClassification.__call__'s docstring.
append_call_sample_docstring(
    FlaxRoFormerForTokenClassification,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxTokenClassifierOutput,
    _CONFIG_FOR_DOC,
)
class FlaxRoFormerForQuestionAnsweringModule(nn.Module):
    """RoFormer backbone with a span-prediction head (start/end logits)."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Run the transformer backbone.
        outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # One (start, end) logit pair per token, split along the last axis.
        span_logits = self.qa_outputs(outputs[0])
        start_logits, end_logits = span_logits.split(self.config.num_labels, axis=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if not return_dict:
            return (start_logits, end_logits) + outputs[1:]

        return FlaxQuestionAnsweringModelOutput(
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerForQuestionAnswering(FlaxRoFormerPreTrainedModel):
    # Concrete model class; behavior is implemented by the module below.
    module_class = FlaxRoFormerForQuestionAnsweringModule


# Attach a usage example to FlaxRoFormerForQuestionAnswering.__call__'s docstring.
append_call_sample_docstring(
    FlaxRoFormerForQuestionAnswering,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxQuestionAnsweringModelOutput,
    _CONFIG_FOR_DOC,
)
| 35.870285 | 119 | 0.669513 |
2d3e639a8421b13f82680aa2ab3a9dedc5444fcf | 70,590 | py | Python | ironic/drivers/modules/drac/raid.py | Hellcatlk/ironic | e15440370cca1f1a998d3607910697c3129d040a | [
"Apache-2.0"
] | 1 | 2021-03-29T03:48:47.000Z | 2021-03-29T03:48:47.000Z | ironic/drivers/modules/drac/raid.py | Hellcatlk/ironic | e15440370cca1f1a998d3607910697c3129d040a | [
"Apache-2.0"
] | null | null | null | ironic/drivers/modules/drac/raid.py | Hellcatlk/ironic | e15440370cca1f1a998d3607910697c3129d040a | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DRAC RAID specific methods
"""
from collections import defaultdict
import math
from futurist import periodics
from ironic_lib import metrics_utils
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import raid as raid_common
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import job as drac_job
from ironic.drivers.modules.redfish import raid as redfish_raid
from ironic.drivers.modules.redfish import utils as redfish_utils
drac_exceptions = importutils.try_import('dracclient.exceptions')
drac_constants = importutils.try_import('dracclient.constants')
sushy = importutils.try_import('sushy')
LOG = logging.getLogger(__name__)

METRICS = metrics_utils.get_metrics_logger(__name__)

# iDRAC attribute names used to read/request the RAID controller's mode.
_CURRENT_RAID_CONTROLLER_MODE = "RAIDCurrentControllerMode"
_REQUESTED_RAID_CONTROLLER_MODE = "RAIDRequestedControllerMode"
# Possible values of the controller-mode attributes above.
_EHBA_MODE = "Enhanced HBA"
_RAID_MODE = "RAID"

# Disk-count constraints and redundancy overhead per supported RAID level.
# 'overhead' is the number of disks' worth of capacity consumed by
# parity/mirroring; 'spanned' levels are built from spans of 'span_type'.
RAID_LEVELS = {
    '0': {
        'min_disks': 1,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 0
    },
    '1': {
        'min_disks': 2,
        'max_disks': 2,
        'type': 'simple',
        'overhead': 1
    },
    '5': {
        'min_disks': 3,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 1
    },
    '6': {
        'min_disks': 4,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 2
    },
    '1+0': {
        'type': 'spanned',
        'span_type': '1'
    },
    '5+0': {
        'type': 'spanned',
        'span_type': '5'
    },
    '6+0': {
        'type': 'spanned',
        'span_type': '6'
    }
}
def list_raid_controllers(node):
    """List the RAID controllers of the node.

    :param node: an ironic node object.
    :returns: a list of RAIDController objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        controllers = client.list_raid_controllers()
    except drac_exceptions.BaseClientException as exc:
        # Wrap dracclient errors in the driver-level exception type.
        LOG.error('DRAC driver failed to get the list of RAID controllers '
                  'for node %(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)

    return controllers
def list_virtual_disks(node):
    """List the virtual disks of the node.

    :param node: an ironic node object.
    :returns: a list of VirtualDisk objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        virtual_disks = client.list_virtual_disks()
    except drac_exceptions.BaseClientException as exc:
        # Wrap dracclient errors in the driver-level exception type.
        LOG.error('DRAC driver failed to get the list of virtual disks '
                  'for node %(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)

    return virtual_disks
def list_physical_disks(node):
    """List the physical disks of the node.

    :param node: an ironic node object.
    :returns: a list of PhysicalDisk objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        physical_disks = client.list_physical_disks()
    except drac_exceptions.BaseClientException as exc:
        # Wrap dracclient errors in the driver-level exception type.
        LOG.error('DRAC driver failed to get the list of physical disks '
                  'for node %(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)

    return physical_disks
def _is_raid_controller(node, raid_controller_fqdd, raid_controllers=None):
    """Tell whether a device FQDD belongs to a RAID controller.

    :param node: an ironic node object
    :param raid_controller_fqdd: The object's fqdd we are testing to see
                                 if it is a raid controller or not.
    :param raid_controllers: A list of RAIDControllers used to check for
                             the presence of BOSS cards.  If None, the
                             iDRAC will be queried for the list of
                             controllers.
    :returns: boolean, True if the device is a RAID controller,
              False if not.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        is_raid = client.is_raid_controller(raid_controller_fqdd,
                                            raid_controllers)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('Unable to determine if controller %(raid_controller_fqdd)s '
                  'on node %(node_uuid)s is a RAID controller. '
                  'Reason: %(error)s. ',
                  {'raid_controller_fqdd': raid_controller_fqdd,
                   'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)

    return is_raid
def _validate_job_queue(node, raid_controller=None):
    """Validate that there are no pending jobs for this controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller; when given, only jobs
                            whose names target that controller are checked.
    """
    if raid_controller:
        drac_job.validate_job_queue(
            node, name_prefix="Config:RAID:%s" % raid_controller)
    else:
        drac_job.validate_job_queue(node)
def create_virtual_disk(node, raid_controller, physical_disks, raid_level,
                        size_mb, disk_name=None, span_length=None,
                        span_depth=None):
    """Create a single virtual disk on a RAID controller.

    The created virtual disk will be in pending state. The DRAC card will do
    the actual configuration once the changes are applied by calling the
    ``commit_config`` method.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :param physical_disks: ids of the physical disks.
    :param raid_level: RAID level of the virtual disk.
    :param size_mb: size of the virtual disk.
    :param disk_name: name of the virtual disk. (optional)
    :param span_depth: Number of spans in virtual disk. (optional)
    :param span_length: Number of disks per span. (optional)
    :returns: a dictionary containing the commit_needed key with a boolean
              value indicating whether a config job must be created for the
              values to be applied.
    :raises: DracOperationError on an error from python-dracclient.
    """
    # This causes config to fail, because the boot mode is set via a config
    # job.
    _validate_job_queue(node, raid_controller)

    client = drac_common.get_drac_client(node)

    try:
        result = client.create_virtual_disk(raid_controller, physical_disks,
                                            raid_level, size_mb, disk_name,
                                            span_length, span_depth)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to create virtual disk for node '
                  '%(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)

    return result
def delete_virtual_disk(node, virtual_disk):
    """Delete a single virtual disk on a RAID controller.

    The deletion is only marked pending; the DRAC card does the actual
    configuration once the changes are applied by calling the
    ``commit_config`` method.

    :param node: an ironic node object.
    :param virtual_disk: id of the virtual disk.
    :returns: a dictionary containing the commit_needed key with a boolean
              value indicating whether a config job must be created for the
              values to be applied.
    :raises: DracOperationError on an error from python-dracclient.
    """
    # NOTE(mgoddard): Cannot specify raid_controller as we don't know it.
    _validate_job_queue(node)

    client = drac_common.get_drac_client(node)

    try:
        return client.delete_virtual_disk(virtual_disk)
    except drac_exceptions.BaseClientException as err:
        LOG.error('DRAC driver failed to delete virtual disk '
                  '%(virtual_disk_fqdd)s for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'virtual_disk_fqdd': virtual_disk,
                   'node_uuid': node.uuid, 'error': err})
        raise exception.DracOperationError(error=err)
def _reset_raid_config(node, raid_controller):
    """Delete all virtual disks and unassign all hotspare physical disks.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :returns: a dictionary containing
              - The is_commit_required needed key with a
              boolean value indicating whether a config job must be created
              for the values to be applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted to
              reset configuration.
    :raises: DracOperationError on an error from python-dracclient.
    """
    try:
        _validate_job_queue(node, raid_controller)

        client = drac_common.get_drac_client(node)
        return client.reset_raid_config(raid_controller)
    except drac_exceptions.BaseClientException as err:
        log_payload = {'raid_controller_fqdd': raid_controller,
                       'node_uuid': node.uuid,
                       'error': err}
        LOG.error('DRAC driver failed to delete all virtual disk '
                  'and unassign all hotspares '
                  'on %(raid_controller_fqdd)s '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.', log_payload)
        raise exception.DracOperationError(error=err)
def clear_foreign_config(node, raid_controller):
    """Free up the foreign drives.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :returns: a dictionary containing
              - The is_commit_required needed key with a
              boolean value indicating whether a config job must be created
              for the values to be applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted to
              clear foreign configuration.
    :raises: DracOperationError on an error from python-dracclient.
    """
    try:
        _validate_job_queue(node, raid_controller)

        client = drac_common.get_drac_client(node)
        return client.clear_foreign_config(raid_controller)
    except drac_exceptions.BaseClientException as err:
        log_payload = {'raid_controller_fqdd': raid_controller,
                       'node_uuid': node.uuid,
                       'error': err}
        LOG.error('DRAC driver failed to free foreign driver '
                  'on %(raid_controller_fqdd)s '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.', log_payload)
        raise exception.DracOperationError(error=err)
def set_raid_settings(node, controller_fqdd, settings):
    """Sets the RAID configuration

    It sets the pending_value parameter for each of the attributes
    passed in. For the values to be applied, a config job must
    be created.

    :param node: an ironic node object.
    :param controller_fqdd: the ID of the RAID controller.
    :param settings: a dictionary containing the proposed values, with
                     each key being the name of attribute and the value
                     being the proposed value.
    :returns: a dictionary containing:
              - The is_commit_required key with a boolean value indicating
              whether a config job must be created for the values to be
              applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted for the
              values to be applied. Possible values are true and false.
    :raises: DRACOperationFailed on error reported back by the DRAC
             interface
    """
    try:
        drac_job.validate_job_queue(node)

        client = drac_common.get_drac_client(node)
        return client.set_raid_settings(controller_fqdd, settings)
    except drac_exceptions.BaseClientException as err:
        LOG.error('DRAC driver failed to set raid settings '
                  'on %(raid_controller_fqdd)s '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'raid_controller_fqdd': controller_fqdd,
                   'node_uuid': node.uuid, 'error': err})
        raise exception.DracOperationError(error=err)
def list_raid_settings(node):
    """List the RAID configuration settings

    :param node: an ironic node object.
    :returns: a dictionary with the RAID settings using InstanceID as the
              key. The attributes are RAIDEnumerableAttribute,
              RAIDStringAttribute and RAIDIntegerAttribute objects.
    :raises: DRACOperationFailed on error reported back by the DRAC
             interface
    """
    try:
        drac_job.validate_job_queue(node)

        client = drac_common.get_drac_client(node)
        return client.list_raid_settings()
    except drac_exceptions.BaseClientException as err:
        LOG.error('DRAC driver failed to list raid settings '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': err})
        raise exception.DracOperationError(error=err)
def change_physical_disk_state(node, mode=None,
                               controllers_to_physical_disk_ids=None):
    """Convert disks RAID status

    This method converts the requested physical disks from RAID to JBOD
    or vice versa, only touching the disks that are not already in the
    correct state.

    :param node: an ironic node object.
    :param mode: the mode to change the disks either to RAID or JBOD.
    :param controllers_to_physical_disk_ids: Dictionary of controllers and
           corresponding disk ids to convert to the requested mode.
    :return: a dictionary containing:
             - conversion_results, a dictionary that maps controller ids
             to the conversion results for that controller.
             The conversion results are a dict that contains:
             - The is_commit_required key with the value always set to
             True indicating that a config job must be created to
             complete disk conversion.
             - The is_reboot_required key with a RebootRequired
             enumerated value indicating whether the server must be
             rebooted to complete disk conversion.
    :raises: DRACOperationError on an error from python-dracclient.
    """
    try:
        drac_job.validate_job_queue(node)

        client = drac_common.get_drac_client(node)
        return client.change_physical_disk_state(
            mode, controllers_to_physical_disk_ids)
    except drac_exceptions.BaseClientException as err:
        LOG.error('DRAC driver failed to change physical drives '
                  'to %(mode)s mode for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'mode': mode, 'node_uuid': node.uuid, 'error': err})
        raise exception.DracOperationError(error=err)
def commit_config(node, raid_controller, reboot=False, realtime=False):
    """Apply all pending changes on a RAID controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :param reboot: indicates whether a reboot job should be automatically
                   created with the config job. (optional, defaults to False)
    :param realtime: indicates RAID controller supports realtime.
                     (optional, defaults to False)
    :returns: id of the created job
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.commit_pending_raid_changes(
            raid_controller=raid_controller, reboot=reboot,
            realtime=realtime)
    except drac_exceptions.BaseClientException as err:
        LOG.error('DRAC driver failed to commit pending RAID config for'
                  ' controller %(raid_controller_fqdd)s on node '
                  '%(node_uuid)s. Reason: %(error)s.',
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid, 'error': err})
        raise exception.DracOperationError(error=err)
def _change_physical_disk_mode(node, mode=None,
                               controllers_to_physical_disk_ids=None,
                               substep="completed"):
    """Physical drives conversion from RAID to JBOD or vice-versa.

    :param node: an ironic node object.
    :param mode: the mode to change the disks either to RAID or JBOD.
    :param controllers_to_physical_disk_ids: Dictionary of controllers and
           corresponding disk ids to convert to the requested mode.
    :param substep: sub step to execute once the conversion jobs finish.
           (defaults to "completed")
    :returns: states.CLEANWAIT if deletion is in progress asynchronously
              or None if it is completed.
    """
    conversion_results = change_physical_disk_state(
        node, mode, controllers_to_physical_disk_ids)['conversion_results']

    # one commit entry per controller that took part in the conversion
    controllers = [{'raid_controller': controller_id,
                    'is_reboot_required': result['is_reboot_required'],
                    'is_commit_required': result['is_commit_required']}
                   for controller_id, result in conversion_results.items()]

    return _commit_to_controllers(node, controllers, substep=substep)
def abandon_config(node, raid_controller):
    """Deletes all pending changes on a RAID controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        client.abandon_pending_raid_changes(raid_controller)
    except drac_exceptions.BaseClientException as err:
        LOG.error('DRAC driver failed to delete pending RAID config '
                  'for controller %(raid_controller_fqdd)s on node '
                  '%(node_uuid)s. Reason: %(error)s.',
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid, 'error': err})
        raise exception.DracOperationError(error=err)
def _calculate_spans(raid_level, disks_count):
"""Calculates number of spans for a RAID level given a physical disk count
:param raid_level: RAID level of the virtual disk.
:param disk_count: number of physical disks used for the virtual disk.
:returns: number of spans.
"""
if raid_level in ['0', '1', '5', '6']:
return 1
elif raid_level in ['5+0', '6+0']:
return 2
elif raid_level in ['1+0']:
return disks_count >> 1
else:
reason = (_('Cannot calculate spans for RAID level "%s"') %
raid_level)
raise exception.DracOperationError(error=reason)
def _usable_disks_count(raid_level, disks_count):
"""Calculates the number of disks usable for a RAID level
...given a physical disk count
:param raid_level: RAID level of the virtual disk.
:param disk_count: number of physical disks used for the virtual disk.
:returns: number of disks.
"""
if raid_level in ['0', '1', '5', '6']:
return disks_count
elif raid_level in ['5+0', '6+0', '1+0']:
# largest even number less than disk_count
return (disks_count >> 1) << 1
else:
reason = (_('RAID level %(raid_level)s is not supported by the '
'driver. Supported RAID levels: %(supported_raid_levels)s')
% {'raid_level': raid_level,
'supported_raid_levels': list(RAID_LEVELS)})
raise exception.DracOperationError(error=reason)
def _span_aware_raid_level_info(raid_level, spans_count):
    """Resolve the per-span RAID_LEVELS entry for *raid_level*.

    Validates that the RAID level is supported and, for spanned levels
    (e.g. '1+0', '5+0'), that more than one span is requested; for spanned
    levels the entry of the underlying span type is returned.

    :param raid_level: RAID level of the virtual disk.
    :param spans_count: number of spans of the virtual disk.
    :returns: the RAID_LEVELS entry describing a single span.
    :raises: DracOperationError if the RAID level is unsupported or if a
        spanned level is requested with a single span.
    """
    try:
        raid_level_info = RAID_LEVELS[raid_level]
    except KeyError:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)
    if raid_level_info['type'] == 'spanned':
        if spans_count <= 1:
            reason = _('Spanned RAID volumes cannot contain a single span')
            raise exception.DracOperationError(error=reason)
        # describe the volume in terms of its constituent span type
        span_type = raid_level_info['span_type']
        raid_level_info = RAID_LEVELS[span_type]
    return raid_level_info


def _raid_level_min_disks(raid_level, spans_count=1):
    """Return the minimum number of physical disks for a RAID level.

    :raises: DracOperationError on unsupported level or invalid span count.
    """
    raid_level_info = _span_aware_raid_level_info(raid_level, spans_count)
    return raid_level_info['min_disks'] * spans_count


def _raid_level_max_disks(raid_level, spans_count=1):
    """Return the maximum number of physical disks for a RAID level.

    :raises: DracOperationError on unsupported level or invalid span count.
    """
    raid_level_info = _span_aware_raid_level_info(raid_level, spans_count)
    return raid_level_info['max_disks'] * spans_count


def _raid_level_overhead(raid_level, spans_count=1):
    """Return the number of disks consumed by redundancy overhead.

    :raises: DracOperationError on unsupported level or invalid span count.
    """
    raid_level_info = _span_aware_raid_level_info(raid_level, spans_count)
    return raid_level_info['overhead'] * spans_count
def _max_volume_size_mb(raid_level, physical_disks, free_space_mb,
                        spans_count=1, stripe_size_kb=64 * units.Ki):
    """Return the largest volume size (in MB) the given disks can hold.

    :param raid_level: RAID level of the virtual disk.
    :param physical_disks: member disks of the volume.
    :param free_space_mb: dict mapping each disk to its free space in MB.
    :param spans_count: number of spans of the virtual disk.
    :param stripe_size_kb: stripe size in KB.
    :returns: the maximum volume size in MB.
    """
    # the member disk with the least free space bounds the volume
    smallest_free_kb = min(
        free_space_mb[disk] for disk in physical_disks) * units.Ki
    # NOTE(ifarkas): using math.floor so we get a volume size that does not
    #                exceed the available space
    stripes_per_disk = int(math.floor(float(smallest_free_kb)
                                      / stripe_size_kb))
    data_disks_count = (len(physical_disks)
                        - _raid_level_overhead(raid_level, spans_count))
    return int(stripes_per_disk * stripe_size_kb * data_disks_count
               / units.Ki)
def _volume_usage_per_disk_mb(logical_disk, physical_disks, spans_count=1,
                              stripe_size_kb=64 * units.Ki):
    """Return the space (in MB) the volume consumes on each member disk.

    :param logical_disk: logical disk definition with raid_level and size_mb.
    :param physical_disks: member disks of the volume.
    :param spans_count: number of spans of the virtual disk.
    :param stripe_size_kb: stripe size in KB.
    :returns: per-disk usage in MB.
    """
    data_disks_count = (len(physical_disks)
                        - _raid_level_overhead(logical_disk['raid_level'],
                                               spans_count))
    volume_size_kb = logical_disk['size_mb'] * units.Ki
    # NOTE(ifarkas): using math.ceil so we get the largest disk usage
    #                possible, so we can avoid over-committing
    stripes_per_volume = math.ceil(float(volume_size_kb) / stripe_size_kb)
    stripes_per_disk = math.ceil(float(stripes_per_volume)
                                 / data_disks_count)
    return int(stripes_per_disk * stripe_size_kb / units.Ki)
def _find_configuration(logical_disks, physical_disks, pending_delete):
    """Find RAID configuration.

    This method transforms the RAID configuration defined in Ironic to a format
    that is required by dracclient. This includes matching the physical disks
    to RAID volumes when it's not pre-defined, or in general calculating
    missing properties.

    :param logical_disks: list of logical disk definitions.
    :param physical_disks: list of physical disk definitions.
    :param pending_delete: Whether there is a pending deletion of virtual
        disks that should be accounted for.
    :returns: list of the logical disk definitions, each with its physical
        disks matched and missing properties (size, spans, controller)
        filled in.
    :raises: DracOperationError if physical disks cannot be matched to all
        logical disks (raised from the helpers).
    """
    # shared physical disks of RAID volumes size_gb='MAX' should be
    # deprioritized during the matching process to reserve as much space as
    # possible. Reserved means it won't be used during matching.
    volumes_with_reserved_physical_disks = [
        volume for volume in logical_disks
        if ('physical_disks' in volume and volume['size_mb'] == 'MAX'
            and volume.get('share_physical_disks', False))]
    reserved_physical_disks = [
        disk for disk in physical_disks
        for volume in volumes_with_reserved_physical_disks
        if disk.id in volume['physical_disks']]
    # we require each logical disk contain only homogeneous physical disks, so
    # sort them by type
    physical_disks_by_type = {}
    reserved_physical_disks_by_type = {}
    free_space_mb = {}
    for disk in physical_disks:
        # calculate free disk space
        free_space_mb[disk] = _get_disk_free_size_mb(disk, pending_delete)
        # disks are grouped by (controller, media, interface, size); a volume
        # is only ever assembled from a single group
        disk_type = (disk.controller, disk.media_type, disk.interface_type,
                     disk.size_mb)
        if disk_type not in physical_disks_by_type:
            physical_disks_by_type[disk_type] = []
            reserved_physical_disks_by_type[disk_type] = []
        if disk in reserved_physical_disks:
            reserved_physical_disks_by_type[disk_type].append(disk)
        else:
            physical_disks_by_type[disk_type].append(disk)
    # exclude non-shared physical disks (predefined by the user) from
    # physical_disks_by_type because they are not going to be used during
    # matching
    for volume in logical_disks:
        if ('physical_disks' in volume
                and not volume.get('share_physical_disks', False)):
            for disk in physical_disks:
                if disk.id in volume['physical_disks']:
                    disk_type = (disk.controller, disk.media_type,
                                 disk.interface_type, disk.size_mb)
                    if disk in physical_disks_by_type[disk_type]:
                        physical_disks_by_type[disk_type].remove(disk)
    processed_volumes = []
    # step 1 - process volumes with predefined disks and exact size
    for volume in [volume for volume in logical_disks
                   if ('physical_disks' in volume
                       and volume['size_mb'] != 'MAX')]:
        _calculate_volume_props(volume, physical_disks, free_space_mb)
        processed_volumes.append(volume)
    # step 2 - process volumes without predefined disks
    volumes_without_disks = [disk for disk in logical_disks
                             if 'physical_disks' not in disk]
    if volumes_without_disks:
        result, free_space_mb = (
            _assign_disks_to_volume(volumes_without_disks,
                                    physical_disks_by_type, free_space_mb,
                                    pending_delete))
        if not result:
            # try again using the reserved physical disks in addition
            for disk_type, disks in physical_disks_by_type.items():
                physical_disks_by_type[disk_type] += (
                    reserved_physical_disks_by_type[disk_type])
            result, free_space_mb = (
                _assign_disks_to_volume(volumes_without_disks,
                                        physical_disks_by_type,
                                        free_space_mb,
                                        pending_delete))
            if not result:
                # even the reserved disks were not enough: give up
                error_msg = _('failed to find matching physical disks for all '
                              'logical disks')
                LOG.error('DRAC driver failed to create RAID '
                          'configuration. Reason: %(error)s.',
                          {'error': error_msg})
                raise exception.DracOperationError(error=error_msg)
        processed_volumes += volumes_without_disks
    # step 3 - process volumes with predefined disks and size_mb == 'MAX'
    # (done last so they can consume whatever space steps 1-2 left over)
    for volume in [volume for volume in logical_disks
                   if ('physical_disks' in volume
                       and volume['size_mb'] == 'MAX')]:
        _calculate_volume_props(volume, physical_disks, free_space_mb)
        processed_volumes.append(volume)
    return processed_volumes
def _calculate_volume_props(logical_disk, physical_disks, free_space_mb):
    """Fill in span/size properties of a logical disk and book its space.

    Mutates ``logical_disk`` in place (span_depth, span_length, size_mb,
    controller) and decrements ``free_space_mb`` for each selected disk.

    :param logical_disk: logical disk definition with raid_level, size_mb
        ('MAX' or an integer) and physical_disks (list of disk ids).
    :param physical_disks: list of physical disk objects to select from.
    :param free_space_mb: dict mapping each disk to its free space in MB;
        updated in place.
    :raises: DracOperationError if the disk count does not divide evenly
        into spans, or if there is not enough space for the volume.
    """
    selected_disks = [disk for disk in physical_disks
                      if disk.id in logical_disk['physical_disks']]
    spans_count = _calculate_spans(
        logical_disk['raid_level'], len(selected_disks))
    if len(selected_disks) % spans_count != 0:
        error_msg = _('invalid number of physical disks was provided')
        raise exception.DracOperationError(error=error_msg)
    disks_per_span = int(len(selected_disks) / spans_count)
    # Best practice is to not pass span_length and span_depth when creating a
    # RAID10. The iDRAC will dynamically calculate these values using maximum
    # values obtained from the RAID controller.
    logical_disk['span_depth'] = None
    logical_disk['span_length'] = None
    if logical_disk['raid_level'] != '1+0':
        logical_disk['span_depth'] = spans_count
        logical_disk['span_length'] = disks_per_span
    max_volume_size_mb = _max_volume_size_mb(
        logical_disk['raid_level'], selected_disks, free_space_mb,
        spans_count=spans_count)
    if logical_disk['size_mb'] == 'MAX':
        if max_volume_size_mb == 0:
            error_msg = _("size set to 'MAX' but could not allocate physical "
                          "disk space")
            raise exception.DracOperationError(error=error_msg)
        logical_disk['size_mb'] = max_volume_size_mb
    elif max_volume_size_mb < logical_disk['size_mb']:
        # NOTE(review): when 0 < max_volume_size_mb < size_mb this branch
        # does not raise; the per-disk free-space check below is relied on
        # to catch the shortfall — confirm this is intentional.
        if max_volume_size_mb == 0:
            error_msg = _('not enough physical disk space for the logical '
                          'disk')
            raise exception.DracOperationError(error=error_msg)
    disk_usage = _volume_usage_per_disk_mb(logical_disk, selected_disks,
                                           spans_count=spans_count)
    for disk in selected_disks:
        if free_space_mb[disk] < disk_usage:
            error_msg = _('not enough free space on physical disks for the '
                          'logical disk')
            raise exception.DracOperationError(error=error_msg)
        else:
            # book the space so later volumes see the reduced free size
            free_space_mb[disk] -= disk_usage
    if 'controller' not in logical_disk:
        logical_disk['controller'] = selected_disks[0].controller
def _assign_disks_to_volume(logical_disks, physical_disks_by_type,
                            free_space_mb, pending_delete):
    """Recursively match physical disks to the queued logical disks.

    Backtracking search: the head of ``logical_disks`` is tried against
    every homogeneous disk group and every candidate disk count; on
    success the volume (with its 'physical_disks' filled in) is appended
    back to ``logical_disks`` and the updated free-space map is returned.
    On failure the head is re-inserted so the caller's list is unchanged.

    :param logical_disks: queue of logical disk definitions still to match;
        mutated in place.
    :param physical_disks_by_type: dict mapping
        (controller, media_type, interface_type, size_mb) to disk lists.
    :param free_space_mb: dict mapping each disk to its free space in MB.
    :param pending_delete: whether a pending deletion of all virtual disks
        should be accounted for when computing free space.
    :returns: a (success, free_space_mb) tuple.
    """
    logical_disk = logical_disks.pop(0)
    raid_level = logical_disk['raid_level']
    # iterate over all possible configurations
    for (controller, disk_type,
         interface_type, size_mb), disks in physical_disks_by_type.items():
        if ('disk_type' in logical_disk
                and logical_disk['disk_type'] != disk_type):
            continue
        if ('interface_type' in logical_disk
                and logical_disk['interface_type'] != interface_type):
            continue
        # filter out disks without free disk space
        disks = [disk for disk in disks if free_space_mb[disk] > 0]
        # sort disks by free size which is important if we have max disks limit
        # on a volume
        disks = sorted(
            disks,
            key=lambda disk: free_space_mb[disk])
        # filter out disks already in use if sharing is disabled
        if ('share_physical_disks' not in logical_disk
                or not logical_disk['share_physical_disks']):
            initial_free_size_mb = {
                disk: _get_disk_free_size_mb(disk, pending_delete)
                for disk in disks
            }
            # a disk whose free space differs from its initial free space
            # has already been (partially) booked by another volume
            disks = [disk for disk in disks
                     if initial_free_size_mb[disk] == free_space_mb[disk]]
        max_spans = _calculate_spans(raid_level, len(disks))
        min_spans = min([2, max_spans])
        min_disks = _raid_level_min_disks(raid_level,
                                          spans_count=min_spans)
        max_disks = _raid_level_max_disks(raid_level,
                                          spans_count=max_spans)
        candidate_max_disks = min([max_disks, len(disks)])
        for disks_count in range(min_disks, candidate_max_disks + 1):
            if ('number_of_physical_disks' in logical_disk
                    and (logical_disk['number_of_physical_disks']
                         != disks_count)):
                continue
            # skip invalid disks_count
            if disks_count != _usable_disks_count(logical_disk['raid_level'],
                                                  disks_count):
                continue
            selected_disks = disks[0:disks_count]
            # work on copies so a failed candidate leaves no side effects
            candidate_volume = logical_disk.copy()
            candidate_free_space_mb = free_space_mb.copy()
            candidate_volume['physical_disks'] = [disk.id for disk
                                                  in selected_disks]
            try:
                _calculate_volume_props(candidate_volume, selected_disks,
                                        candidate_free_space_mb)
            except exception.DracOperationError:
                continue
            if len(logical_disks) > 0:
                # recurse on the remaining queue with the tentative booking
                result, candidate_free_space_mb = (
                    _assign_disks_to_volume(logical_disks,
                                            physical_disks_by_type,
                                            candidate_free_space_mb,
                                            pending_delete))
                if result:
                    logical_disks.append(candidate_volume)
                    return (True, candidate_free_space_mb)
            else:
                logical_disks.append(candidate_volume)
                return (True, candidate_free_space_mb)
    else:
        # for/else: no configuration matched (loop never hit a return);
        # put back the logical_disk to queue
        logical_disks.insert(0, logical_disk)
        return (False, free_space_mb)
def _filter_logical_disks(logical_disks, include_root_volume,
include_nonroot_volumes):
filtered_disks = []
for disk in logical_disks:
if include_root_volume and disk.get('is_root_volume'):
filtered_disks.append(disk)
if include_nonroot_volumes and not disk.get('is_root_volume'):
filtered_disks.append(disk)
return filtered_disks
def _create_config_job(node, controller, reboot=False, realtime=False,
                       raid_config_job_ids=None,
                       raid_config_parameters=None):
    """Commit pending changes on a controller and record the config job.

    :param node: an ironic node object.
    :param controller: id of the RAID controller to commit to.
    :param reboot: whether a reboot job should be created with the config
        job. (optional, defaults to False)
    :param realtime: whether the RAID controller supports realtime
        configuration. (optional, defaults to False)
    :param raid_config_job_ids: list the created job id is appended to;
        a new list is used when not provided.
    :param raid_config_parameters: list of controller ids committed so far;
        the controller is added if missing. A new list is used when not
        provided.
    :returns: a dictionary with the updated raid_config_job_ids and
        raid_config_parameters lists.
    :raises: DracOperationError on an error from python-dracclient
        (propagated from commit_config).
    """
    # NOTE: the previous [] defaults were mutable default arguments, which
    # Python shares between calls; use None sentinels instead.
    if raid_config_job_ids is None:
        raid_config_job_ids = []
    if raid_config_parameters is None:
        raid_config_parameters = []
    job_id = commit_config(node, raid_controller=controller,
                           reboot=reboot, realtime=realtime)
    raid_config_job_ids.append(job_id)
    if controller not in raid_config_parameters:
        raid_config_parameters.append(controller)
    LOG.info('Change has been committed to RAID controller '
             '%(controller)s on node %(node)s. '
             'DRAC job id: %(job_id)s',
             {'controller': controller, 'node': node.uuid,
              'job_id': job_id})
    return {'raid_config_job_ids': raid_config_job_ids,
            'raid_config_parameters': raid_config_parameters}
def _validate_volume_size(node, logical_disks):
    """Re-check logical disk sizes against the current physical disks.

    Re-reads the physical disks from the node and, for any logical disk
    whose requested size no longer fits, shrinks it to the new maximum and
    recalculates its volume properties (mutating the dict in place).

    :param node: an ironic node object.
    :param logical_disks: list of logical disk definitions with
        physical_disks, raid_level and size_mb already populated.
    :returns: the list of adjusted logical disks if any were shrunk,
        otherwise the original ``logical_disks`` list.
    """
    new_physical_disks = list_physical_disks(node)
    free_space_mb = {}
    new_processed_volumes = []
    for disk in new_physical_disks:
        free_space_mb[disk] = disk.free_size_mb
    for logical_disk in logical_disks:
        selected_disks = [disk for disk in new_physical_disks
                          if disk.id in logical_disk['physical_disks']]
        spans_count = _calculate_spans(
            logical_disk['raid_level'], len(selected_disks))
        new_max_vol_size_mb = _max_volume_size_mb(
            logical_disk['raid_level'],
            selected_disks,
            free_space_mb,
            spans_count=spans_count)
        if logical_disk['size_mb'] > new_max_vol_size_mb:
            # requested size no longer fits: clamp it and re-derive spans
            # and per-disk usage
            logical_disk['size_mb'] = new_max_vol_size_mb
            LOG.info("Logical size does not match so calculating volume "
                     "properties for current logical_disk")
            _calculate_volume_props(
                logical_disk, new_physical_disks, free_space_mb)
            new_processed_volumes.append(logical_disk)
    # NOTE(review): when only some disks are shrunk, only those are
    # returned and the unchanged ones are dropped from the result —
    # confirm callers rely on this (shrunk dicts are also mutated in
    # place, so the original list still reflects the new sizes).
    if new_processed_volumes:
        return new_processed_volumes
    return logical_disks
def _switch_to_raid_mode(node, controller_fqdd):
    """Convert the controller mode from Enhanced HBA to RAID mode

    :param node: an ironic node object
    :param controller_fqdd: the ID of the RAID controller.
    :returns: a dictionary containing
              - The raid_controller key with a ID of the
              RAID controller value.
              - The is_commit_required needed key with a
              boolean value indicating whether a config job must be created
              for the values to be applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted to
              switch the controller mode to RAID.
    """
    # wait for pending jobs to complete
    drac_job.wait_for_job_completion(node)

    # request the mode flip through the controller's RAID attribute
    raid_attr = "{}:{}".format(controller_fqdd,
                               _REQUESTED_RAID_CONTROLLER_MODE)
    settings_results = set_raid_settings(
        node, controller_fqdd, {raid_attr: _RAID_MODE})

    return {
        'raid_controller': controller_fqdd,
        'is_reboot_required': settings_results['is_reboot_required'],
        'is_commit_required': settings_results['is_commit_required']}
def _commit_to_controllers(node, controllers, substep="completed"):
    """Commit changes to RAID controllers on the node.

    :param node: an ironic node object
    :param controllers: a list of dictionary containing
                        - The raid_controller key with raid controller
                        fqdd value indicating on which raid configuration
                        job needs to be perform.
                        - The is_commit_required needed key with a
                        boolean value indicating whether a config job must
                        be created.
                        - The is_reboot_required key with a RebootRequired
                        enumerated value indicating whether the server must
                        be rebooted only if raid controller does not support
                        realtime.
    :param substep: contain sub cleaning or deploy step which executes any raid
                    configuration job if set after cleaning or deploy step.
                    (default to completed)
    :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) if
              configuration is in progress asynchronously or None if it is
              completed.
    """
    # remove controller which does not require configuration job
    controllers = [controller for controller in controllers
                   if controller['is_commit_required']]
    if not controllers:
        # nothing to commit: record the substep and finish synchronously
        LOG.debug('No changes on any of the controllers on node %s',
                  node.uuid)
        driver_internal_info = node.driver_internal_info
        driver_internal_info['raid_config_substep'] = substep
        driver_internal_info['raid_config_parameters'] = []
        node.driver_internal_info = driver_internal_info
        node.save()
        return
    driver_internal_info = node.driver_internal_info
    driver_internal_info['raid_config_substep'] = substep
    driver_internal_info['raid_config_parameters'] = []
    if 'raid_config_job_ids' not in driver_internal_info:
        driver_internal_info['raid_config_job_ids'] = []
    optional = drac_constants.RebootRequired.optional
    # all realtime controllers
    # (only then can all jobs run without a reboot)
    all_realtime = all(
        (cntlr['is_reboot_required'] == optional)
        and not(cntlr.get('is_ehba_mode'))
        for cntlr in controllers)
    # check any controller with ehba mode
    any_ehba_controllers = any(
        cntrl.get('is_ehba_mode') is True for cntrl in controllers)
    raid_config_job_ids = []
    raid_config_parameters = []
    if all_realtime:
        # every controller supports realtime: commit everywhere, no reboot
        for controller in controllers:
            realtime_controller = controller['raid_controller']
            job_details = _create_config_job(
                node, controller=realtime_controller,
                reboot=False, realtime=True,
                raid_config_job_ids=raid_config_job_ids,
                raid_config_parameters=raid_config_parameters)
    elif any_ehba_controllers:
        # commit ehba controllers in realtime, then switch them to RAID
        # mode; their mode-switch jobs are committed afterwards with a
        # reboot on the last one
        commit_to_ehba_controllers = []
        for controller in controllers:
            if controller.get('is_ehba_mode'):
                job_details = _create_config_job(
                    node, controller=controller['raid_controller'],
                    reboot=False, realtime=True,
                    raid_config_job_ids=raid_config_job_ids,
                    raid_config_parameters=raid_config_parameters)
                ehba_controller = _switch_to_raid_mode(
                    node, controller['raid_controller'])
                commit_to_ehba_controllers.append(
                    ehba_controller['raid_controller'])
            else:
                job_details = _create_config_job(
                    node, controller=controller['raid_controller'],
                    reboot=False, realtime=False,
                    raid_config_job_ids=raid_config_job_ids,
                    raid_config_parameters=raid_config_parameters)
        for controller in commit_to_ehba_controllers:
            LOG.debug("Create job with Reboot to apply configuration "
                      "changes for ehba controllers")
            job_details = _create_config_job(
                node, controller=controller,
                # only the last job carries the reboot
                reboot=(controller == commit_to_ehba_controllers[-1]),
                realtime=False, raid_config_job_ids=raid_config_job_ids,
                raid_config_parameters=raid_config_parameters)
    else:
        # mixed/non-realtime: commit each controller, reboot with the last
        for controller in controllers:
            mix_controller = controller['raid_controller']
            reboot = (controller == controllers[-1])
            job_details = _create_config_job(
                node, controller=mix_controller,
                reboot=reboot, realtime=False,
                raid_config_job_ids=raid_config_job_ids,
                raid_config_parameters=raid_config_parameters)
    # job_details holds the accumulated lists shared by all calls above
    driver_internal_info['raid_config_job_ids'].extend(job_details[
        'raid_config_job_ids'])
    driver_internal_info['raid_config_parameters'].extend(job_details[
        'raid_config_parameters'])
    node.driver_internal_info = driver_internal_info
    # Signal whether the node has been rebooted, that we do not need to execute
    # the step again, and that this completion of this step is triggered
    # through async polling.
    # NOTE(mgoddard): set_async_step_flags calls node.save().
    deploy_utils.set_async_step_flags(
        node,
        reboot=not all_realtime,
        skip_current_step=True,
        polling=True)
    return deploy_utils.get_async_step_return_state(node)
def _create_virtual_disks(task, node):
    """Create the pending virtual disks and commit them per controller.

    :param task: a TaskManager instance (unused here; kept for interface
        compatibility with the calling step).
    :param node: an ironic node object whose driver_internal_info carries
        ``logical_disks_to_create`` and ``volume_validation``.
    :returns: the result of _commit_to_controllers for the touched
        controllers.
    """
    logical_disks_to_create = node.driver_internal_info[
        'logical_disks_to_create']

    # Check valid properties attached to volume after drives conversion
    if node.driver_internal_info['volume_validation']:
        logical_disks_to_create = _validate_volume_size(
            node, logical_disks_to_create)

    controllers = []
    for logical_disk in logical_disks_to_create:
        disk_cap = create_virtual_disk(
            node,
            raid_controller=logical_disk['controller'],
            physical_disks=logical_disk['physical_disks'],
            raid_level=logical_disk['raid_level'],
            size_mb=logical_disk['size_mb'],
            disk_name=logical_disk.get('name'),
            span_length=logical_disk.get('span_length'),
            span_depth=logical_disk.get('span_depth'))
        controller = {
            'raid_controller': logical_disk['controller'],
            'is_reboot_required': disk_cap['is_reboot_required'],
            'is_commit_required': disk_cap['is_commit_required']}
        # commit once per distinct controller entry
        if controller not in controllers:
            controllers.append(controller)

    return _commit_to_controllers(node, controllers)
def _controller_in_hba_mode(raid_settings, controller_fqdd):
    """Check whether the controller is currently in Enhanced HBA mode.

    :param raid_settings: dictionary of RAID settings keyed by attribute
        FQDD (as returned by list_raid_settings).
    :param controller_fqdd: the ID of the RAID controller.
    :returns: True if the controller's current mode contains Enhanced HBA,
        False otherwise, including when the mode attribute is absent.
    """
    controller_mode = raid_settings.get(
        '{}:{}'.format(controller_fqdd, _CURRENT_RAID_CONTROLLER_MODE))
    # Guard against a missing attribute instead of raising AttributeError
    # on None, consistent with _controller_supports_ehba_mode().
    if not controller_mode:
        return False
    return _EHBA_MODE in controller_mode.current_value
def _controller_supports_ehba_mode(settings, controller_fqdd):
    """Check whether the controller can be switched to Enhanced HBA mode.

    :param settings: dictionary of RAID settings keyed by attribute FQDD.
    :param controller_fqdd: the ID of the RAID controller.
    :returns: True if Enhanced HBA is among the attribute's possible
        values, False otherwise (including a missing attribute).
    """
    mode_attr = settings.get(
        "{}:{}".format(controller_fqdd, _CURRENT_RAID_CONTROLLER_MODE))
    if not mode_attr:
        return False
    return _EHBA_MODE in mode_attr.possible_values
def _get_disk_free_size_mb(disk, pending_delete):
"""Return the size of free space on the disk in MB.
:param disk: a PhysicalDisk object.
:param pending_delete: Whether there is a pending deletion of all virtual
disks.
"""
return disk.size_mb if pending_delete else disk.free_size_mb
def _wait_till_realtime_ready(task):
    """Waits till real time operations are ready to be executed.

    Useful for RAID operations where almost all controllers support
    real time configuration, but controllers might not be ready for
    it by the time IPA starts executing steps. It can take minute or
    bit more to be ready for real time configuration.

    :param task: TaskManager object containing the node.
    :raises RedfishError: If can't find OEM extension or it fails to
        execute
    """
    try:
        _retry_till_realtime_ready(task)
    except tenacity.RetryError:
        # Best effort: if the retries are exhausted, log and carry on
        # without the real-time ready state rather than failing the step.
        LOG.debug('Retries exceeded while waiting for real-time ready '
                  'for node %(node)s. Will proceed with out real-time '
                  'ready state', {'node': task.node.uuid})
# Polls up to 30 times, 10 seconds apart (~5 minutes total), retrying
# while _is_realtime_ready returns a falsy result.
@tenacity.retry(
    stop=(tenacity.stop_after_attempt(30)),
    wait=tenacity.wait_fixed(10),
    retry=tenacity.retry_if_result(lambda result: not result))
def _retry_till_realtime_ready(task):
    """Retries till real time operations are ready to be executed.

    :param task: TaskManager object containing the node.
    :raises RedfishError: If can't find OEM extension or it fails to
        execute
    :raises RetryError: If retries exceeded and still not ready for real-time
    """
    return _is_realtime_ready(task)
def _is_realtime_ready(task):
    """Gets is real time ready status.

    Uses sushy-oem-idrac extension. Tries each manager of the system in
    turn until one answers; managers that fail with a transient Sushy
    error are skipped.

    :param task: TaskManager object containing the node.
    :returns: True, if real time operations are ready, otherwise False.
    :raises RedfishError: If can't find OEM extension or it fails to
        execute
    """
    system = redfish_utils.get_system(task.node)
    for manager in system.managers:
        try:
            manager_oem = manager.get_oem_extension('Dell')
        except sushy.exceptions.OEMExtensionNotFoundError as e:
            # Missing OEM package is a deployment problem, not a transient
            # error; fail immediately instead of trying other managers.
            error_msg = (_("Search for Sushy OEM extension Python package "
                           "'sushy-oem-idrac' failed for node %(node)s. "
                           "Ensure it is installed. Error: %(error)s") %
                         {'node': task.node.uuid, 'error': e})
            LOG.error(error_msg)
            raise exception.RedfishError(error=error_msg)
        try:
            return manager_oem.lifecycle_service.is_realtime_ready()
        except sushy.exceptions.SushyError as e:
            # Transient failure on this manager; fall through to the next
            # one. (A previous unconditional 'break' here was unreachable —
            # every path either returns or continues — and was removed,
            # along with the now redundant for/else.)
            LOG.debug("Failed to get real time ready status with system "
                      "%(system)s manager %(manager)s for node %(node)s. Will "
                      "try next manager, if available. Error: %(error)s",
                      {'system': system.uuid if system.uuid else
                       system.identity,
                       'manager': manager.uuid if manager.uuid else
                       manager.identity,
                       'node': task.node.uuid,
                       'error': e})
    # Reaching this point means no manager produced an answer (or the
    # system has no managers at all).
    error_msg = (_("iDRAC Redfish get real time ready status failed for "
                   "node %(node)s, because system %(system)s has no "
                   "manager%(no_manager)s.") %
                 {'node': task.node.uuid,
                  'system': system.uuid if system.uuid else
                  system.identity,
                  'no_manager': '' if not system.managers else
                  ' which could'})
    LOG.error(error_msg)
    raise exception.RedfishError(error=error_msg)
class DracRedfishRAID(redfish_raid.RedfishRAID):
    """iDRAC Redfish interface for RAID related actions.

    Includes iDRAC specific adjustments for RAID related actions:
    before delegating to the generic Redfish implementation, it waits
    (bounded) for the controller to report real-time configuration
    readiness.
    """
    @base.clean_step(priority=0, abortable=False, argsinfo={
        'create_root_volume': {
            'description': (
                'This specifies whether to create the root volume. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        'create_nonroot_volumes': {
            'description': (
                'This specifies whether to create the non-root volumes. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        'delete_existing': {
            'description': (
                'Setting this to `True` indicates to delete existing RAID '
                'configuration prior to creating the new configuration. '
                'Default value is `False`.'
            ),
            'required': False,
        }
    })
    def create_configuration(self, task, create_root_volume=True,
                             create_nonroot_volumes=True,
                             delete_existing=False):
        """Create RAID configuration on the node.
        This method creates the RAID configuration as read from
        node.target_raid_config. This method
        by default will create all logical disks.
        :param task: TaskManager object containing the node.
        :param create_root_volume: Setting this to False indicates
            not to create root volume that is specified in the node's
            target_raid_config. Default value is True.
        :param create_nonroot_volumes: Setting this to False indicates
            not to create non-root volumes (all except the root volume) in
            the node's target_raid_config. Default value is True.
        :param delete_existing: Setting this to True indicates to delete RAID
            configuration prior to creating the new configuration. Default is
            False.
        :returns: states.CLEANWAIT if RAID configuration is in progress
            asynchronously or None if it is complete.
        :raises: RedfishError if there is an error creating the configuration
        """
        # The controller may not be ready for real-time configuration right
        # after boot; wait (best effort, bounded) before delegating.
        _wait_till_realtime_ready(task)
        return super(DracRedfishRAID, self).create_configuration(
            task, create_root_volume, create_nonroot_volumes,
            delete_existing)
    @base.clean_step(priority=0)
    @base.deploy_step(priority=0)
    def delete_configuration(self, task):
        """Delete RAID configuration on the node.
        :param task: TaskManager object containing the node.
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment)
            if deletion is in progress asynchronously or None if it is
            complete.
        """
        # Same readiness wait as create_configuration.
        _wait_till_realtime_ready(task)
        return super(DracRedfishRAID, self).delete_configuration(task)
    def _validate_vendor(self, task):
        pass  # for now assume idrac-redfish is used with iDRAC BMC, thus pass
class DracWSManRAID(base.RAIDInterface):
    """iDRAC WS-Man interface for RAID related actions.

    Drives RAID configuration through asynchronous iDRAC config jobs:
    methods schedule jobs, record their IDs in driver_internal_info, and
    a periodic task polls job status and advances a multi-step state
    machine (see the 'raid_config_substep' values handled in
    _check_node_raid_jobs).
    """
    def get_properties(self):
        """Return the properties of the interface."""
        return drac_common.COMMON_PROPERTIES
    @base.deploy_step(priority=0,
                      argsinfo=base.RAID_APPLY_CONFIGURATION_ARGSINFO)
    def apply_configuration(self, task, raid_config, create_root_volume=True,
                            create_nonroot_volumes=False,
                            delete_existing=True):
        # Thin wrapper over the base implementation; exists so the deploy
        # step decorator is registered for this interface.
        return super(DracWSManRAID, self).apply_configuration(
            task, raid_config, create_root_volume=create_root_volume,
            create_nonroot_volumes=create_nonroot_volumes,
            delete_existing=delete_existing)
    @METRICS.timer('DracRAID.create_configuration')
    @base.clean_step(priority=0, abortable=False, argsinfo={
        'create_root_volume': {
            'description': (
                'This specifies whether to create the root volume. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        'create_nonroot_volumes': {
            'description': (
                'This specifies whether to create the non-root volumes. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        "delete_existing": {
            "description": (
                "Setting this to 'True' indicates to delete existing RAID "
                "configuration prior to creating the new configuration. "
                "Default value is 'False'."
            ),
            "required": False,
        }
    })
    def create_configuration(self, task,
                             create_root_volume=True,
                             create_nonroot_volumes=True,
                             delete_existing=False):
        """Create the RAID configuration.
        This method creates the RAID configuration on the given node.
        :param task: a TaskManager instance containing the node to act on.
        :param create_root_volume: If True, a root volume is created
            during RAID configuration. Otherwise, no root volume is
            created. Default is True.
        :param create_nonroot_volumes: If True, non-root volumes are
            created. If False, no non-root volumes are created. Default
            is True.
        :param delete_existing: Setting this to True indicates to delete RAID
            configuration prior to creating the new configuration. Default is
            False.
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment)
            if creation is in progress asynchronously or None if it is
            completed.
        :raises: MissingParameterValue, if node.target_raid_config is missing
            or empty.
        :raises: DracOperationError on an error from python-dracclient.
        """
        node = task.node
        logical_disks = node.target_raid_config['logical_disks']
        # Normalize requested sizes: GB -> MB (the 'MAX' marker is kept
        # as-is and requires explicit physical disks to be meaningful).
        for disk in logical_disks:
            if disk['size_gb'] == 'MAX' and 'physical_disks' not in disk:
                raise exception.InvalidParameterValue(
                    _("create_configuration called with invalid "
                      "target_raid_configuration for node %(node_id)s. "
                      "'physical_disks' is missing from logical_disk while "
                      "'size_gb'='MAX' was requested: "
                      "%(logical_disk)s") % {'node_id': node.uuid,
                                             'logical_disk': disk})
            if disk['size_gb'] == 'MAX':
                disk['size_mb'] = 'MAX'
            else:
                disk['size_mb'] = disk['size_gb'] * units.Ki
            del disk['size_gb']
        if delete_existing:
            self._delete_configuration_no_commit(task)
        physical_disks = list_physical_disks(node)
        logical_disks = _find_configuration(logical_disks, physical_disks,
                                            pending_delete=delete_existing)
        logical_disks_to_create = _filter_logical_disks(
            logical_disks, create_root_volume, create_nonroot_volumes)
        # Map each controller to the physical disks backing its logical disks.
        controllers_to_physical_disk_ids = defaultdict(list)
        for logical_disk in logical_disks_to_create:
            # Not applicable to JBOD logical disks.
            if logical_disk['raid_level'] == 'JBOD':
                continue
            for physical_disk_name in logical_disk['physical_disks']:
                controllers_to_physical_disk_ids[
                    logical_disk['controller']].append(
                    physical_disk_name)
        # adding logical_disks to driver_internal_info to create virtual disks
        driver_internal_info = node.driver_internal_info
        driver_internal_info[
            "logical_disks_to_create"] = logical_disks_to_create
        commit_results = None
        if logical_disks_to_create:
            LOG.debug(
                "Converting physical disks configured to back RAID "
                "logical disks to RAID mode for node %(node_uuid)s ",
                {"node_uuid": node.uuid})
            raid_mode = drac_constants.RaidStatus.raid
            commit_results = _change_physical_disk_mode(
                node, raid_mode,
                controllers_to_physical_disk_ids,
                substep="create_virtual_disks")
        # Remember whether disk conversion jobs were scheduled so that later
        # substeps know to validate volumes.
        volume_validation = True if commit_results else False
        driver_internal_info['volume_validation'] = volume_validation
        node.driver_internal_info = driver_internal_info
        node.save()
        if commit_results:
            return commit_results
        else:
            LOG.debug("Controller does not support drives conversion "
                      "so creating virtual disks")
            return _create_virtual_disks(task, node)
    @METRICS.timer('DracRAID.delete_configuration')
    @base.clean_step(priority=0)
    @base.deploy_step(priority=0)
    def delete_configuration(self, task):
        """Delete the RAID configuration.
        :param task: a TaskManager instance containing the node to act on.
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment)
            if deletion is in progress asynchronously or None if it is
            completed.
        :raises: DracOperationError on an error from python-dracclient.
        """
        controllers = self._delete_configuration_no_commit(task)
        return _commit_to_controllers(task.node, controllers,
                                      substep="delete_foreign_config")
    @METRICS.timer('DracRAID.get_logical_disks')
    def get_logical_disks(self, task):
        """Get the RAID configuration of the node.
        :param task: a TaskManager instance containing the node to act on.
        :returns: A dictionary of properties.
        :raises: DracOperationError on an error from python-dracclient.
        """
        node = task.node
        logical_disks = []
        for disk in list_virtual_disks(node):
            logical_disk = {
                'id': disk.id,
                'controller': disk.controller,
                'size_gb': int(disk.size_mb / units.Ki),
                'raid_level': disk.raid_level
            }
            if disk.name is not None:
                logical_disk['name'] = disk.name
            logical_disks.append(logical_disk)
        return {'logical_disks': logical_disks}
    @METRICS.timer('DracRAID._query_raid_config_job_status')
    @periodics.periodic(
        spacing=CONF.drac.query_raid_config_job_status_interval)
    def _query_raid_config_job_status(self, manager, context):
        """Periodic task to check the progress of running RAID config jobs."""
        filters = {'reserved': False, 'maintenance': False}
        fields = ['driver_internal_info']
        node_list = manager.iter_nodes(fields=fields, filters=filters)
        for (node_uuid, driver, conductor_group,
             driver_internal_info) in node_list:
            try:
                lock_purpose = 'checking async raid configuration jobs'
                # Shared lock is enough for polling; _check_node_raid_jobs
                # upgrades to exclusive only when it has state to change.
                with task_manager.acquire(context, node_uuid,
                                          purpose=lock_purpose,
                                          shared=True) as task:
                    if not isinstance(task.driver.raid, DracWSManRAID):
                        continue
                    job_ids = driver_internal_info.get('raid_config_job_ids')
                    if not job_ids:
                        continue
                    self._check_node_raid_jobs(task)
            except exception.NodeNotFound:
                LOG.info("During query_raid_config_job_status, node "
                         "%(node)s was not found and presumed deleted by "
                         "another process.", {'node': node_uuid})
            except exception.NodeLocked:
                LOG.info("During query_raid_config_job_status, node "
                         "%(node)s was already locked by another process. "
                         "Skip.", {'node': node_uuid})
    @METRICS.timer('DracRAID._check_node_raid_jobs')
    def _check_node_raid_jobs(self, task):
        """Check the progress of running RAID config jobs of a node."""
        node = task.node
        raid_config_job_ids = node.driver_internal_info['raid_config_job_ids']
        finished_job_ids = []
        for config_job_id in raid_config_job_ids:
            config_job = drac_job.get_job(node, job_id=config_job_id)
            if config_job is None or config_job.status == 'Completed':
                finished_job_ids.append(config_job_id)
            elif config_job.status == 'Failed':
                finished_job_ids.append(config_job_id)
                self._set_raid_config_job_failure(node)
        if not finished_job_ids:
            return
        task.upgrade_lock()
        self._delete_cached_config_job_id(node, finished_job_ids)
        if not node.driver_internal_info.get('raid_config_job_failure',
                                             False):
            # Advance the multi-step RAID workflow based on which substep
            # the just-finished job belonged to.
            if 'raid_config_substep' in node.driver_internal_info:
                substep = node.driver_internal_info['raid_config_substep']
                if substep == 'delete_foreign_config':
                    foreign_drives = self._execute_foreign_drives(task, node)
                    if foreign_drives is None:
                        return self._convert_drives(task, node)
                elif substep == 'physical_disk_conversion':
                    self._convert_drives(task, node)
                elif substep == "create_virtual_disks":
                    return _create_virtual_disks(task, node)
                elif substep == 'completed':
                    self._complete_raid_substep(task, node)
            else:
                self._complete_raid_substep(task, node)
        else:
            self._clear_raid_substep(node)
            self._clear_raid_config_job_failure(node)
            # NOTE(review): config_job is the last job examined by the loop
            # above — presumably the failed one; confirm when multiple jobs
            # run concurrently.
            self._set_failed(task, config_job)
    def _execute_foreign_drives(self, task, node):
        # Clear "foreign" (previously configured elsewhere) drives on each
        # controller recorded by the delete_foreign_config substep.
        controllers = list()
        jobs_required = False
        for controller_id in node.driver_internal_info[
                'raid_config_parameters']:
            controller_cap = clear_foreign_config(
                node, controller_id)
            controller = {
                'raid_controller': controller_id,
                'is_reboot_required': controller_cap['is_reboot_required'],
                'is_commit_required': controller_cap['is_commit_required']}
            controllers.append(controller)
            jobs_required = jobs_required or controller_cap[
                'is_commit_required']
        if not jobs_required:
            LOG.info(
                "No foreign drives detected, so "
                "resume %s", "cleaning" if node.clean_step else "deployment")
            return None
        else:
            return _commit_to_controllers(
                node,
                controllers,
                substep='physical_disk_conversion')
    def _complete_raid_substep(self, task, node):
        # Final substep: clear bookkeeping and resume clean/deploy.
        self._clear_raid_substep(node)
        self._resume(task)
    def _convert_drives(self, task, node):
        # Convert physical disks back to JBOD mode where supported.
        jbod = drac_constants.RaidStatus.jbod
        drives_results = _change_physical_disk_mode(
            node, mode=jbod)
        if drives_results is None:
            LOG.debug("Controller does not support drives "
                      "conversion on %(node_uuid)s",
                      {'node_uuid': node.uuid})
            self._complete_raid_substep(task, node)
    def _clear_raid_substep(self, node):
        driver_internal_info = node.driver_internal_info
        driver_internal_info.pop('raid_config_substep', None)
        driver_internal_info.pop('raid_config_parameters', None)
        node.driver_internal_info = driver_internal_info
        node.save()
    def _set_raid_config_job_failure(self, node):
        driver_internal_info = node.driver_internal_info
        driver_internal_info['raid_config_job_failure'] = True
        node.driver_internal_info = driver_internal_info
        node.save()
    def _clear_raid_config_job_failure(self, node):
        driver_internal_info = node.driver_internal_info
        del driver_internal_info['raid_config_job_failure']
        node.driver_internal_info = driver_internal_info
        node.save()
    def _delete_cached_config_job_id(self, node, finished_config_job_ids=None):
        # Drop finished job IDs from the cached list, keeping unfinished ones.
        if finished_config_job_ids is None:
            finished_config_job_ids = []
        driver_internal_info = node.driver_internal_info
        unfinished_job_ids = [job_id for job_id
                              in driver_internal_info['raid_config_job_ids']
                              if job_id not in finished_config_job_ids]
        driver_internal_info['raid_config_job_ids'] = unfinished_job_ids
        node.driver_internal_info = driver_internal_info
        node.save()
    def _set_failed(self, task, config_job):
        # Route the failure to the cleaning or deployment error handler,
        # depending on which phase the node is in.
        error_msg = (_("Failed config job: %(config_job_id)s. "
                       "Message: '%(message)s'.") %
                     {'config_job_id': config_job.id,
                      'message': config_job.message})
        log_msg = ("RAID configuration job failed for node %(node)s. "
                   "%(error)s" %
                   {'node': task.node.uuid, 'error': error_msg})
        if task.node.clean_step:
            manager_utils.cleaning_error_handler(task, error_msg)
        else:
            manager_utils.deploying_error_handler(task, log_msg, error_msg)
    def _resume(self, task):
        # Publish the resulting RAID layout, then hand control back to the
        # conductor for the appropriate phase.
        raid_common.update_raid_info(
            task.node, self.get_logical_disks(task))
        if task.node.clean_step:
            manager_utils.notify_conductor_resume_clean(task)
        else:
            manager_utils.notify_conductor_resume_deploy(task)
    def _delete_configuration_no_commit(self, task):
        """Delete existing RAID configuration without committing the change.
        :param task: A TaskManager instance.
        :returns: A set of names of RAID controllers which need RAID changes to
            be committed.
        """
        node = task.node
        controllers = list()
        drac_raid_controllers = list_raid_controllers(node)
        drac_raid_settings = list_raid_settings(node)
        for cntrl in drac_raid_controllers:
            if _is_raid_controller(node, cntrl.id, drac_raid_controllers):
                controller = dict()
                # Remember controllers currently in eHBA mode so the mode
                # can be handled when committing.
                if _controller_supports_ehba_mode(
                        drac_raid_settings,
                        cntrl.id) and _controller_in_hba_mode(
                            drac_raid_settings, cntrl.id):
                    controller['is_ehba_mode'] = True
                controller_cap = _reset_raid_config(node, cntrl.id)
                controller["raid_controller"] = cntrl.id
                controller["is_reboot_required"] = controller_cap[
                    "is_reboot_required"]
                controller["is_commit_required"] = controller_cap[
                    "is_commit_required"]
                controllers.append(controller)
        return controllers
class DracRAID(DracWSManRAID):
    """Class alias of class DracWSManRAID.
    This class provides ongoing support of the deprecated 'idrac' RAID
    interface implementation entrypoint.
    All bug fixes and new features should be implemented in its base
    class, DracWSManRAID. That makes them available to both the
    deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such changes
    should not be made to this class.
    """
    def __init__(self):
        super(DracRAID, self).__init__()
        # Warn once per instantiation that this entrypoint is deprecated.
        LOG.warning("RAID interface 'idrac' is deprecated and may be removed "
                    "in a future release. Use 'idrac-wsman' instead.")
| 40.96924 | 79 | 0.634424 |
02ed0e67c34d55f70445f5dbaa89364940472aba | 50 | py | Python | war3observer/__init__.py | sides/war3observer | 898981ec73ba033f5f705098e13dd7f7de1f34e5 | [
"MIT"
] | 26 | 2019-12-15T11:10:19.000Z | 2022-02-24T12:45:45.000Z | war3observer/__init__.py | warlockbrawl/war3observer | 898981ec73ba033f5f705098e13dd7f7de1f34e5 | [
"MIT"
] | 9 | 2019-01-24T12:33:24.000Z | 2019-11-09T00:37:04.000Z | war3observer/__init__.py | warlockbrawl/war3observer | 898981ec73ba033f5f705098e13dd7f7de1f34e5 | [
"MIT"
] | 6 | 2019-01-12T18:14:52.000Z | 2019-10-12T10:19:04.000Z | __version__ = '0.9.0'
from .server import Server
| 12.5 | 26 | 0.72 |
3f7551930cf8a20062a20e8b7448ce8c2f087f55 | 14,509 | py | Python | notion/store.py | yashgorana/notion-py | ef6d2ae58e50c2b3c95fa1e15250086db29e2400 | [
"MIT"
] | null | null | null | notion/store.py | yashgorana/notion-py | ef6d2ae58e50c2b3c95fa1e15250086db29e2400 | [
"MIT"
] | null | null | null | notion/store.py | yashgorana/notion-py | ef6d2ae58e50c2b3c95fa1e15250086db29e2400 | [
"MIT"
] | null | null | null | import datetime
import json
import threading
import uuid
from collections import defaultdict
from copy import deepcopy
from dictdiffer import diff
from inspect import signature
from threading import Lock
from pathlib import Path
from tzlocal import get_localzone
from .logger import logger
from .settings import CACHE_DIR
from .utils import extract_id
class MissingClass(object):
    """Always-falsy marker type for 'value not present'."""

    def __bool__(self):
        return False


# Module-level sentinel used to distinguish "record not found" from a
# record whose stored value is legitimately None/empty.
Missing = MissingClass()
class Callback(object):
    """Wraps a user callback for a record, firing it on a daemon thread.

    Equality is defined against both other Callback objects (by id) and
    plain strings (by callback_id prefix), which lets lists of callbacks
    be filtered with ``remove(prefix)``.
    """
    # NOTE(review): extra_kwargs={} is a shared mutable default, and
    # watch_children is accepted but never stored/used — confirm intent.
    def __init__(
        self, callback, record, callback_id=None, extra_kwargs={}, watch_children=True
    ):
        self.callback = callback
        self.record = record
        # Auto-generate an id when the caller did not supply one.
        self.callback_id = callback_id or str(uuid.uuid4())
        self.extra_kwargs = extra_kwargs
    def __call__(self, difference, old_val, new_val):
        kwargs = {}
        kwargs.update(self.extra_kwargs)
        kwargs["record"] = self.record
        kwargs["callback_id"] = self.callback_id
        kwargs["difference"] = difference
        kwargs["changes"] = self.record._convert_diff_to_changelist(
            difference, old_val, new_val
        )
        logger.debug("Firing callback {} with kwargs: {}".format(self.callback, kwargs))
        # trim down the parameters we'll be passing, to include only those the callback will accept
        params = signature(self.callback).parameters
        if not any(["**" in str(param) for param in params.values()]):
            # there's no "**kwargs" in the callback signature, so remove any unaccepted params
            for arg in list(kwargs.keys()):
                if arg not in params:
                    del kwargs[arg]
        # perform the callback, gracefully handling any exceptions
        try:
            # trigger the callback within its own thread, so it won't block others if it's long-running
            threading.Thread(target=self.callback, kwargs=kwargs, daemon=True).start()
        except Exception as e:
            logger.error(
                "Error while processing callback for {}: {}".format(
                    repr(self.record), repr(e)
                )
            )
    # NOTE: defining __eq__ without __hash__ makes instances unhashable in
    # Python 3; they are only ever kept in lists here.
    def __eq__(self, val):
        if isinstance(val, str):
            # String comparison matches by callback_id prefix.
            return self.callback_id.startswith(val)
        elif isinstance(val, Callback):
            return self.callback_id == val.callback_id
        else:
            return False
class RecordStore(object):
    """Client-side cache of Notion records with change callbacks.

    Keeps per-table values and roles (optionally persisted to a JSON file
    cache), fires registered callbacks when a record's value changes, and
    knows how to refresh records from the server endpoints
    (getRecordValues, loadPageChunk, queryCollection).
    """
    def __init__(self, client, cache_key=None):
        # Guards all mutations of the in-memory store and cache files.
        self._mutex = Lock()
        self._client = client
        self._cache_key = cache_key
        # table -> id -> record value / role
        self._values = defaultdict(lambda: defaultdict(dict))
        self._role = defaultdict(lambda: defaultdict(str))
        self._collection_row_ids = {}
        # table -> id -> list of Callback objects
        self._callbacks = defaultdict(lambda: defaultdict(list))
        # Queues of records/pages to refresh once a transaction completes.
        self._records_to_refresh = {}
        self._pages_to_refresh = []
        with self._mutex:
            self._load_cache()
    def _get(self, table, id):
        # Returns the Missing sentinel (not None) when the record is absent.
        return self._values[table].get(id, Missing)
    def add_callback(self, record, callback, callback_id=None, extra_kwargs={}):
        assert callable(
            callback
        ), "The callback must be a 'callable' object, such as a function."
        # Replace any existing callback with the same id before adding.
        self.remove_callbacks(record._table, record.id, callback_id)
        callback_obj = Callback(
            callback, record, callback_id=callback_id, extra_kwargs=extra_kwargs
        )
        self._callbacks[record._table][record.id].append(callback_obj)
        return callback_obj
    def remove_callbacks(self, table, id, callback_or_callback_id_prefix=""):
        """
        Remove all callbacks for the record specified by `table` and `id` that have a callback_id
        starting with the string `callback_or_callback_id_prefix`, or are equal to the provided callback.
        """
        if callback_or_callback_id_prefix is None:
            return
        callbacks = self._callbacks[table][id]
        # Relies on Callback.__eq__ matching both objects and id prefixes.
        while callback_or_callback_id_prefix in callbacks:
            callbacks.remove(callback_or_callback_id_prefix)
    def _get_cache_path(self, attribute):
        return str(
            Path(CACHE_DIR).joinpath("{}{}.json".format(self._cache_key, attribute))
        )
    def _load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
        # No-op unless this store was created with a cache_key.
        if not self._cache_key:
            return
        for attr in attributes:
            try:
                with open(self._get_cache_path(attr)) as f:
                    if attr == "_collection_row_ids":
                        self._collection_row_ids.update(json.load(f))
                    else:
                        for k, v in json.load(f).items():
                            getattr(self, attr)[k].update(v)
            except (FileNotFoundError, ValueError):
                # Missing or corrupt cache file: start from empty state.
                pass
    def set_collection_rows(self, collection_id, row_ids):
        # Diff against the previous row list and fire row_added/row_removed
        # callbacks before storing the new list.
        if collection_id in self._collection_row_ids:
            old_ids = set(self._collection_row_ids[collection_id])
            new_ids = set(row_ids)
            added = new_ids - old_ids
            removed = old_ids - new_ids
            for id in added:
                self._trigger_callbacks(
                    "collection",
                    collection_id,
                    [("row_added", "rows", id)],
                    old_ids,
                    new_ids,
                )
            for id in removed:
                self._trigger_callbacks(
                    "collection",
                    collection_id,
                    [("row_removed", "rows", id)],
                    old_ids,
                    new_ids,
                )
        self._collection_row_ids[collection_id] = row_ids
        self._save_cache("_collection_row_ids")
    def get_collection_rows(self, collection_id):
        return self._collection_row_ids.get(collection_id, [])
    def _save_cache(self, attribute):
        if not self._cache_key:
            return
        with open(self._get_cache_path(attribute), "w") as f:
            json.dump(getattr(self, attribute), f)
    def _trigger_callbacks(self, table, id, difference, old_val, new_val):
        for callback_obj in self._callbacks[table][id]:
            callback_obj(difference, old_val, new_val)
    def get_role(self, table, id, force_refresh=False):
        self.get(table, id, force_refresh=force_refresh)
        return self._role[table].get(id, None)
    def get(self, table, id, force_refresh=False):
        id = extract_id(id)
        # look up the record in the current local dataset
        result = self._get(table, id)
        # if it's not found, try refreshing the record from the server
        if result is Missing or force_refresh:
            if table == "block":
                self.call_load_page_chunk(id)
            else:
                self.call_get_record_values(**{table: id})
            result = self._get(table, id)
        return result if result is not Missing else None
    def _update_record(self, table, id, value=None, role=None):
        callback_queue = []
        with self._mutex:
            if role:
                logger.debug("Updating 'role' for {}/{} to {}".format(table, id, role))
                self._role[table][id] = role
                self._save_cache("_role")
            if value:
                logger.debug(
                    "Updating 'value' for {}/{} to {}".format(table, id, value)
                )
                old_val = self._values[table][id]
                # Bookkeeping fields are excluded so they never trigger
                # change callbacks on their own.
                difference = list(
                    diff(
                        old_val,
                        value,
                        ignore=["version", "last_edited_time", "last_edited_by"],
                        expand=True,
                    )
                )
                self._values[table][id] = value
                self._save_cache("_values")
                if old_val and difference:
                    logger.debug("Value changed! Difference: {}".format(difference))
                    callback_queue.append((table, id, difference, old_val, value))
        # run callbacks outside the mutex to avoid lockups
        for cb in callback_queue:
            self._trigger_callbacks(*cb)
    def call_get_record_values(self, **kwargs):
        """
        Call the server's getRecordValues endpoint to update the local record store. The keyword arguments map
        table names into lists of (or singular) record IDs to load for that table. Use True to refresh all known
        records for that table.
        """
        requestlist = []
        for table, ids in kwargs.items():
            # ensure "ids" is a proper list
            if ids is True:
                ids = list(self._values.get(table, {}).keys())
            if isinstance(ids, str):
                ids = [ids]
            # if we're in a transaction, add the requested IDs to a queue to refresh when the transaction completes
            if self._client.in_transaction():
                self._records_to_refresh[table] = list(
                    set(self._records_to_refresh.get(table, []) + ids)
                )
                continue
            requestlist += [{"table": table, "id": extract_id(id)} for id in ids]
        if requestlist:
            logger.debug(
                "Calling 'getRecordValues' endpoint for requests: {}".format(
                    requestlist
                )
            )
            results = self._client.post(
                "getRecordValues", {"requests": requestlist}
            ).json()["results"]
            for request, result in zip(requestlist, results):
                self._update_record(
                    request["table"],
                    request["id"],
                    value=result.get("value"),
                    role=result.get("role"),
                )
    def get_current_version(self, table, id):
        # -1 means "unknown version" (record absent or versionless).
        values = self._get(table, id)
        if values and "version" in values:
            return values["version"]
        else:
            return -1
    def call_load_page_chunk(self, page_id):
        # Defer the refresh until the active transaction (if any) completes.
        if self._client.in_transaction():
            self._pages_to_refresh.append(page_id)
            return
        data = {
            "page": {"id": page_id},
            "limit": 100,
            "cursor": {"stack": []},
            "chunkNumber": 0,
            "verticalColumns": False,
        }
        recordmap = self._client.post("loadPageChunk", data).json()["recordMap"]
        self.store_recordmap(recordmap)
    def store_recordmap(self, recordmap):
        # Fold a server "recordMap" payload into the local store.
        for table, records in recordmap.items():
            if not isinstance(records, dict):
                continue
            for id, record in records.items():
                if not isinstance(record, dict):
                    continue
                self._update_record(
                    table, id, value=record.get("value"), role=record.get("role")
                )
    def call_query_collection(
        self,
        collection_id,
        collection_view_id,
        search="",
        type="table",
        aggregate=[],
        aggregations=[],
        filter={},
        sort=[],
        calendar_by="",
        group_by="",
    ):
        assert not (
            aggregate and aggregations
        ), "Use only one of `aggregate` or `aggregations` (old vs new format)"
        # convert singletons into lists if needed
        if isinstance(aggregate, dict):
            aggregate = [aggregate]
        if isinstance(sort, dict):
            sort = [sort]
        data = {
            "collectionId": collection_id,
            "collectionViewId": collection_view_id,
            "loader": {
                "limit": 10000,
                "loadContentCover": True,
                "searchQuery": search,
                "userLocale": "en",
                "userTimeZone": str(get_localzone()),
                "type": type,
            },
            "query": {
                "aggregate": aggregate,
                "aggregations": aggregations,
                "filter": filter,
                "sort": sort,
            },
        }
        response = self._client.post("queryCollection", data).json()
        self.store_recordmap(response["recordMap"])
        return response["result"]
    def handle_post_transaction_refreshing(self):
        # Flush the refresh queues accumulated while a transaction was open.
        for block_id in self._pages_to_refresh:
            self.call_load_page_chunk(block_id)
        self._pages_to_refresh = []
        self.call_get_record_values(**self._records_to_refresh)
        self._records_to_refresh = {}
    def run_local_operations(self, operations):
        """
        Called to simulate the results of running the operations on the server, to keep the record store in sync
        even when we haven't completed a refresh (or we did a refresh but the database hadn't actually updated yet...)
        """
        for operation in operations:
            self.run_local_operation(**operation)
    def run_local_operation(self, table, id, path, command, args):
        with self._mutex:
            path = deepcopy(path)
            new_val = deepcopy(self._values[table][id])
            ref = new_val
            # loop and descend down the path until it's consumed, or if we're doing a "set", there's one key left
            while (len(path) > 1) or (path and command != "set"):
                comp = path.pop(0)
                if comp not in ref:
                    ref[comp] = [] if "list" in command else {}
                ref = ref[comp]
            if command == "update":
                assert isinstance(ref, dict)
                ref.update(args)
            elif command == "set":
                assert isinstance(ref, dict)
                if path:
                    ref[path[0]] = args
                else:
                    # this is the case of "setting the top level" (i.e. creating a record)
                    ref.clear()
                    ref.update(args)
            elif command == "listAfter":
                assert isinstance(ref, list)
                if "after" in args:
                    ref.insert(ref.index(args["after"]) + 1, args["id"])
                else:
                    ref.append(args["id"])
            elif command == "listBefore":
                assert isinstance(ref, list)
                if "before" in args:
                    ref.insert(ref.index(args["before"]), args["id"])
                else:
                    ref.insert(0, args["id"])
            elif command == "listRemove":
                try:
                    ref.remove(args["id"])
                except ValueError:
                    # Removing an id that is not present is a no-op.
                    pass
            self._update_record(table, id, value=new_val)
b4f729116df73c24897dbd9b10c0de978895753d | 5,119 | py | Python | nuitka/build/SconsHacks.py | zu3st/Nuitka | 41d002085dbd23b15d90b71b5cfbb51670402b16 | [
"Apache-2.0"
] | null | null | null | nuitka/build/SconsHacks.py | zu3st/Nuitka | 41d002085dbd23b15d90b71b5cfbb51670402b16 | [
"Apache-2.0"
] | null | null | null | nuitka/build/SconsHacks.py | zu3st/Nuitka | 41d002085dbd23b15d90b71b5cfbb51670402b16 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Hacks for scons that we apply.
We blacklist some tools from the standard scan, there is e.g. no need to ask
what fortran version we have installed to compile with Nuitka.
Also we hack the gcc version detection to fix some bugs in it, and to avoid
scanning for g++ when we have a gcc installer, but only if that is not too
version.
"""
import os
import re
import subprocess
import SCons.Tool.gcc # pylint: disable=I0021,import-error
from SCons.Script import Environment # pylint: disable=I0021,import-error
from nuitka.Tracing import scons_details_logger
from .SconsUtils import decodeData, getExecutablePath, isGccName
# Cache for detected versions, keyed by the full (absolute) compiler command
# tuple; values are version tuples or None.
v_cache = {}
# Prevent these programs from being found, avoiding the burden of tool init.
blacklisted_tools = (
    # TODO: Where the fallback is needed, g++ needs to scanned or else it
    # cannot be used.
    # "g++",
    "c++",
    "f95",
    "f90",
    "f77",
    "gfortran",
    "ifort",
    "javah",
    "tar",
    "dmd",
    "gdc",
    "flex",
    "bison",
    "ranlib",
    "ar",
    "ldc2",
    "pdflatex",
    "pdftex",
    "latex",
    "tex",
    "dvipdf",
    "dvips",
    "gs",
    "swig",
    "ifl",
    "rpcgen",
    "rpmbuild",
    "bk",
    "p4",
    "m4",
    "ml",
    "icc",
    "sccs",
    "rcs",
    "cvs",
    "as",
    "gas",
    "nasm",
)
def _myDetectVersion(env, clvar):
    """Run the compiler to determine its version.

    :param env: SCons environment used to spawn the subprocess.
    :param clvar: compiler command as a tuple (program plus arguments).
    :returns: version as a tuple of ints, or None on non-zero exit.
    """
    clvar0 = os.path.basename(clvar[0])
    # GCC-family and clang understand -dumpversion; other compilers get
    # the more generic --version.
    if isGccName(clvar0) or "clang" in clvar0:
        command = clvar + ("-dumpversion",)
    else:
        command = clvar + ("--version",)
    # pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'],
    pipe = SCons.Action._subproc(  # pylint: disable=protected-access
        env, command, stdin="devnull", stderr="devnull", stdout=subprocess.PIPE
    )
    # Only the first output line carries the version.
    line = pipe.stdout.readline()
    # Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer:
    # So continue with reading to let the child process actually terminate.
    while pipe.stdout.readline():
        pass
    ret = pipe.wait()
    if ret != 0:
        scons_details_logger.info(
            "Error, error exit from %r (%d) gave %r."
            % (command, ret, pipe.stderr.read())
        )
        return None
    # On Python 3 the pipe yields bytes; decode before parsing.
    if str is not bytes and type(line) is bytes:
        line = decodeData(line)
    line = line.strip()
    match = re.findall(r"[0-9]+(?:\.[0-9]+)+", line)
    if match:
        version = match[0]
    else:
        # gcc 8 or higher
        version = line.strip()
    # NOTE(review): a non-numeric version string would raise ValueError
    # here — presumably never happens for the compilers we accept; confirm.
    version = tuple(int(part) for part in version.split("."))
    return version
# Derived from gcc.py in SCons.
def myDetectVersion(env, cc):
    """Return the version of the GNU compiler, or None if it is not a GNU compiler."""
    cc = env.subst(cc)
    # Not a compiler at all, or a C++ driver we do not want to probe.
    if not cc or "++" in os.path.basename(cc):
        return None
    command = list(SCons.Util.CLVar(cc))
    # Make path absolute, to improve cache hit rate.
    resolved_path = getExecutablePath(command[0], env)
    if resolved_path is not None:
        command[0] = resolved_path
    cache_key = tuple(command)
    if cache_key not in v_cache:
        v_cache[cache_key] = _myDetectVersion(env, cache_key)
    scons_details_logger.info("CC %r version check gives %r" % (cc, v_cache[cache_key]))
    return v_cache[cache_key]
def myDetect(self, progs):
    """Program detection that refuses to look for unneeded tools.

    Fortran, tar, D, c++ etc. are never used by us, so pretend they do
    not exist instead of paying for the real lookup; everything else is
    delegated to the original SCons detection.
    """
    if any(tool in progs for tool in blacklisted_tools):
        return None
    return orig_detect(self, progs)
# The original value will be used in our form.
orig_detect = Environment.Detect
def getEnhancedToolDetect():
    """Return the patched tool detection function.

    As a side effect, SCons' gcc version probing is replaced with our
    cached, clang-aware myDetectVersion; the returned myDetect then
    short-circuits lookups of blacklisted tools.
    """
    SCons.Tool.gcc.detect_version = myDetectVersion
    return myDetect
def makeGccUseLinkerFile(source_dir, source_files, env):
    """Make gcc read its link inputs from a response file.

    Avoids command line length limits: rewrites the environment's
    "LINKCOM" so "$SOURCES" is replaced by an "@link_input.txt" response
    file reference inside *source_dir*, then writes that file with one
    quoted object filename per source file.
    """
    tmp_linker_filename = os.path.join(source_dir, "@link_input.txt")

    # Escape the response file path the same way SCons would escape a
    # command line argument, falling back to identity.
    env["LINKCOM"] = env["LINKCOM"].replace(
        "$SOURCES", "@%s" % env.get("ESCAPE", lambda x: x)(tmp_linker_filename)
    )

    with open(tmp_linker_filename, "w") as tmpfile:
        for filename in source_files:
            # Map the source filename to its object file.  os.path.splitext
            # is robust against names without an extension (the previous
            # split/join dance produced just ".o" there) and against dots
            # in directory names.
            filename = os.path.splitext(filename)[0] + ".o"

            # gcc response files want forward slashes, also on Windows.
            if os.name == "nt":
                filename = filename.replace(os.path.sep, "/")

            tmpfile.write('"%s"\n' % filename)

        tmpfile.write(env.subst("$SOURCES"))
| 26.117347 | 88 | 0.630787 |
4579996c11ebf106cf3d97a6b0ddcf8773bd986e | 289 | py | Python | leonardo_theme_portfolioitem/__init__.py | dresl/leonardo-theme-portfolioitem | d431f06d1a1c83a054585cb1c207b4d12302aaf6 | [
"Apache-2.0"
] | null | null | null | leonardo_theme_portfolioitem/__init__.py | dresl/leonardo-theme-portfolioitem | d431f06d1a1c83a054585cb1c207b4d12302aaf6 | [
"Apache-2.0"
] | null | null | null | leonardo_theme_portfolioitem/__init__.py | dresl/leonardo-theme-portfolioitem | d431f06d1a1c83a054585cb1c207b4d12302aaf6 | [
"Apache-2.0"
] | null | null | null |
# Django apps that Leonardo should register for this theme.
LEONARDO_APPS = [
    'leonardo_theme_portfolioitem',
    'leonardo_module_analytics',
]
# Theme stylesheets (CSS animations and page-load progress indicators).
LEONARDO_CSS_FILES = [
    'css/animate.css',
    'css/fakeLoader.css',
    'css/nprogress.css',
]
# Theme scripts matching the stylesheets above.
LEONARDO_JS_FILES = [
    'js/fakeLoader.min.js',
    'js/portfolio.js',
    'js/nprogress.js',
]
| 16.055556 | 35 | 0.650519 |
96ef9173b4431e5f4593119463d368d471582528 | 4,882 | py | Python | doc/tutorials/1_basics/plot_6-mesh-quality-inspection.py | mjziebarth/gimli | 196ac4d6dd67e0326cccc44a87b367f64051e490 | [
"Apache-2.0"
] | 224 | 2015-02-20T21:36:24.000Z | 2022-03-30T07:27:43.000Z | doc/tutorials/1_basics/plot_6-mesh-quality-inspection.py | ivek1312/gimli | 5fafebb7c96dd0e04e2616df402fa27a01609d63 | [
"Apache-2.0"
] | 341 | 2015-05-21T14:39:51.000Z | 2022-03-31T01:54:07.000Z | doc/tutorials/1_basics/plot_6-mesh-quality-inspection.py | ivek1312/gimli | 5fafebb7c96dd0e04e2616df402fa27a01609d63 | [
"Apache-2.0"
] | 107 | 2015-01-24T14:40:21.000Z | 2022-02-25T12:12:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
Quality of unstructured meshes
==============================
**Problem:**
Accurate numerical solutions require high quality meshes. In the case of
unstructured triangular meshes (or tetrahedral meshes in 3D), relatively
large and small angles can lead to discretization errors. Large angles can
cause interpolation errors, while small angles can lead to ill-conditioned
stiffness matrices.
**Identification:**
Some common 2D quality measures are
implemented in :mod:`pygimli.meshtools` and will be used in this tutorial.
In 3D, we recommend to export the mesh in VTK format and inspect mesh
# quality with ParaView (Filters -> Alphabetical -> Mesh quality).
**Solution:**
Meshes can be improved by adjusting cell sizes (`area` keyword) and the
minimum allowed angle (`quality` keyword). :term:`Gmsh` and other more
advanced meshing tools also provide powerful mesh optimization algorithms.
# However, the numerical accuracy may be improved at the expense of increased
cell counts and thus longer computation times.
"""
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
from pygimli.meshtools import polytools as plc
from pygimli.meshtools import quality
################################################################################
# We start by creating a mesh with a refined object inside.
world = plc.createWorld(start=[-10, 0], end=[10, -10], marker=1,
worldMarker=False)
c1 = plc.createCircle(pos=[0.0, -5.0], radius=3.0, area=.3)
################################################################################
# When calling the :func:`pg.meshtools.createMesh` function, a quality parameter
# can be forwarded to Triangle, which prescribes the minimum angle allowed in
# the final mesh. We can asses its effectiveness by creating different meshes
# and plotting the resulting quality. For this purpose, we define a showQuality
# function, which also plots a histogram of the mesh qualities.
def showQuality(mesh, qualities):
    """Display the mesh colored by per-cell quality next to a histogram.

    mesh : the pygimli mesh to display.
    qualities : per-cell quality values, expected in [0, 1].
    """
    fig, axes = plt.subplots(1, 2)
    axes[1].hist(qualities, color="grey")
    # Left panel: mesh colored red (bad) to green (good) quality.
    pg.show(mesh, qualities, ax=axes[0], cMin=0.5, cMax=1, hold=True,
            logScale=False, label="Mesh quality", cmap="RdYlGn", showMesh=True)
    s = "Min: %.2f, Mean: %.2f, Max: %.2f" % (
        np.min(qualities), np.mean(qualities), np.max(qualities))
    axes[1].set_title(s)
    axes[1].set_xlabel("Mesh quality")
    axes[1].set_ylabel("Frequency")
    axes[1].set_xlim(0, 1)
    # Figure resizing according to mesh dimensions (keeps the aspect sane).
    x = mesh.xmax() - mesh.xmin()
    y = mesh.ymax() - mesh.ymin()
    width, height = fig.get_size_inches()
    fig.set_figheight(height * 1.3 * (y / x))
    fig.tight_layout()
for q in 10, 20, 30:
m = pg.meshtools.createMesh([world, c1], quality=q)
showQuality(m, quality(m))
################################################################################
# Note that there is a decreasing number of problematic triangles (marked in
# red). However, the number of cells is increased significantly to achieve this.
################################################################################
# Quality measures
# ----------------
#
# There are numerous measures related to the area/volume, boundary lengths and
# angles of a cell (see [#f1]_ for a review). A straightforward measure
# considers the minimum angle in a triangle (normalized by 60 degrees). More
# sophisticated measures also take into account the cell size. A very common
# measure, often referred to as :math:`\eta`, relates the area of a triangle
# :math:`a` to its edge lengths :math:`l_1,l_2,l_3`.
#
# .. math::
# \eta = \frac{4\sqrt{3}a}{l_1^2 + l_2^2 + l_3^2}
#
# The normalization factor :math:`4\sqrt{3}` ensures that a perfect triangle
# (equal edges) has a quality of 1. A popular measure also applicable for other
# cell types is the *Normalized shape ratio (NSR)*, which relates the
# circumradius :math:`R` to the inradius of cell (:math:`r`).
#
# .. math::
# \rho = \frac{2r}{R}
#
# Again the factor 2 (3 in 3D) ensures that a perfect triangle has a quality of
# 1, whereas a flat triangle would have a quality of 0. The above mentioned
# quality measures are plotted below for the same mesh.
world = plc.createWorld(start=[-5, 0], end=[5, -5], marker=1,
worldMarker=False, area=2.)
c1 = plc.createCircle(pos=[0.0, -2.0], radius=1.0, area=.3)
mesh = pg.meshtools.createMesh([world, c1])
for measure in "minimumAngle", "eta", "nsr":
showQuality(mesh, quality(mesh, measure))
plt.title(measure)
plt.show()
################################################################################
# **References:**
#
# .. [#f1] Field, D. A. (2000), Qualitative measures for initial meshes. Int. J.
# Numer. Meth. Engng., 47: 887–906.
| 39.370968 | 80 | 0.634166 |
a15c77a61dbde8440bd58a32d33ee5ac19361427 | 2,486 | py | Python | lib/rucio/api/scope.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | lib/rucio/api/scope.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | lib/rucio/api/scope.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright CERN since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rucio.api.permission
import rucio.common.exception
from rucio.core import scope as core_scope
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.schema import validate_schema
def list_scopes(filter_=None, vo='def'):
    """
    Lists all scopes.

    :param filter_: Dictionary of attributes by which the input data should be filtered
    :param vo: The VO to act on.
    :returns: A list containing all scopes.
    """
    # Work on a copy: the previous mutable default ({}) was shared between
    # calls and then mutated below, and writing into the caller's dict
    # leaked the InternalScope substitution back to the caller.
    filter_ = dict(filter_) if filter_ else {}
    if 'scope' in filter_:
        filter_['scope'] = InternalScope(scope=filter_['scope'], vo=vo)
    else:
        # Default: match any scope within the given VO.
        filter_['scope'] = InternalScope(scope='*', vo=vo)
    return [scope.external for scope in core_scope.list_scopes(filter_=filter_)]
def add_scope(scope, account, issuer, vo='def'):
    """
    Creates a scope for an account.

    :param account: The account name.
    :param scope: The scope identifier.
    :param issuer: The issuer account.
    :param vo: The VO to act on.
    :raises AccessDenied: If the issuer lacks the add_scope permission.
    """
    # Validate the external scope name against the schema before the
    # permission check; validate_schema raises on invalid names.
    validate_schema(name='scope', obj=scope, vo=vo)
    kwargs = {'scope': scope, 'account': account}
    if not rucio.api.permission.has_permission(issuer=issuer, vo=vo, action='add_scope', kwargs=kwargs):
        raise rucio.common.exception.AccessDenied('Account %s can not add scope' % (issuer))
    # Convert the external identifiers into their internal multi-VO forms.
    scope = InternalScope(scope, vo=vo)
    account = InternalAccount(account, vo=vo)
    core_scope.add_scope(scope, account)
def get_scopes(account, vo='def'):
    """
    Gets a list of all scopes for an account.

    :param account: The account name.
    :param vo: The VO to act on.

    :returns: A list containing the names of all scopes for this account.
    """
    internal_account = InternalAccount(account, vo=vo)
    internal_scopes = core_scope.get_scopes(internal_account)
    return [internal_scope.external for internal_scope in internal_scopes]
| 31.468354 | 104 | 0.702333 |
1af77a97dd8ed343480b641737ff20ae7449a0f6 | 1,051 | py | Python | pdfviewer/hoverbutton.py | Project-TRT/Enc-Viewer | 42218945ffef23f27b7b699bcb56b039fe6a189c | [
"MIT"
] | 37 | 2018-11-10T12:00:27.000Z | 2022-03-22T04:03:39.000Z | pdfviewer/hoverbutton.py | Project-TRT/Enc-Viewer | 42218945ffef23f27b7b699bcb56b039fe6a189c | [
"MIT"
] | null | null | null | pdfviewer/hoverbutton.py | Project-TRT/Enc-Viewer | 42218945ffef23f27b7b699bcb56b039fe6a189c | [
"MIT"
] | 20 | 2019-01-07T14:44:24.000Z | 2022-02-02T17:05:34.000Z | from tkinter import *
from PIL import Image, ImageTk
from pdfviewer.tooltip import ToolTip
class HoverButton(Button):
def __init__(self, master, tool_tip=None, image_path=None, keep_pressed=False, **kw):
Button.__init__(self, master=master, **kw)
self.defaultBackground = self["background"]
self.bind("<Enter>", self.on_enter)
self.bind("<Leave>", self.on_leave)
if keep_pressed:
self.bind("<Button-1>", self.on_click)
if image_path:
self.image = ImageTk.PhotoImage(Image.open(image_path))
self.configure(image=self.image)
if tool_tip:
ToolTip(self, text=tool_tip)
def on_click(self, e):
if self['background'] == self.defaultBackground:
self['background'] = self['activebackground']
else:
self['background'] = self.defaultBackground
def on_enter(self, e):
self['background'] = self['activebackground']
def on_leave(self, e):
self['background'] = self.defaultBackground
| 32.84375 | 89 | 0.632731 |
0f5bca7405cbcbfa31cb3fd8bafb27bd83475474 | 7,661 | py | Python | zzr_configs/rotated_blend_mask_add_atten_loss_used_PFN_with_ms_isaid.py | ZZR8066/AerialDetection | 34c732b61d7df9a832a2a072e8b6abbe8031cb07 | [
"Apache-2.0"
] | 6 | 2020-07-30T02:45:35.000Z | 2022-02-08T13:47:26.000Z | zzr_configs/rotated_blend_mask_add_atten_loss_used_PFN_with_ms_isaid.py | ZZR8066/AerialDetection_and_Segmenation | 34c732b61d7df9a832a2a072e8b6abbe8031cb07 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:51:10.000Z | 2021-12-26T22:28:08.000Z | zzr_configs/rotated_blend_mask_add_atten_loss_used_PFN_with_ms_isaid.py | ZZR8066/AerialDetection_and_Segmenation | 34c732b61d7df9a832a2a072e8b6abbe8031cb07 | [
"Apache-2.0"
] | 3 | 2020-11-09T03:11:16.000Z | 2021-11-02T09:30:39.000Z | # model settings
model = dict(
type='RotateBlendMaskRCNN',
# pretrained='/disk2/zzr/resnet50.pth',
pretrained='/disk2/zzr/resnet101.pth',
backbone=dict(
type='ResNet',
# depth=50,
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHeadRbbox',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2, 0.1],
reg_class_agnostic=True,
with_module=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
rbbox_roi_extractor=dict(
type='RboxSingleRoIExtractor',
roi_layer=dict(type='RoIAlignRotated', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
rbbox_head = dict(
type='SharedFCBBoxHeadRbbox',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1, 0.05],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
basis_head=dict(
type='PFPNHead',
num_levels=4,
num_bases=4,
basis_stride=4,
num_classes=16,
planes=128,
in_channels=256,
segm_loss_weight=0.3
),
atten_rroi_extractor=dict(
type='RboxSingleRoIExtractor',
roi_layer=dict(type='RoIAlignRotated', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
atten_head=dict(
type='AttenFCNLossHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=4,
loss_atten_weight=0.3,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
blender=dict(
type='Blender',
num_classes=16,
base_size=56,
rroi_extractor=dict(
type='RboxSingleRoIExtractor',
roi_layer=dict(type='RoIAlignRotated', out_size=56, sample_num=2),
out_channels=4,
featmap_strides=[2])))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssignerRbbox',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomRbboxSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=56,
expand_scale=1.0,
pos_weight=-1,
debug=False)
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr = 0.05,
nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.3),
max_per_img = 100,
mask_thr_binary=0.5)
)
# dataset settings
dataset_type = 'iSAIDDataset'
data_root = '/disk2/zzr/dataset_isaid/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=1,
workers_per_gpu=0,
# workers_per_gpu=0,
train=dict(
type=dataset_type,
balance_cat=False,
ann_file=data_root + 'train/instancesonly_filtered_train_useful_standard.json',
img_prefix=data_root + 'train/images/',
img_scale=[(400,400),(600,600),(800, 800),(1000,1000),(1200,1200)],
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=True,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'val/instancesonly_filtered_val_standard.json',
img_prefix=data_root + 'val/images/',
img_scale=(800, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'val/instancesonly_filtered_val_standard.json',
img_prefix=data_root + 'val/images/',
# ann_file=data_root + 'test/test_info.json',
# img_prefix=data_root + 'test/images/',
img_scale=(800, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
# NOTE(review): lr looks linearly scaled for the tiny batch
# (imgs_per_gpu=1 above) -- presumably 0.02/16; confirm before changing.
optimizer = dict(type='SGD', lr=0.00125, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[16, 22])  # decay epochs within the 24-epoch schedule below
checkpoint_config = dict(interval=1)  # save a checkpoint every epoch
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '/disk2/zzr/work_dirs/rotated_blend_mask_rcnn_r101_isaid_num_bases_4_add_atten_loss_used_PFPN_with_ms'
load_from = None
resume_from = None
workflow = [('train', 1)] | 31.142276 | 113 | 0.575773 |
a4bbd88c20e5ea0428d3c43d868f228a325854e7 | 2,748 | py | Python | missile.py | teepster/thaipy_march2022 | bf0a09ac6baa0f74b70b762e8890af6ef3375538 | [
"MIT"
] | null | null | null | missile.py | teepster/thaipy_march2022 | bf0a09ac6baa0f74b70b762e8890af6ef3375538 | [
"MIT"
] | null | null | null | missile.py | teepster/thaipy_march2022 | bf0a09ac6baa0f74b70b762e8890af6ef3375538 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pygame
import math
def blit_rotate(image, pos, originPos, angle):
    """Rotate *image* by *angle* degrees around an off-center pivot.

    *pos* is the screen position the pivot must stay at; *originPos* is
    the pivot point in image coordinates.  Returns the rotated surface
    and the rect at which to blit it so the pivot does not move.
    """
    # https://stackoverflow.com/questions/59909942/how-can-you-rotate-an-image-around-an-off-center-pivot-in-pygame
    # Rect of the unrotated image placed so that originPos lands on pos.
    image_rect = image.get_rect(topleft = (pos[0] - originPos[0], pos[1]-originPos[1]))
    # Vector from the image center to the pivot, rotated with the image
    # (sign flipped to match screen coordinates -- see the linked answer).
    offset_center_to_pivot = pygame.math.Vector2(pos) - image_rect.center
    rotated_offset = offset_center_to_pivot.rotate(-angle)
    # New center chosen so the pivot stays fixed at pos after rotation.
    rotated_image_center = (pos[0] - rotated_offset.x, pos[1] - rotated_offset.y)
    rotated_image = pygame.transform.rotate(image, angle)
    rotated_image_rect = rotated_image.get_rect(center = rotated_image_center)
    return rotated_image, rotated_image_rect
class Missile(pygame.sprite.Sprite):
    """Sprite flying in a straight line from a launch point to a target.

    The rocket image is rotated once at construction time to face the
    destination and then advanced a fixed distance per update() tick;
    once the full distance is covered the sprite kills itself.
    """
    ASSET_FILE = "./missile.png"  # source image, drawn pointing upward
    REDUCTION_SCALE = 8           # integer downscale factor for the asset
    PIVOTX = 213                  # rotation pivot in original image coords
    PIVOTY = 510
    VELOCITY = 12                 # pixels travelled per update() call
    def __init__(self, launch_pos, destination_pos):
        pygame.sprite.Sprite.__init__(self)
        load_image = pygame.image.load(self.ASSET_FILE).convert_alpha()
        w,h = load_image.get_size()
        sw,sh = w//self.REDUCTION_SCALE, h//self.REDUCTION_SCALE
        self.upright_rocket_img = pygame.transform.scale(load_image,(sw,sh))
        self.src_pos = launch_pos
        self.des_pos = destination_pos
        # what is my rotation?
        dest_x,dest_y = self.des_pos
        source_x,source_y = self.src_pos
        # i hate the pg coordinate system. :-(  (screen y grows downward,
        # hence the source-minus-destination deltas)
        dx = source_x-dest_x
        dy = source_y-dest_y
        if dx == 0:
            self.rotation = 0
        else:
            # NOTE(review): a horizontal shot (dy == 0, dx != 0) raises
            # ZeroDivisionError here -- callers apparently never do that.
            self.rotation = math.degrees( math.atan( dx / dy ) )
        # trajectory precalculations
        # the following values are constant because rotation is constant
        self.scaled_pivot_x, self.scaled_pivot_y = self.PIVOTX//self.REDUCTION_SCALE, self.PIVOTY//self.REDUCTION_SCALE
        self.sinrotation = math.sin(math.radians(self.rotation))
        self.cosrotation = math.cos(math.radians(self.rotation))
        # Distance covered per tick; mathematically this equals VELOCITY
        # because sin^2 + cos^2 == 1.
        self.distance_per_tick = math.sqrt(self.VELOCITY*self.sinrotation * self.VELOCITY*self.sinrotation +\
                                           self.VELOCITY*self.cosrotation * self.VELOCITY*self.cosrotation
                                            )
        self.max_distance = math.sqrt( dx*dx + dy*dy )
        self.distance_traveled = 0.0
        # Initial rotated image/rect, pivoting around the scaled pivot.
        self.image, self.rect = blit_rotate(self.upright_rocket_img,
                                            self.src_pos,
                                            (self.scaled_pivot_x, self.scaled_pivot_y),
                                            self.rotation
                                            )
    def update(self):
        """Advance one tick; kill the sprite once the target is reached."""
        self.distance_traveled += self.distance_per_tick
        if self.distance_traveled > self.max_distance:
            self.kill()
        else:
            dx,dy = self.distance_traveled*self.sinrotation, self.distance_traveled*self.cosrotation
            current_pos = self.src_pos[0] - int(dx), self.src_pos[1] - int(dy)
            self.image, self.rect = blit_rotate(self.upright_rocket_img,
                                                current_pos,
                                                (self.scaled_pivot_x, self.scaled_pivot_y),
                                                self.rotation
                                                )
| 36.64 | 113 | 0.725983 |
ac7f983673e43d6c09d2e53a917df9da118f3c82 | 2,133 | py | Python | posts/api/views.py | videetssinghai/Blog-Rest-Api | e81e0c1969b170cb482d2fdf7f2883efc42c69db | [
"MIT"
] | null | null | null | posts/api/views.py | videetssinghai/Blog-Rest-Api | e81e0c1969b170cb482d2fdf7f2883efc42c69db | [
"MIT"
] | null | null | null | posts/api/views.py | videetssinghai/Blog-Rest-Api | e81e0c1969b170cb482d2fdf7f2883efc42c69db | [
"MIT"
] | null | null | null | from rest_framework.generics import (CreateAPIView, ListAPIView, RetrieveAPIView, DestroyAPIView, UpdateAPIView, RetrieveUpdateAPIView)
from posts.models import Post
from .serializers import PostListSerializer, PostDetailsSerializer, PostCreateUpdateSerializer
from rest_framework.permissions import (
AllowAny, IsAuthenticated, IsAdminUser, IsAuthenticatedOrReadOnly
)
from .permissions import IsOwnerOrReadOnly
from django.db.models import Q
from rest_framework.filters import (
SearchFilter,
OrderingFilter,
)
from .pagination import PostLimitOffsetPaginnation, PostPageNumberPagniation
class PostCreateAPIView(CreateAPIView):
    """POST endpoint creating a Post; requires an authenticated user."""
    queryset = Post.objects.all()
    serializer_class = PostCreateUpdateSerializer
    permission_classes = [IsAuthenticated]
    def perform_create(self, serializer):
        # Stamp the new post with the requesting user as its author.
        serializer.save(user=self.request.user)
class PostListAPIView(ListAPIView):
    """GET endpoint listing posts, with ?q= search and page pagination."""
    serializer_class = PostListSerializer
    filter_backends = [SearchFilter]
    search_fields = [ 'title' , 'content','user__first_name',]
    pagination_class = PostPageNumberPagniation
    def get_queryset(self,*args,**kwargs):
        """Return all posts, narrowed by the optional ?q= search term."""
        query = self.request.GET.get("q")
        queryset_list = Post.objects.all()
        if query:
            # Case-insensitive match against title, content or the
            # author's names; distinct() collapses duplicate rows that
            # the OR-ed user joins can produce.
            queryset_list = queryset_list.filter(
                Q(title__icontains=query) |
                Q(content__icontains=query) |
                Q(user__first_name__icontains=query) |
                Q(user__last_name__icontains=query)
            ).distinct()
        return queryset_list
class PostDetailAPIView(RetrieveAPIView):
    """GET a single post, looked up by its slug."""
    queryset = Post.objects.all()
    serializer_class = PostDetailsSerializer
    lookup_field = 'slug'
class PostUpdateAPIView(RetrieveUpdateAPIView):
    """GET/PUT/PATCH a post by slug; only its owner may modify it."""
    queryset = Post.objects.all()
    serializer_class = PostCreateUpdateSerializer
    lookup_field = 'slug'
    permission_classes = [IsOwnerOrReadOnly]
    def perform_update(self, serializer):
        # Re-stamp the post with the requesting user on save.
        serializer.save(user=self.request.user)
class PostDeleteAPIView(DestroyAPIView):
    """DELETE a post by slug."""
    queryset = Post.objects.all()
    serializer_class = PostDetailsSerializer
    lookup_field = 'slug'
| 29.219178 | 135 | 0.742616 |
349e81c28085fd6fd2bec865c98a713e5798e909 | 2,342 | py | Python | segint_research_django/segint_api/migrations/0028_auto_20200728_1508.py | VarianAPIs/SegInt-Research | 3b7aa71ada46cbb35a428a00eb9f2a5c43f15d51 | [
"MIT"
] | 3 | 2021-04-15T14:24:24.000Z | 2022-03-23T17:07:06.000Z | segint_research_django/segint_api/migrations/0028_auto_20200728_1508.py | VarianAPIs/SegInt-Research | 3b7aa71ada46cbb35a428a00eb9f2a5c43f15d51 | [
"MIT"
] | null | null | null | segint_research_django/segint_api/migrations/0028_auto_20200728_1508.py | VarianAPIs/SegInt-Research | 3b7aa71ada46cbb35a428a00eb9f2a5c43f15d51 | [
"MIT"
] | 1 | 2021-04-21T15:05:09.000Z | 2021-04-21T15:05:09.000Z | # Generated by Django 3.0.7 on 2020-07-28 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('segint_api', '0027_auto_20200728_1456'),
    ]
    # The auto-generated file repeated the same AlterField twelve times;
    # build the identical operation list programmatically instead.  The
    # loop order (dimensions before spacing, max before min, then x/y/z)
    # reproduces the original generated sequence exactly.
    operations = [
        migrations.AlterField(
            model_name='modelchanneldescription',
            name='%s_%s_%s' % (group, bound, axis),
            field=models.FloatField(default=0),
        )
        for group in ('dimensions', 'spacing')
        for bound in ('max', 'min')
        for axis in ('x', 'y', 'z')
    ]
| 31.648649 | 50 | 0.581554 |
4291e29250d29b1fa3e4d71a4d971224cb996bed | 1,823 | py | Python | 222-Count-Complete-Tree-Nodes/solution01.py | Eroica-cpp/LeetCode | 07276bd11558f3d0e32bec768b09e886de145f9e | [
"CC-BY-3.0",
"MIT"
] | 7 | 2015-05-05T22:21:30.000Z | 2021-03-13T04:04:15.000Z | 222-Count-Complete-Tree-Nodes/solution01.py | Eroica-cpp/LeetCode | 07276bd11558f3d0e32bec768b09e886de145f9e | [
"CC-BY-3.0",
"MIT"
] | null | null | null | 222-Count-Complete-Tree-Nodes/solution01.py | Eroica-cpp/LeetCode | 07276bd11558f3d0e32bec768b09e886de145f9e | [
"CC-BY-3.0",
"MIT"
] | 2 | 2018-12-26T08:13:25.000Z | 2020-07-18T20:18:24.000Z | #!/usr/bin/python
# ==============================================================================
# Author: Tao Li (taoli@ucsd.edu)
# Date: Jun 5, 2015
# Question: 222-Count-Complete-Tree-Nodes
# Link: https://leetcode.com/problems/count-complete-tree-nodes/
# ==============================================================================
# Given a complete binary tree, count the number of nodes.
#
# Definition of a complete binary tree from Wikipedia:
#
# In a complete binary tree every level, except possibly the last, is
# completely filled, and all nodes in the last level are as far left
# as possible. It can have between 1 and 2h nodes inclusive at the last level h.
# ==============================================================================
# Method: BFS
# Time Complexity: Exp
# Space Complexity: O(n)
# Note: an ok version but TLE
# ==============================================================================
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node with a value and left/right child links."""
    def __init__(self, x):
        self.val = x        # payload value
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution:
    # @param {TreeNode} root
    # @return {integer}
    def countNodes(self, root):
        """Count the nodes of a complete binary tree in O(log(n)^2).

        The previous level-by-level BFS visited every node and timed out
        (as its own header noted).  In a complete tree the leftmost path
        gives the height in O(log n); comparing the height of the right
        subtree tells whether the left subtree is perfect, so at each of
        the O(log n) descent steps one whole subtree is counted with a
        closed formula.
        """
        def height(node):
            # Height in nodes of the leftmost path (0 for an empty tree).
            h = 0
            while node:
                h += 1
                node = node.left
            return h

        count = 0
        h = height(root)
        while root:
            if height(root.right) == h - 1:
                # Left subtree is perfect: its 2**(h-1) - 1 nodes plus
                # the current root.
                count += 2 ** (h - 1)
                root = root.right
            else:
                # Right subtree is perfect but one level shorter:
                # 2**(h-2) - 1 nodes plus the current root.
                count += 2 ** (h - 2)
                root = root.left
            h -= 1
        return count
if __name__ == '__main__':
    # Build the complete tree [1, 2, 3, 4, 5] and count its nodes.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    # Parenthesized call: the bare Python-2 print statement broke under
    # Python 3; this form prints the same value on both.
    print(Solution().countNodes(root))
b893ae105e977247049473031958f9556ca969d9 | 995 | py | Python | printing/fonts/__init__.py | notatallshaw/Grail-0.6 | 2b850a4a3dcfcb6cb4cf891f2c2f9ee0509c7b43 | [
"CNRI-Jython"
] | 22 | 2021-05-17T07:01:04.000Z | 2021-10-31T09:03:29.000Z | printing/fonts/__init__.py | SimiCode/Grail-Web-Browser | 16b86d3215068d334eacf6153b71a748eed53d3d | [
"CNRI-Jython"
] | 17 | 2021-05-17T04:39:49.000Z | 2021-09-06T14:50:39.000Z | printing/fonts/__init__.py | SimiCode/Grail-Web-Browser | 16b86d3215068d334eacf6153b71a748eed53d3d | [
"CNRI-Jython"
] | 6 | 2021-05-17T07:37:01.000Z | 2022-01-09T10:26:24.000Z | """PostScript font metrics package.
This package is used by the html2ps.py standalone script, and Grail's
PostScript printing dialog to gather the correct font metrics for
printing.
Exported functions:
font_from_name(psfontname)
returns a PSFont derived object for metrics calculation
"""
import regsub
def font_from_name(psfontname):
    """Return the PSFont metrics object for a PostScript font name.

    The metrics live in a module named after the font, e.g.
    "Times-Roman" -> PSFont_Times_Roman.

    Raises ImportError when no metrics module exists for the font.
    """
    # PostScript fonts use dash delimiters, while Python module names
    # use underscores.  str.replace() does this directly; the old regsub
    # module was deprecated in favor of re and later removed entirely.
    modulename = 'PSFont_' + psfontname.replace('-', '_')
    # no need to do any caching since the import mechanism does that
    # for us!
    module = __import__(modulename, globals(), locals())
    return module.font
# Need different code here for ni than for 1.5 packages.
# ("ni" was the pre-Python-1.5 package import mechanism; the setattr
# below suggests "__" names the package object there -- see probe.)
try:
    __                  # This fails with 1.5 packages, succeeds when using ni
except NameError:
    # 1.5 packages -- nothing more to do
    pass
else:
    # Backward compatible solution for ni: copy the public names onto
    # the package object itself so callers find them either way.
    for name in ['font_from_name', '__doc__']:
        setattr(__, name, vars()[name])
| 28.428571 | 69 | 0.716583 |
32637b5b29c8a00f5ad349df629ccab239037fce | 29,518 | py | Python | Lib/test/test_threading.py | certik/python-3.2 | 8c024c1e08248a4640429e3761905ae308d64e44 | [
"PSF-2.0"
] | 1 | 2019-12-31T18:13:30.000Z | 2019-12-31T18:13:30.000Z | Lib/test/test_threading.py | priya-sharma-prog/python-3.2 | 8c024c1e08248a4640429e3761905ae308d64e44 | [
"PSF-2.0"
] | 1 | 2019-07-04T09:18:21.000Z | 2019-07-04T19:14:03.000Z | Lib/test/test_threading.py | priya-sharma-prog/python-3.2 | 8c024c1e08248a4640429e3761905ae308d64e44 | [
"PSF-2.0"
] | 13 | 2015-04-02T16:49:38.000Z | 2021-10-17T20:14:14.000Z | # Very rudimentary test of threading module
import test.support
from test.support import verbose, strip_python_stderr, import_module
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
from test.script_helper import assert_python_ok, assert_python_failure
import subprocess
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable integer counter (callers provide any locking)."""
    def __init__(self):
        self.value = 0
    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1
    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1
    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker thread used by the thread tests.

    Each instance sleeps a tiny random time while holding *sema* and
    keeps the shared *nrunning* counter (guarded by *mutex*) up to date,
    asserting that no more than three workers ever run concurrently.
    """
    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        self.sema = sema          # semaphore bounding concurrency
        self.mutex = mutex        # protects the shared counter
        self.nrunning = nrunning  # Counter of currently running tasks
    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))
        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # At most 3 may hold the semaphore at once (see caller).
                self.testcase.assertTrue(self.nrunning.get() <= 3)
            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')
            with self.mutex:
                self.nrunning.dec()
                self.testcase.assertTrue(self.nrunning.get() >= 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """Common fixture: snapshot threads in setUp, clean them up after."""
    def setUp(self):
        # Remember the pre-test thread state so tearDown can detect and
        # wait out leaked threads.
        self._threads = test.support.threading_setup()
    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        test.support.reap_children()
class ThreadTests(BaseTestCase):
    """Behavioral tests for threading.Thread and module-level helpers,
    including several that poke at CPython internals (threading._active,
    PyThreadState_SetAsyncExc via ctypes)."""
    # Create a bunch of threads, let each do some work, wait until all are
    # done.
    def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10
        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()
        threads = []
        for i in range(NUMTASKS):
            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertEqual(t.ident, None)
            self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
            t.start()
        if verbose:
            print('waiting for all tasks to complete')
        for t in threads:
            t.join(NUMTASKS)
            self.assertTrue(not t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertFalse(t.ident is None)
            self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
                                     repr(t)))
        if verbose:
            print('all tasks done')
        self.assertEqual(numrunning.get(), 0)
    def test_ident_of_no_threading_threads(self):
        # The ident still must work for the main thread and dummy threads.
        self.assertFalse(threading.currentThread().ident is None)
        def f():
            ident.append(threading.currentThread().ident)
            done.set()
        done = threading.Event()
        ident = []
        _thread.start_new_thread(f, ())
        done.wait()
        self.assertFalse(ident[0] is None)
        # Kill the "immortal" _DummyThread
        del threading._active[ident[0]]
    # run with a small(ish) thread stack size (256kB)
    def test_various_ops_small_stack(self):
        if verbose:
            print('with 256kB thread stack size...')
        try:
            threading.stack_size(262144)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        threading.stack_size(0)
    # run with a large thread stack size (1MB)
    def test_various_ops_large_stack(self):
        if verbose:
            print('with 1MB thread stack size...')
        try:
            threading.stack_size(0x100000)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        threading.stack_size(0)
    def test_foreign_thread(self):
        # Check that a "foreign" thread can use the threading module.
        def f(mutex):
            # Calling current_thread() forces an entry for the foreign
            # thread to get made in the threading._active map.
            threading.current_thread()
            mutex.release()
        mutex = threading.Lock()
        mutex.acquire()
        tid = _thread.start_new_thread(f, (mutex,))
        # Wait for the thread to finish.
        mutex.acquire()
        self.assertIn(tid, threading._active)
        self.assertIsInstance(threading._active[tid], threading._DummyThread)
        del threading._active[tid]
    # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
    # exposed at the Python level. This test relies on ctypes to get at it.
    def test_PyThreadState_SetAsyncExc(self):
        ctypes = import_module("ctypes")
        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
        class AsyncExc(Exception):
            pass
        exception = ctypes.py_object(AsyncExc)
        # First check it works when setting the exception from the same thread.
        tid = _thread.get_ident()
        try:
            result = set_async_exc(ctypes.c_long(tid), exception)
            # The exception is async, so we might have to keep the VM busy until
            # it notices.
            while True:
                pass
        except AsyncExc:
            pass
        else:
            # This code is unreachable but it reflects the intent. If we wanted
            # to be smarter the above loop wouldn't be infinite.
            self.fail("AsyncExc not raised")
        try:
            self.assertEqual(result, 1) # one thread state modified
        except UnboundLocalError:
            # The exception was raised too quickly for us to get the result.
            pass
        # `worker_started` is set by the thread when it's inside a try/except
        # block waiting to catch the asynchronously set AsyncExc exception.
        # `worker_saw_exception` is set by the thread upon catching that
        # exception.
        worker_started = threading.Event()
        worker_saw_exception = threading.Event()
        class Worker(threading.Thread):
            def run(self):
                self.id = _thread.get_ident()
                self.finished = False
                try:
                    while True:
                        worker_started.set()
                        time.sleep(0.1)
                except AsyncExc:
                    self.finished = True
                    worker_saw_exception.set()
        t = Worker()
        t.daemon = True # so if this fails, we don't hang Python at shutdown
        t.start()
        if verbose:
            print(" started worker thread")
        # Try a thread id that doesn't make sense.
        if verbose:
            print(" trying nonsensical thread id")
        result = set_async_exc(ctypes.c_long(-1), exception)
        self.assertEqual(result, 0) # no thread states modified
        # Now raise an exception in the worker thread.
        if verbose:
            print(" waiting for worker thread to get started")
        ret = worker_started.wait()
        self.assertTrue(ret)
        if verbose:
            print(" verifying worker hasn't exited")
        self.assertTrue(not t.finished)
        if verbose:
            print(" attempting to raise asynch exception in worker")
        result = set_async_exc(ctypes.c_long(t.id), exception)
        self.assertEqual(result, 1) # one thread state modified
        if verbose:
            print(" waiting for worker to say it caught the exception")
        worker_saw_exception.wait(timeout=10)
        self.assertTrue(t.finished)
        if verbose:
            print(" all OK -- joining worker")
        if t.finished:
            t.join()
        # else the thread is still running, and we have no way to kill it
    def test_limbo_cleanup(self):
        # Issue 7481: Failure to start thread should cleanup the limbo map.
        def fail_new_thread(*args):
            raise threading.ThreadError()
        _start_new_thread = threading._start_new_thread
        threading._start_new_thread = fail_new_thread
        try:
            t = threading.Thread(target=lambda: None)
            self.assertRaises(threading.ThreadError, t.start)
            self.assertFalse(
                t in threading._limbo,
                "Failed to cleanup _limbo map on failure of Thread.start().")
        finally:
            threading._start_new_thread = _start_new_thread
    def test_finalize_runnning_thread(self):
        # Issue 1402: the PyGILState_Ensure / _Release functions may be called
        # very late on python exit: on deallocation of a running thread for
        # example.
        import_module("ctypes")
        rc, out, err = assert_python_failure("-c", """if 1:
            import ctypes, sys, time, _thread
            # This lock is used as a simple event variable.
            ready = _thread.allocate_lock()
            ready.acquire()
            # Module globals are cleared before __del__ is run
            # So we save the functions in class dict
            class C:
                ensure = ctypes.pythonapi.PyGILState_Ensure
                release = ctypes.pythonapi.PyGILState_Release
                def __del__(self):
                    state = self.ensure()
                    self.release(state)
            def waitingThread():
                x = C()
                ready.release()
                time.sleep(100)
            _thread.start_new_thread(waitingThread, ())
            ready.acquire() # Be sure the other thread is waiting.
            sys.exit(42)
            """)
        self.assertEqual(rc, 42)
    def test_finalize_with_trace(self):
        # Issue1733757
        # Avoid a deadlock when sys.settrace steps into threading._shutdown
        assert_python_ok("-c", """if 1:
            import sys, threading
            # A deadlock-killer, to prevent the
            # testsuite to hang forever
            def killer():
                import os, time
                time.sleep(2)
                print('program blocked; aborting')
                os._exit(2)
            t = threading.Thread(target=killer)
            t.daemon = True
            t.start()
            # This is the trace function
            def func(frame, event, arg):
                threading.current_thread()
                return func
            sys.settrace(func)
            """)
    def test_join_nondaemon_on_shutdown(self):
        # Issue 1722344
        # Raising SystemExit skipped threading._shutdown
        rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            from time import sleep
            def child():
                sleep(1)
                # As a non-daemon thread we SHOULD wake up and nothing
                # should be torn down yet
                print("Woke up, sleep function is:", sleep)
            threading.Thread(target=child).start()
            raise SystemExit
            """)
        self.assertEqual(out.strip(),
            b"Woke up, sleep function is: <built-in function sleep>")
        self.assertEqual(err, b"")
    def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getswitchinterval()
        try:
            for i in range(1, 100):
                sys.setswitchinterval(i * 0.0002)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setswitchinterval(old_interval)
    def test_no_refcycle_through_target(self):
        class RunSelfFunction(object):
            def __init__(self, should_raise):
                # The links in this refcycle from Thread back to self
                # should be cleaned up when the thread completes.
                self.should_raise = should_raise
                self.thread = threading.Thread(target=self._run,
                                               args=(self,),
                                               kwargs={'yet_another':self})
                self.thread.start()
            def _run(self, other_ref, yet_another):
                if self.should_raise:
                    raise SystemExit
        cyclic_object = RunSelfFunction(should_raise=False)
        weak_cyclic_object = weakref.ref(cyclic_object)
        cyclic_object.thread.join()
        del cyclic_object
        self.assertIsNone(weak_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_cyclic_object())))
        raising_cyclic_object = RunSelfFunction(should_raise=True)
        weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
        raising_cyclic_object.thread.join()
        del raising_cyclic_object
        self.assertIsNone(weak_raising_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_raising_cyclic_object())))
    def test_old_threading_api(self):
        # Just a quick sanity check to make sure the old method names are
        # still present
        t = threading.Thread()
        t.isDaemon()
        t.setDaemon(True)
        t.getName()
        t.setName("name")
        t.isAlive()
        e = threading.Event()
        e.isSet()
        threading.activeCount()
    def test_repr_daemon(self):
        t = threading.Thread()
        self.assertFalse('daemon' in repr(t))
        t.daemon = True
        self.assertTrue('daemon' in repr(t))
class ThreadJoinOnShutdown(BaseTestCase):
    """Interpreter-shutdown and fork() interaction tests.

    Every test spawns a fresh interpreter (assert_python_ok) because the
    scenarios under test — joining across shutdown, forking from worker
    threads, daemon threads racing teardown — can't run safely in-process.
    """
    # Between fork() and exec(), only async-safe functions are allowed (issues
    # #12316 and #11870), and fork() from a worker thread is known to trigger
    # problems with some operating systems (issue #3863): skip problematic tests
    # on platforms known to behave badly.
    platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
                         'os2emx')
    def _run_and_join(self, script):
        script = """if 1:
            import sys, os, time, threading
            # a thread, which waits for the main program to terminate
            def joiningfunc(mainthread):
                mainthread.join()
                print('end of thread')
                # stdout is fully buffered because not a tty, we have to flush
                # before exit.
                sys.stdout.flush()
        \n""" + script
        rc, out, err = assert_python_ok("-c", script)
        data = out.decode().replace('\r', '')
        self.assertEqual(data, "end of main\nend of thread\n")
    def test_1_join_on_shutdown(self):
        # The usual case: on exit, wait for a non-daemon thread
        script = """if 1:
            import os
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            time.sleep(0.1)
            print('end of main')
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        script = """if 1:
            childpid = os.fork()
            if childpid != 0:
                os.waitpid(childpid, 0)
                sys.exit(0)
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print('end of main')
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        script = """if 1:
            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    os.waitpid(childpid, 0)
                    sys.exit(0)
                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print('end of main')
                t.start()
                t.join() # Should not block: main_thread is already stopped
            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script)
    def assertScriptHasOutput(self, script, expected_output):
        rc, out, err = assert_python_ok("-c", script)
        data = out.decode().replace('\r', '')
        self.assertEqual(data, expected_output)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_4_joining_across_fork_in_worker_thread(self):
        # There used to be a possible deadlock when forking from a child
        # thread. See http://bugs.python.org/issue6643.
        # The script takes the following steps:
        # - The main thread in the parent process starts a new thread and then
        # tries to join it.
        # - The join operation acquires the Lock inside the thread's _block
        # Condition. (See threading.py:Thread.join().)
        # - We stub out the acquire method on the condition to force it to wait
        # until the child thread forks. (See LOCK ACQUIRED HERE)
        # - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
        # HERE)
        # - The main thread of the parent process enters Condition.wait(),
        # which releases the lock on the child thread.
        # - The child process returns. Without the necessary fix, when the
        # main thread of the child process (which used to be the child thread
        # in the parent process) attempts to exit, it will try to acquire the
        # lock in the Thread._block Condition object and hang, because the
        # lock was held across the fork.
        script = """if 1:
            import os, time, threading
            finish_join = False
            start_fork = False
            def worker():
                # Wait until this thread's lock is acquired before forking to
                # create the deadlock.
                global finish_join
                while not start_fork:
                    time.sleep(0.01)
                # LOCK HELD: Main thread holds lock across this call.
                childpid = os.fork()
                finish_join = True
                if childpid != 0:
                    # Parent process just waits for child.
                    os.waitpid(childpid, 0)
                # Child process should just return.
            w = threading.Thread(target=worker)
            # Stub out the private condition variable's lock acquire method.
            # This acquires the lock and then waits until the child has forked
            # before returning, which will release the lock soon after. If
            # someone else tries to fix this test case by acquiring this lock
            # before forking instead of resetting it, the test case will
            # deadlock when it shouldn't.
            condition = w._block
            orig_acquire = condition.acquire
            call_count_lock = threading.Lock()
            call_count = 0
            def my_acquire():
                global call_count
                global start_fork
                orig_acquire() # LOCK ACQUIRED HERE
                start_fork = True
                if call_count == 0:
                    while not finish_join:
                        time.sleep(0.01) # WORKER THREAD FORKS HERE
                with call_count_lock:
                    call_count += 1
            condition.acquire = my_acquire
            w.start()
            w.join()
            print('end of main')
            """
        self.assertScriptHasOutput(script, "end of main\n")
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_5_clear_waiter_locks_to_avoid_crash(self):
        # Check that a spawned thread that forks doesn't segfault on certain
        # platforms, namely OS X. This used to happen if there was a waiter
        # lock in the thread's condition variable's waiters list. Even though
        # we know the lock will be held across the fork, it is not safe to
        # release locks held across forks on all platforms, so releasing the
        # waiter lock caused a segfault on OS X. Furthermore, since locks on
        # OS X are (as of this writing) implemented with a mutex + condition
        # variable instead of a semaphore, while we know that the Python-level
        # lock will be acquired, we can't know if the internal mutex will be
        # acquired at the time of the fork.
        script = """if True:
            import os, time, threading
            start_fork = False
            def worker():
                # Wait until the main thread has attempted to join this thread
                # before continuing.
                while not start_fork:
                    time.sleep(0.01)
                childpid = os.fork()
                if childpid != 0:
                    # Parent process just waits for child.
                    (cpid, rc) = os.waitpid(childpid, 0)
                    assert cpid == childpid
                    assert rc == 0
                    print('end of worker thread')
                else:
                    # Child process should just return.
                    pass
            w = threading.Thread(target=worker)
            # Stub out the private condition variable's _release_save method.
            # This releases the condition's lock and flips the global that
            # causes the worker to fork. At this point, the problematic waiter
            # lock has been acquired once by the waiter and has been put onto
            # the waiters list.
            condition = w._block
            orig_release_save = condition._release_save
            def my_release_save():
                global start_fork
                orig_release_save()
                # Waiter lock held here, condition lock released.
                start_fork = True
            condition._release_save = my_release_save
            w.start()
            w.join()
            print('end of main thread')
            """
        output = "end of worker thread\nend of main thread\n"
        self.assertScriptHasOutput(script, output)
    def test_6_daemon_threads(self):
        # Check that a daemon thread cannot crash the interpreter on shutdown
        # by manipulating internal structures that are being disposed of in
        # the main thread.
        script = """if True:
            import os
            import random
            import sys
            import time
            import threading
            thread_has_run = set()
            def random_io():
                '''Loop for a while sleeping random tiny amounts and doing some I/O.'''
                while True:
                    in_f = open(os.__file__, 'rb')
                    stuff = in_f.read(200)
                    null_f = open(os.devnull, 'wb')
                    null_f.write(stuff)
                    time.sleep(random.random() / 1995)
                    null_f.close()
                    in_f.close()
                    thread_has_run.add(threading.current_thread())
            def main():
                count = 0
                for _ in range(40):
                    new_thread = threading.Thread(target=random_io)
                    new_thread.daemon = True
                    new_thread.start()
                    count += 1
                while len(thread_has_run) < count:
                    time.sleep(0.001)
                # Trigger process shutdown
                sys.exit(0)
            main()
            """
        rc, out, err = assert_python_ok('-c', script)
        self.assertFalse(err)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_reinit_tls_after_fork(self):
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.
        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                os.waitpid(pid, 0)
            else:
                os._exit(0)
        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
class ThreadingExceptionTests(BaseTestCase):
    """Exceptions raised for misuse of the threading API (double start,
    self-join, join of a never-started thread, daemonizing a live thread),
    plus a thread-stack recursion-limit check on OS X."""
    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)
    def test_joining_current_thread(self):
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join);
    def test_joining_inactive_thread(self):
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)
    def test_daemonize_active_thread(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
    @unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
            import threading
            def recurse():
                return recurse()
            def outer():
                try:
                    recurse()
                except RuntimeError:
                    pass
            w = threading.Thread(target=outer)
            w.start()
            w.join()
            print('end of main thread')
            """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error")
        self.assertEqual(data, expected_output)
# Concrete instantiations of the generic synchronization-primitive test
# mixins from test.lock_tests, each parameterized with the threading
# implementation under test (including both the pure-Python and C RLock).
class LockTests(lock_tests.LockTests):
    locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
    locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # An Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
    barriertype = staticmethod(threading.Barrier)
def test_main():
    """Entry point for regrtest: run every test class in this module."""
    test.support.run_unittest(LockTests, PyRLockTests, CRLockTests, EventTests,
                              ConditionAsRLockTests, ConditionTests,
                              SemaphoreTests, BoundedSemaphoreTests,
                              ThreadTests,
                              ThreadJoinOnShutdown,
                              ThreadingExceptionTests,
                              BarrierTests
                              )
if __name__ == "__main__":
    test_main()
| 37.459391 | 87 | 0.57531 |
f92fbad81d7a62f2c6ac7c051f85b48923ed8a97 | 584 | py | Python | packages/api-server/api_server/models/building_map.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | [
"Apache-2.0"
] | 23 | 2021-04-13T23:01:12.000Z | 2022-03-21T02:15:24.000Z | packages/api-server/api_server/models/building_map.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | [
"Apache-2.0"
] | 326 | 2021-03-10T17:32:17.000Z | 2022-03-30T04:42:14.000Z | packages/api-server/api_server/models/building_map.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | [
"Apache-2.0"
] | 13 | 2021-04-10T10:33:36.000Z | 2022-02-22T15:39:58.000Z | from typing import List
from . import tortoise_models as ttm
from .ros_pydantic import rmf_building_map_msgs
class AffineImage(rmf_building_map_msgs.AffineImage):
data: str
class Level(rmf_building_map_msgs.Level):
images: List[AffineImage]
class BuildingMap(rmf_building_map_msgs.BuildingMap):
levels: List[Level]
@staticmethod
def from_tortoise(tortoise: ttm.BuildingMap) -> "BuildingMap":
return BuildingMap(**tortoise.data)
async def save(self) -> None:
await ttm.BuildingMap.update_or_create({"data": self.dict()}, id_=self.name)
| 24.333333 | 84 | 0.746575 |
d5703c466ba425597183a01b4ab1865e8a7f3b20 | 3,327 | py | Python | home_server_config.py | eea/eea.docker.matrix.synapse | e90079e1b4b031388c616f89fd6bc7afdd936302 | [
"Apache-2.0"
] | null | null | null | home_server_config.py | eea/eea.docker.matrix.synapse | e90079e1b4b031388c616f89fd6bc7afdd936302 | [
"Apache-2.0"
] | 1 | 2018-02-02T11:51:19.000Z | 2018-02-02T16:58:59.000Z | home_server_config.py | eea/eea.docker.matrix.synapse | e90079e1b4b031388c616f89fd6bc7afdd936302 | [
"Apache-2.0"
] | 2 | 2018-04-16T13:02:21.000Z | 2018-04-16T20:09:44.000Z | #!/usr/bin/env python
import yaml
import os
import distutils.util
import sys
db_type = os.getenv('DATABASE', 'sqlite')
postgres_host = os.getenv('POSTGRES_HOST', 'db')
postgres_port = os.getenv('POSTGRES_PORT', 5432)
db_name = os.getenv('DB_NAME', 'synapse')
db_user = os.getenv('DB_USER', 'postgres')
db_pasword = os.getenv('DB_PASSWORD', '')
email_from = os.getenv('EMAIL_FROM', 'Riot EEA <eea@eea.com>')
riot_base_url = os.getenv('RIOT_BASE_URL', '')
public_base_url = os.getenv('PUBLIC_BASE_URL', '')
identity_url = os.getenv('IDENTITY_URL', 'http://identity:8090')
smtp_host = os.getenv('SMTP_HOST', 'postfix')
smtp_port = os.getenv('SMTP_PORT', '25')
turn_allow_guests = os.getenv('TURN_GUESTS', False)
mxisd_token = os.getenv('MXISD_TOKEN', '')
mxisd_as_token = os.getenv('MXISD_AS_TOKEN', 'testmxisd')
enable_registration = bool(distutils.util.strtobool(os.getenv('REGISTRATION_ENABLED', 'no')))
print 'DATABASE:', db_type
print 'POSTGRES_HOST:', postgres_host
print 'POSTGRES_PORT:', postgres_port
print 'DB_NAME:', db_name
print 'DB_USER:', db_user
print 'DB_PASSWORD:', db_pasword
print 'REGISTRATION_ENABLED:', enable_registration
if db_type not in ('sqlite', 'postgresql'):
print "DATABASE env is wrong: %s" % (db_type)
sys.exit(1)
if len(sys.argv)>1:
filename = sys.argv[1]
else:
filename = "/data/homeserver.yaml"
# Load the existing homeserver.yaml, apply our settings, and write it back.
# Fixes vs. the original: the config file handle is now closed (it was opened,
# never closed, and bound to a name shadowing the `file` builtin), and
# safe_load() is used so the YAML cannot construct arbitrary Python objects.
with open(filename) as config_file:
    yaml_doc = yaml.safe_load(config_file)
# default values
yaml_doc['pid_file'] = '/data/homeserver.pid'
yaml_doc['log_file'] = '/data/homeserver.log'
yaml_doc['web_client'] = False
yaml_doc['web_client_location'] = '/webclient'
yaml_doc['uploads_path'] = '/uploads'
yaml_doc['media_store_path'] = '/data/media_store'
yaml_doc['enable_registration'] = enable_registration
yaml_doc['turn_allow_guests'] = turn_allow_guests
if db_type == 'sqlite':
    yaml_doc['database'] = {'name': 'sqlite3', 'args': {'database': '/data/homeserver.db'}}
elif db_type == 'postgresql':
    yaml_doc['database'] = {'name': 'psycopg2', 'args': {'user': db_user, 'password': db_pasword, 'database': db_name, 'host': postgres_host, 'cp_min': 5, 'cp_max': 10}}
yaml_doc['email'] = {'enable_notifs': 'True', 'smtp_host': smtp_host, 'smtp_port': smtp_port, 'notif_from': email_from, 'app_name': 'Matrix', 'template_dir': '/synapse_templates', 'notif_template_html': 'notif_mail.html', 'notif_template_text': 'notif_mail.txt', 'notif_for_new_users': 'True', 'riot_base_url': riot_base_url}
yaml_doc['password_providers'] = [{ 'module': 'rest_auth_provider.RestAuthProvider', 'config': { 'endpoint': identity_url} } ]
yaml_doc['public_baseurl'] = public_base_url
# Re-register the mxisd app service from scratch on every run so a stale
# entry never survives a token change.
if '/data/appservice-mxisd.yaml' in yaml_doc['app_service_config_files']:
    yaml_doc['app_service_config_files'].remove('/data/appservice-mxisd.yaml')
if mxisd_token:
    yaml_doc['app_service_config_files'].append('/data/appservice-mxisd.yaml')
    mxisd_config = { 'id': 'appservice-mxisd', 'url': identity_url, 'as_token': mxisd_as_token, 'hs_token': mxisd_token, 'sender_localpart': "appservice-mxisd", 'namespaces': { 'users': [ { 'regex': '@*', 'exclusive': False }], 'aliases': [], 'rooms': [] } }
    with open("/data/appservice-mxisd.yaml", "w") as f:
        yaml.dump(mxisd_config, f, default_flow_style = False)
with open(filename, "w") as f:
    yaml.dump(yaml_doc, f, default_flow_style = False)
139af67f3dc467e96089ee973a589e5779886a8a | 2,128 | py | Python | scripts/get_reaxys_data.py | SilviaAmAm/scscore | 2d351ae1b6e390625c6ad56d50e4eb4ba0e20ece | [
"MIT"
] | 57 | 2018-01-30T20:04:33.000Z | 2022-03-29T11:30:56.000Z | scripts/get_reaxys_data.py | SilviaAmAm/scscore | 2d351ae1b6e390625c6ad56d50e4eb4ba0e20ece | [
"MIT"
] | 12 | 2018-02-05T16:49:44.000Z | 2021-01-20T14:51:03.000Z | scripts/get_reaxys_data.py | SilviaAmAm/scscore | 2d351ae1b6e390625c6ad56d50e4eb4ba0e20ece | [
"MIT"
] | 29 | 2018-01-30T22:15:28.000Z | 2022-03-09T20:03:43.000Z | import os
from pymongo import MongoClient
import rdkit.Chem as Chem
'''
Get examples from Reaxys where...
(a) we can parse the reactants and products
(b) there is a single product (product salts == multiple products)
(c) there is at least one instance that is explicitly single step
This is meant to work with data hosted in a MongoDB
While we can't include the actual data, this shows our preprocessing pipeline. The saved file
consists of the reaction smiles string, the maximum number of atoms in the reactants or products,
and the document ID for traceability.
'''
limit = 10 # small for demonstration
client = MongoClient('mongodb://username:password@host.address/admin', 27017)
reaction_db = client['database_name']['reactions']
instance_db = client['database_name']['instances']
project_root = os.path.dirname(os.path.dirname(__file__))
with open(os.path.join(project_root, 'data', 'reaxys_limit%i.txt' % limit), 'w') as f:
i = 0
for rx_doc in reaction_db.find({'RXN_SMILES': {'$exists': True}}, ['_id', 'RXN_SMILES', 'RX_NVAR']).sort('_id', 1):
try:
r, p = rx_doc['RXN_SMILES'].split('>>')
if (not r) or (not p) or ('.' in p):
continue
r_mol = Chem.MolFromSmiles(str(r))
p_mol = Chem.MolFromSmiles(str(p))
if (not r_mol) or (not p_mol):
continue
rxd_id_list = ['%i-%i' % (rx_doc['_id'], j) for j in range(1, int(rx_doc['RX_NVAR']) + 1)]
single_step = False
for rxd_doc in instance_db.find({'_id': {'$in': rxd_id_list}}, ['RXD_STP']):
if rxd_doc['RXD_STP'] == ['1']:
single_step = True
break
if not single_step:
continue
[a.ClearProp('molAtomMapNumber') for a in r_mol.GetAtoms() if a.HasProp('molAtomMapNumber')]
[a.ClearProp('molAtomMapNumber') for a in p_mol.GetAtoms() if a.HasProp('molAtomMapNumber')]
n = max(r_mol.GetNumAtoms(), p_mol.GetNumAtoms())
f.write('%s>>%s %i %i\n' % (Chem.MolToSmiles(r_mol,True), Chem.MolToSmiles(p_mol,True), n, rx_doc['_id']))
i += 1
if i % 1000 == 0:
print('Wrote %i' % i)
if i >= limit:
break
except Exception as e:
print(e)
| 38 | 117 | 0.663064 |
88ab6f32751baac31cda210300c417fd25c9bce1 | 2,852 | py | Python | ayush/src/ayush/first_step_on_vertex_visit.py | Ayush8120/Improved-MR-DFS-PX4 | 1f64db7b801cb97a2ccb26a7de95d1c92b0666f4 | [
"MIT"
] | 6 | 2021-08-02T13:52:37.000Z | 2022-02-23T11:47:33.000Z | ayush/src/ayush/first_step_on_vertex_visit.py | Ayush8120/Improved-MR-DFS-PX4 | 1f64db7b801cb97a2ccb26a7de95d1c92b0666f4 | [
"MIT"
] | 1 | 2021-07-30T20:46:19.000Z | 2021-08-31T17:37:17.000Z | ayush/src/ayush/first_step_on_vertex_visit.py | Ayush8120/Improved-MR-DFS-PX4 | 1f64db7b801cb97a2ccb26a7de95d1c92b0666f4 | [
"MIT"
] | 1 | 2022-01-18T10:42:35.000Z | 2022-01-18T10:42:35.000Z | import rospy
import numpy as np
import pandas as pd
from ayush.initialize_graph import update_iteration, update_present_location
from ayush.mergematrices import MergeMatrices
from ayush.order_matrix import order_matrix
'''
Class for representing the Completed Edge Attributes, contains:
- row_tags : tags of the nodes of that edge name
- top: numpy array of the incidence angle from start node to end node
- bottom : numpy array of the incidence angke from end node to start node
- col_vector : numpy array of vertically stacked entries of top and bottom numpy arrays
- matrix : Dataframe that can be given as input to merge_matrices()
'''
class Id:
    """Attributes of a completed edge between two single-letter node labels.

    Exposes:
      row_tags   -- [start, end_node] labels of the edge's endpoints
      top        -- 1-element array: negated incidence entry start -> end
      bottom     -- 1-element array: negated incidence entry end -> start
      col_vector -- top stacked over bottom, shape (2, 1)
      matrix     -- DataFrame view of col_vector (rows = node tags,
                    single column = edge name), suitable for MergeMatrices
    """
    def __init__(self, start, end_node, edge, incidence_matrix):
        # Node labels are single capital letters; 'A' maps to row/col 0.
        src_idx = ord(start) - 65
        dst_idx = ord(end_node) - 65
        self.row_tags = [start, end_node]
        self.top = np.array([-1 * incidence_matrix[src_idx, dst_idx]])
        self.bottom = np.array([-1 * incidence_matrix[dst_idx, src_idx]])
        self.col_vector = np.vstack((self.top, self.bottom))
        self.matrix = pd.DataFrame(data=self.col_vector,
                                   index=self.row_tags,
                                   columns=[edge])
'''
Once we get to know what our next node to travel is we call this function to take care
Arguments : Robot Object List, robot index, Vertex Object List , iteration, start node name, end node name, full incidence_matrix
Returns : completed edge incidence matrix, updated Robot Object List, updated Vertex Object List
'''
def what_to_do_if_next_node_known(R, k, V, n, start, end_node, incidence_matrix=0):
    """Handle robot k's traversal from `start` to `end_node`.

    Builds the completed-edge record, then runs the on-arrival bookkeeping
    for the destination vertex.

    Returns (edge record, updated Robot list, updated Vertex list).
    """
    # Canonical edge name: the two endpoint labels in ascending order.
    if ord(start) < ord(end_node):
        first, second = start, end_node
    else:
        first, second = end_node, start
    edge = first + second
    edge_record = Id(start, end_node, edge, incidence_matrix)
    # Destination vertex index: 'A' -> 0, 'B' -> 1, ...
    [V, R] = first_step_on_arriving_at_vertex(V, ord(end_node) - 65, R, k, edge_record, n)
    return edge_record, R, V
'''
#V : list of Vertex objects , j : arrived vertex index
#R : list of Robot objects , k : robot index
#id: column vector corresponding to the completed edge traversed in reaching j vertex ; instance of Id class
#n : at which update the kth robot reaches the jth vertex
'''
def first_step_on_arriving_at_vertex(V,j,R,k,id,n):
    """Merge map knowledge when robot k arrives at vertex j.

    V : list of Vertex objects; j : index of the vertex just reached
    R : list of Robot objects;  k : index of the arriving robot
    id : Id instance for the edge just completed
    n : update count at which robot k reaches vertex j

    Returns the updated (V, R) lists.  After the call the vertex shares the
    robot's merged incidence matrix and tag lists (same objects, not copies).
    """
    # NOTE(review): an unvisited vertex is seeded to n-1 so the += 1 below
    # brings it to n — presumably syncing its clock with this update; confirm.
    if(V[j].iteration == 0):
        V[j].iteration = n-1
    # Fold the freshly completed edge into the robot's incidence matrix,
    # then fold in whatever the vertex already knew.
    [temp,E1_cap] = MergeMatrices(id.matrix,R[k].incidence_matrix) # temp : pandas Dataframe
    [R[k].incidence_matrix,E1_cap] = MergeMatrices(temp, V[j].incidence_matrix)
    # The robot now stands at the edge's end node (row_tags[1]).
    update_present_location(R[k],id.row_tags[1])
    R[k].iteration += 1
    V[j].iteration += 1
    [R[k].incidence_matrix, C] = order_matrix(R[k].incidence_matrix,E1_cap)
    # Vertex mirrors the robot's merged view (shared references).
    V[j].incidence_matrix = R[k].incidence_matrix
    V[j].row_tags = R[k].row_tags
    V[j].edge_tags = R[k].edge_tags
    return V,R
565a85cce4c38eb615396c9fd30126f9c7640047 | 10,952 | py | Python | tests/unit/modules/test_services.py | mxswift/taurus | 15c22226665367bd9d5ef4e2b98663d0535ff1a2 | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_services.py | mxswift/taurus | 15c22226665367bd9d5ef4e2b98663d0535ff1a2 | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_services.py | mxswift/taurus | 15c22226665367bd9d5ef4e2b98663d0535ff1a2 | [
"Apache-2.0"
] | null | null | null | import json
import os
import shutil
import zipfile
from os.path import join
from bzt import NormalShutdown, ToolError, TaurusConfigError
from bzt.engine import Service, Provisioning, EngineModule
from bzt.modules.blazemeter import CloudProvisioning
from bzt.modules.services import Unpacker, InstallChecker, AndroidEmulatorLoader, AppiumLoader, PipInstaller
from bzt.utils import get_files_recursive, EXE_SUFFIX, JavaVM, Node
from tests.unit import BZTestCase, RESOURCES_DIR, EngineEmul
from tests.unit.mocks import ModuleMock, BZMock
class TestPipInstaller(BZTestCase):
    """Tests for the PipInstaller service module."""

    def setUp(self):
        # Engine emulator configured with an (empty) pip-installer section.
        eng = EngineEmul()
        eng.config.merge({'services': {'pip-installer': []}})
        self.obj = PipInstaller()
        self.obj.engine = eng
class TestZipFolder(BZTestCase):
    """End-to-end tests for packing a script folder into a zip on the cloud
    provisioning side and unpacking it again on the worker side."""

    def test_pack_and_send_to_blazemeter(self):
        # Cloud provisioning should zip the java_package folder and schedule
        # an Unpacker service entry for the remote side.
        obj = CloudProvisioning()
        obj.engine = EngineEmul()
        obj.engine.config.merge({
            "execution": {
                "executor": "selenium",
                "concurrency": 5500,
                "locations": {
                    "us-east-1": 1,
                    "us-west": 2},
                "scenario": {
                    "script": RESOURCES_DIR + "selenium/junit/java_package"}},
            "modules": {
                "selenium": "bzt.modules.selenium.SeleniumExecutor",
                "cloud": "bzt.modules.blazemeter.CloudProvisioning",
                "junit": "bzt.modules.java.JUnitTester"},
            "provisioning": "cloud"
        })
        obj.engine.unify_config()
        obj.parameters = obj.engine.config['execution'][0]
        obj.settings["token"] = "FakeToken"
        # Stub out the BlazeMeter REST API: canned GET/POST/PATCH responses.
        mock = BZMock(obj.user)
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/web/elfinder/1?cmd=open&target=s1_Lw': {"files": []},
            'https://a.blazemeter.com/api/v4/multi-tests?projectId=1&name=Taurus+Cloud+Test': {"result": []},
            'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Cloud+Test': {
                "result": [{"id": 1, 'name': 'Taurus Cloud Test', "configuration": {"type": "taurus"}}]
            },
        })
        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1, 'workspaceId': 1}},
            'https://a.blazemeter.com/api/v4/multi-tests': {"result": {}},
            'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Cloud+Test': {
                "result": {"id": 1, "configuration": {"type": "taurus"}}
            },
            'https://a.blazemeter.com/api/v4/tests/1/files': {}
        })
        mock.mock_patch.update({'https://a.blazemeter.com/api/v4/tests/1': {"result": {}}})
        obj.prepare()
        # Exactly one unpack service entry must be scheduled for the zip.
        unpack_cfgs = obj.engine.config.get(Service.SERV)
        self.assertEqual(len(unpack_cfgs), 1)
        self.assertEqual(unpack_cfgs[0]['module'], Unpacker.UNPACK)
        self.assertEqual(unpack_cfgs[0][Unpacker.FILES], ['java_package.zip'])
        self.assertTrue(zipfile.is_zipfile(obj.engine.artifacts_dir + '/java_package.zip'))

    @staticmethod
    def __get_user_info():
        # NOTE(review): not referenced anywhere in this class as shown —
        # possibly leftover; confirm before removing.
        with open(RESOURCES_DIR + "json/blazemeter-api-user.json") as fhd:
            return json.loads(fhd.read())

    def test_receive_and_unpack_on_worker(self):
        # Worker-side counterpart: given a zip in the artifacts dir, the
        # Unpacker must restore the original directory tree.
        obj = Unpacker()
        obj.engine = EngineEmul()
        obj.engine.config.merge({
            "execution": {
                "executor": "selenium",
                "concurrency": 5500,
                "scenario": {
                    "script": "java_package.zip"}},
            "modules": {
                "selenium": "bzt.modules.selenium.SeleniumExecutor",
                "cloud": "bzt.modules.blazemeter.CloudProvisioning"},
            "provisioning": "local"
        })
        obj.engine.file_search_paths = [obj.engine.artifacts_dir]
        obj.parameters["files"] = ["java_package.zip"]
        # create archive and put it in artifact dir
        source = RESOURCES_DIR + "selenium/junit/java_package"
        zip_name = obj.engine.create_artifact('java_package', '.zip')
        with zipfile.ZipFile(zip_name, 'w') as zip_file:
            for filename in get_files_recursive(source):
                zip_file.write(filename, filename[len(os.path.dirname(source)):])
        obj.prepare()
        # check unpacked tree: relative paths must match the original tree
        destination = obj.engine.artifacts_dir + '/java_package'
        result_tree = set(filename[len(destination):] for filename in get_files_recursive(destination))
        original_tree = set(filename[len(source):] for filename in get_files_recursive(source))
        self.assertEqual(result_tree, original_tree)

    def test_no_work_prov(self):
        # A service pinned to 'run-at: local' must not run under cloud
        # provisioning, and vice versa.
        obj = Service()
        obj.engine = EngineEmul()
        obj.engine.config[Provisioning.PROV] = 'cloud'
        obj.parameters['run-at'] = 'local'
        self.assertFalse(obj.should_run())
        obj.parameters['run-at'] = 'cloud'
        self.assertTrue(obj.should_run())
class TestToolInstaller(BZTestCase):
    """Tests for the InstallChecker service (module availability checks)."""

    def _make_checker(self, modules):
        # Build an InstallChecker whose engine config declares the given
        # module aliases (alias -> fully qualified class name).
        obj = InstallChecker()
        obj.engine = EngineEmul()
        section = obj.engine.config.get("modules")
        for alias, classname in modules.items():
            section[alias] = classname
        return obj

    def test_regular(self):
        obj = self._make_checker({
            "base": EngineModule.__module__ + "." + EngineModule.__name__,
            "dummy": ModuleMock.__module__ + "." + ModuleMock.__name__,
        })
        # InstallChecker deliberately stops the engine once checks are done.
        self.assertRaises(NormalShutdown, obj.prepare)

    def test_problematic(self):
        # An unloadable module spec must surface as a ToolError.
        obj = self._make_checker({"err": "hello there"})
        self.assertRaises(ToolError, obj.prepare)

    def test_include_only_good(self):
        obj = self._make_checker({
            "base": EngineModule.__module__ + "." + EngineModule.__name__,
            "dummy": ModuleMock.__module__ + "." + ModuleMock.__name__,
            "err": "hello there",
        })
        # Restricting checks to the healthy modules skips the broken one.
        obj.settings["include"] = ["base", "dummy"]
        self.assertRaises(NormalShutdown, obj.prepare)

    def test_exclude_problematic(self):
        obj = self._make_checker({"err": "hello there"})
        obj.settings["exclude"] = ["err"]
        self.assertRaises(NormalShutdown, obj.prepare)

    def test_include_string(self):
        obj = self._make_checker({
            "base": EngineModule.__module__ + "." + EngineModule.__name__,
            "dummy": ModuleMock.__module__ + "." + ModuleMock.__name__,
            "err": "hello there",
        })
        # 'include' also accepts a comma-separated string.
        obj.settings["include"] = "base,dummy"
        self.assertRaises(NormalShutdown, obj.prepare)
class TestAndroidEmulatorLoader(BZTestCase):
    """Tests for the AndroidEmulatorLoader service.

    setUp monkeypatches ``AndroidEmulatorLoader.tool_is_started`` so no real
    emulator process is probed; tearDown restores the original attribute.
    """

    def setUp(self):
        engine = EngineEmul()
        engine.config.merge({'services': {'android-emulator-loader': {}}})
        # Keep a reference so tearDown can undo the class-level patch.
        self.check_if_emulator_started = AndroidEmulatorLoader.tool_is_started
        AndroidEmulatorLoader.tool_is_started = lambda slf: True
        self.android = AndroidEmulatorLoader()
        self.android.engine = engine
        self.android.settings = engine.config['services']['android-emulator-loader']

    def tearDown(self):
        AndroidEmulatorLoader.tool_is_started = self.check_if_emulator_started

    def test_no_sdk(self):
        # Neither config path nor ANDROID_HOME -> configuration error.
        os.environ['ANDROID_HOME'] = ''
        self.assertRaises(TaurusConfigError, self.android.prepare)

    def test_sdk_from_conf(self):
        # Explicit (bogus) path from config wins; failure is a ToolError.
        os.environ['ANDROID_HOME'] = ''
        self.android.settings['path'] = 'from_config'
        self.assertRaises(ToolError, self.android.prepare)
        self.assertIn('from_config', self.android.tool_path)

    def test_sdk_from_env(self):
        # Path taken from ANDROID_HOME when config has none.
        sdk_path = join(self.android.engine.artifacts_dir, 'there_is_no_sdk')
        os.environ['ANDROID_HOME'] = sdk_path
        self.assertRaises(ToolError, self.android.prepare)
        self.assertIn(sdk_path, self.android.tool_path)

    def test_no_avd(self):
        # Emulator present but no AVD configured -> startup must fail.
        self.create_fake_android_emulator()
        self.android.prepare()
        self.assertRaises(TaurusConfigError, self.android.startup)

    def test_two_way(self):
        # Config path takes precedence over ANDROID_HOME; full lifecycle runs.
        config_path = join(self.android.engine.artifacts_dir, 'sdk', 'tools', 'emulator' + EXE_SUFFIX)
        self.android.settings['path'] = config_path
        env_path = 'from_env'
        os.environ['ANDROID_HOME'] = env_path
        self.create_fake_android_emulator()
        self.android.settings['avd'] = 'my_little_android'
        self.android.prepare()
        self.assertEqual(config_path, self.android.tool_path)
        self.android.startup()
        self.android.shutdown()
        self.android.post_process()

    def create_fake_android_emulator(self):
        # Copy a stub 'emulator' executable into <artifacts>/sdk/tools and
        # point the service's configured path at it.
        sdk_dir = join(self.android.engine.artifacts_dir, 'sdk')
        src_dir = RESOURCES_DIR + 'android-emulator'
        dest_dir = join(sdk_dir, 'tools')
        os.mkdir(sdk_dir)
        os.mkdir(dest_dir)
        tool_path = join(dest_dir, 'emulator' + EXE_SUFFIX)
        shutil.copy2(join(src_dir, 'emulator' + EXE_SUFFIX), dest_dir)
        os.chmod(tool_path, 0o755)
        shutil.copy2(join(src_dir, 'emulator.py'), join(dest_dir, 'emulator.py'))
        self.android.settings['path'] = tool_path
class TestAppiumLoader(BZTestCase):
    """Tests for the AppiumLoader service.

    setUp monkeypatches the started/installed checks on AppiumLoader, Node
    and JavaVM so no real tools are probed; tearDown restores them all.
    """

    def setUp(self):
        engine = EngineEmul()
        engine.config.merge({'services': {'appium-loader': {}}})
        # Keep originals so tearDown can undo the class-level patches.
        self.check_if_appium_started = AppiumLoader.tool_is_started
        AppiumLoader.tool_is_started = lambda slf: True
        self.appium = AppiumLoader()
        self.appium.engine = engine
        self.appium.settings = engine.config['services']['appium-loader']
        self.check_if_node_installed = Node.check_if_installed
        self.check_if_java_installed = JavaVM.check_if_installed
        Node.check_if_installed = lambda slf: True
        JavaVM.check_if_installed = lambda slf: True

    def tearDown(self):
        AppiumLoader.tool_is_started = self.check_if_appium_started
        Node.check_if_installed = self.check_if_node_installed
        JavaVM.check_if_installed = self.check_if_java_installed

    def test_appium_not_installed(self):
        # A bogus configured path must surface as a ToolError in prepare().
        self.appium.settings['path'] = 'wrong_path'
        self.assertRaises(ToolError, self.appium.prepare)

    def test_appium_full_cycle(self):
        # With a stub appium binary in place, the whole lifecycle runs.
        self.create_fake_appium()
        self.appium.prepare()
        self.appium.startup()
        self.appium.shutdown()
        self.appium.post_process()

    def create_fake_appium(self):
        # Copy a stub 'appium' executable into the artifacts dir and point
        # the service's configured path at it.
        src_dir = RESOURCES_DIR + 'appium'
        dest_dir = self.appium.engine.artifacts_dir
        shutil.copy2(join(src_dir, 'appium' + EXE_SUFFIX), dest_dir)
        os.chmod(join(dest_dir, 'appium' + EXE_SUFFIX), 0o755)
        shutil.copy2(join(src_dir, 'appium.py'), dest_dir)
        self.appium.settings['path'] = join(dest_dir, 'appium' + EXE_SUFFIX)
| 42.449612 | 109 | 0.64244 |
751e50a54abecbd58af0d85f1980beef607fd401 | 8,364 | py | Python | immutable/core_py3.py | khinsen/ImmutablePy | ddad7edf90bbf4ddb81daf9a189ea6f7796c1f16 | [
"CECILL-B"
] | null | null | null | immutable/core_py3.py | khinsen/ImmutablePy | ddad7edf90bbf4ddb81daf9a189ea6f7796c1f16 | [
"CECILL-B"
] | null | null | null | immutable/core_py3.py | khinsen/ImmutablePy | ddad7edf90bbf4ddb81daf9a189ea6f7796c1f16 | [
"CECILL-B"
] | null | null | null | import abc
import collections
import collections.abc
import itertools
import types
#
# The metaclass is meant to be used only once, for defining the class
# Immutable. Its role is to rename the __init__ method and to prevent
# subclasses of Immutable to redefine mutation methods.
#
class ImmutableMeta(abc.ABCMeta):
    """Metaclass used (once) to define :class:`Immutable`.

    For every *subclass* of Immutable it (a) wraps the subclass's
    ``__init__`` so it runs through ``__init_caller__`` — which locks the
    instance once initialization completes — and (b) forbids the subclass
    from redefining mutation methods.
    """
    def __new__(mcls, name, bases, namespace):
        # Skip the checks for the Immutable base class itself, which is
        # defined in this very module.
        if name != 'Immutable' or bases != (object,) \
           or namespace['__module__'] != mcls.__module__:
            if '__init__' in namespace:
                # Captured in the closure below; each class gets its own.
                init_method = namespace['__init__']
                def init_wrapper(self, *args, **kwargs):
                    self.__init_caller__(init_method, *args, **kwargs)
                namespace['__init__'] = init_wrapper
            # Subclasses may not supply their own mutation hooks.
            for methodname in ['__setattr__', '__delattr__',
                               '__setitem__', '__delitem__']:
                if methodname in namespace:
                    raise TypeError("method %s not allowed " % methodname +
                                    "in an immutable type")
        cls = super(ImmutableMeta, mcls).__new__(mcls, name, bases, namespace)
        return cls
#
# Immutable is an abstract base class for immutable Python objects.
# It allows assignments to attributes only inside __init__, and verifies
# that all attribute values are themselves immutable.
#
class Immutable(object, metaclass=ImmutableMeta):
    """Abstract base class for immutable Python objects.

    Attribute assignment is only possible while ``__init__`` runs; after
    that the instance is locked and every mutation attempt raises
    ``TypeError``.  On lock, all attribute values must themselves be
    Immutable.  Note: both flags below are name-mangled class attributes;
    assigning ``self.__locked = True`` creates an *instance* attribute that
    shadows the class default.
    """
    __locked = False        # becomes True (per instance) once init finishes
    __init_nesting = 0      # depth of chained __init__ calls (super() chains)

    def __init_caller__(self, method, *args, **kwargs):
        # Called by the metaclass-generated wrapper around subclass __init__.
        if self.__locked:
            raise ValueError("immutable object already initialized")
        try:
            if method is not None:
                # Track nesting so a super().__init__() call does not lock
                # the instance before the outermost __init__ finishes.
                self.__init_nesting += 1
                method(self, *args, **kwargs)
                self.__init_nesting -= 1
            if self.__init_nesting == 0:
                # Immutability is only guaranteed if every attribute value
                # is itself immutable.
                for attr, value in self.__dict__.items():
                    if not isinstance(value, Immutable):
                        raise TypeError("value of attribute %s not immutable"
                                        % attr)
        finally:
            # Lock even if __init__ raised, so a half-built object cannot
            # be mutated afterwards.
            if self.__init_nesting == 0:
                self.__locked = True

    def __init__(self):
        self.__init_caller__(None)

    def __setattr__(self, *args):
        if self.__locked:
            raise TypeError("immutable instances cannot be modified")
        object.__setattr__(self, *args)

    def __delattr__(self, *args):
        if self.__locked:
            raise TypeError("immutable instances cannot be modified")
        object.__delattr__(self, *args)

    def __setitem__(self, *args):
        if self.__locked:
            raise TypeError("immutable instances cannot be modified")
        object.__setitem__(self, *args)

    def __delitem__(self, *args):
        if self.__locked:
            raise TypeError("immutable instances cannot be modified")
        object.__delitem__(self, *args)

    # Default implementation of equality, based on value equality
    # of all attributes.
    def __eq__(self, other):
        return self.__class__ is other.__class__ \
               and set(self.__dict__.keys()) == set(other.__dict__.keys()) \
               and all(self.__dict__[k] == other.__dict__[k]
                       for k in self.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)

    # Default implementation of hash
    def __hash__(self):
        return hash(tuple(self.__dict__.values()))
# Standard Python types that are immutable.
# NOTE: this module was converted from Python 2, where ``long`` and
# ``unicode`` were registered separately; under Python 3 those collapse
# onto ``int`` and ``str``, so the former duplicate registrations have
# been removed — each type is registered exactly once.
Immutable.register(bool)
Immutable.register(int)
Immutable.register(float)
Immutable.register(str)
Immutable.register(type(None))
Immutable.register(types.BuiltinFunctionType)
Immutable.register(types.FunctionType)
Immutable.register(types.BuiltinMethodType)
Immutable.register(types.MethodType)
Immutable.register(types.GeneratorType)
#
# An ImmutableTuple differs from a standard tuple in that all its elements
# must be immutable.
#
class ImmutableTuple(tuple):
    """A tuple whose elements must all be :class:`Immutable` instances.

    Construction raises ``TypeError`` if any element is mutable.  Slicing,
    concatenation and :meth:`append` all return new ``ImmutableTuple``
    objects.
    """
    __slots__ = []

    def __init__(self, *args):
        # tuple.__new__ has already stored the elements; only validate here.
        for i, elt in enumerate(self):
            if not isinstance(elt, Immutable):
                raise TypeError("tuple element %s is not immutable" % i)

    def __add__(self, other):
        return ImmutableTuple(tuple.__add__(self, other))

    # Python 3 always routes slicing through __getitem__ with a slice
    # argument; the Python-2-only __getslice__ hook this class used to
    # define was dead code in this _py3 module and has been removed.
    def __getitem__(self, item):
        if isinstance(item, slice):
            return ImmutableTuple(tuple.__getitem__(self, item))
        else:
            return tuple.__getitem__(self, item)

    def append(self, item):
        """Return a new ImmutableTuple with *item* appended (non-mutating)."""
        return self + ImmutableTuple((item,))
Immutable.register(ImmutableTuple)
#
# An ImmutableSet differs from a frozenset in that all its elements
# must be immutable.
#
class ImmutableSet(frozenset):
    """A frozenset whose members must all be :class:`Immutable` instances."""
    __slots__ = []

    def __new__(cls, *args):
        obj = super(ImmutableSet, cls).__new__(cls, *args)
        # The set is already built by frozenset.__new__; only validate here.
        for index, member in enumerate(obj):
            if not isinstance(member, Immutable):
                raise TypeError("set element %s is not immutable" % index)
        return obj

    def add(self, item):
        """Return a new ImmutableSet with *item* added (non-mutating)."""
        return ImmutableSet(itertools.chain(iter(self), (item,)))
# Add a wrapper around all built-in methods of frozenset that return sets.
def fix_method(method_name):
    """Re-bind frozenset.<method_name> on ImmutableSet so its result is
    wrapped in an ImmutableSet instead of a plain frozenset."""
    original = getattr(frozenset, method_name)
    def wrapper(self, *args, **kwargs):
        return ImmutableSet(original(self, *args, **kwargs))
    setattr(ImmutableSet, method_name, wrapper)

# Binary set operators (plus their reflected variants) and the query
# methods that hand back new sets.
method_names = ['__' + prefix + name + '__'
                for name in ['and', 'or', 'sub', 'xor']
                for prefix in ['', 'r']]
method_names += ['copy', 'difference', 'intersection',
                 'symmetric_difference', 'union']
for method_name in method_names:
    fix_method(method_name)

Immutable.register(ImmutableSet)
#
# An ImmutableDict has immutable keys and values and doesn't permit any
# modifications after initialization.
#
class ImmutableDict(collections.abc.Mapping):
    """An immutable mapping: keys and values must be Immutable, and the
    instance rejects any attribute modification once ``__init__`` is done.

    Fixes over the previous revision:
    * ``collections.abc.Mapping`` replaces the ``collections.Mapping``
      alias, which was removed in Python 3.10.
    * The lock actually engages now.  The old code ended ``__init__`` with
      ``__locked = True``, which only bound a *local* variable, so
      ``__setattr__`` always saw the class-level ``False`` and instances
      were silently mutable.
    """
    __slots__ = ['_d', '_locked']

    def __init__(self, *args, **kwargs):
        # Bypass __setattr__ so the lock flag itself can be initialized
        # (reading an unset slot would raise AttributeError otherwise).
        object.__setattr__(self, '_locked', False)
        self._d = dict(*args, **kwargs)
        for key, value in self._d.items():
            if not isinstance(key, Immutable):
                raise TypeError("key %s is not immutable"
                                % str(key))
            if not isinstance(value, Immutable):
                raise TypeError("value for key %s is not immutable"
                                % str(key))
        # Engage the lock on the *instance* (not a local variable).
        object.__setattr__(self, '_locked', True)

    def __getitem__(self, item):
        return self._d[item]

    def __iter__(self):
        return iter(self._d)

    def __len__(self):
        return len(self._d)

    def __hash__(self):
        # Hash over the (key, value) pairs; all entries are hashable
        # because they are Immutable.
        return hash(frozenset(iter(self.items())))

    def __setattr__(self, attr, value):
        # getattr with default: during __init__ the slot may not exist yet.
        if getattr(self, '_locked', False):
            raise TypeError("immutable objects cannot be modified")
        object.__setattr__(self, attr, value)

    def __delattr__(self, attr):
        raise TypeError("immutable objects cannot be modified")

    def update(self, *args, **kwargs):
        """Return a *new* ImmutableDict with the given entries merged in."""
        d = self._d.copy()
        d.update(*args, **kwargs)
        return ImmutableDict(d)
Immutable.register(ImmutableDict)
#
# A utility function to convert nested combinations of lists, tuples,
# sets, and dictionaries to an immutable equivalent.
#
_immutable_conversion = {}
def immutable(obj):
if isinstance(obj, Immutable):
return obj
elif isinstance(obj, list) or isinstance(obj, tuple):
return ImmutableTuple(immutable(x) for x in obj)
elif isinstance(obj, set) or isinstance(obj, frozenset):
return ImmutableSet(immutable(x) for x in obj)
elif isinstance(obj, dict):
return ImmutableDict((immutable(key), immutable(value))
for key, value in obj.items())
else:
for klass, converter in _immutable_conversion.items():
if isinstance(obj, klass):
return converter(obj)
raise ValueError("object has no known immutable equivalent")
def register_immutable_converter(klass, converter):
if klass in _immutable_conversion:
raise ValueError("converter for %s already set" % str(klass))
_immutable_conversion[klass] = converter
| 33.725806 | 78 | 0.632712 |
18b23486d9b547bf29e5ad177405304043378c68 | 7,278 | py | Python | qiskit/aqua/components/optimizers/aqgd.py | Cristian-Malinescu/qiskit-aqua | b29596800447c3130a20ec72a18b7fd8ed9fdb2f | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/components/optimizers/aqgd.py | Cristian-Malinescu/qiskit-aqua | b29596800447c3130a20ec72a18b7fd8ed9fdb2f | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/components/optimizers/aqgd.py | Cristian-Malinescu/qiskit-aqua | b29596800447c3130a20ec72a18b7fd8ed9fdb2f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Analytic Quantum Gradient Descent (AQGD) optimizer """
import logging
from copy import deepcopy
from numpy import pi, absolute, array, zeros
from qiskit.aqua.utils.validation import validate_range_exclusive_max
from .optimizer import Optimizer, OptimizerSupportLevel
logger = logging.getLogger(__name__)
class AQGD(Optimizer):
    """Analytic Quantum Gradient Descent (AQGD) optimizer.

    Performs gradient descent optimization with a momentum term and analytic gradients
    for parametrized quantum gates, i.e. Pauli Rotations. See, for example:

    * K. Mitarai, M. Negoro, M. Kitagawa, and K. Fujii. (2018).
      Quantum circuit learning. Phys. Rev. A 98, 032309.
      https://arxiv.org/abs/1803.00745

    * Maria Schuld, Ville Bergholm, Christian Gogolin, Josh Izaac, Nathan Killoran. (2019).
      Evaluating analytic gradients on quantum hardware. Phys. Rev. A 99, 032331.
      https://arxiv.org/abs/1811.11184

    for further details on analytic gradients of parametrized quantum gates.

    Gradients are computed "analytically" using the quantum circuit when evaluating
    the objective function.
    """
    _OPTIONS = ['maxiter', 'eta', 'tol', 'disp']

    def __init__(self,
                 maxiter: int = 1000,
                 eta: float = 3.0,
                 tol: float = 1e-6,
                 disp: bool = False,
                 momentum: float = 0.25) -> None:
        """
        Args:
            maxiter: Maximum number of iterations, each iteration evaluation gradient.
            eta: The coefficient of the gradient update. Increasing this value
                results in larger step sizes: param = previous_param - eta * deriv
            tol: The convergence criteria that must be reached before stopping.
                Optimization stops when: absolute(loss - previous_loss) < tol
            disp: Set to True to display convergence messages.
            momentum: Bias towards the previous gradient momentum in current update.
                Must be within the bounds: [0,1)
        """
        validate_range_exclusive_max('momentum', momentum, 0, 1)
        super().__init__()
        self._eta = eta
        self._maxiter = maxiter
        self._tol = tol if tol is not None else 1e-6
        self._disp = disp
        self._momentum_coeff = momentum
        self._previous_loss = None

    def get_support_level(self):
        """ Return support level dictionary """
        return {
            'gradient': OptimizerSupportLevel.ignored,
            'bounds': OptimizerSupportLevel.ignored,
            'initial_point': OptimizerSupportLevel.required
        }

    def deriv(self, j, params, obj):
        """
        Obtains the analytical quantum derivative of the objective function with
        respect to the jth parameter using the parameter-shift rule.

        Args:
            j (int): Index of the parameter to compute the derivative of.
            params (array): Current value of the parameters to evaluate
                            the objective function at.
            obj (callable): Objective function.

        Returns:
            float: The derivative of the objective function w.r.t. j
        """
        # create a copy of the parameters with the positive shift
        plus_params = deepcopy(params)
        plus_params[j] += pi / 2

        # create a copy of the parameters with the negative shift
        minus_params = deepcopy(params)
        minus_params[j] -= pi / 2

        # return the derivative value (parameter-shift rule)
        return 0.5 * (obj(plus_params) - obj(minus_params))

    def update(self, j, params, deriv, mprev):
        """
        Updates the jth parameter based on the derivative and previous momentum.
        Note: mutates ``params`` in place and returns it.

        Args:
            j (int): Index of the parameter to compute the derivative of.
            params (array): Current value of the parameters to evaluate
                            the objective function at.
            deriv (float): Value of the derivative w.r.t. the jth parameter
            mprev (array): Array containing all of the parameter momentums

        Returns:
            tuple: params, new momentums
        """
        mnew = self._eta * (deriv * (1 - self._momentum_coeff) + mprev[j] * self._momentum_coeff)
        params[j] -= mnew
        return params, mnew

    def converged(self, objval, n=2):
        """
        Determines if the objective function has converged by finding the difference between
        the current value and the previous n values.

        Args:
            objval (float): Current value of the objective function.
            n (int): Number of previous steps which must be within the convergence criteria
                     in order to be considered converged. Using a larger number will prevent
                     the optimizer from stopping early.

        Returns:
            bool: Whether or not the optimization has converged.
        """
        if self._previous_loss is None:
            # Seed history so the very first call cannot report convergence.
            self._previous_loss = [objval + 2 * self._tol] * n

        if all(absolute(objval - prev) < self._tol for prev in self._previous_loss):
            # converged
            return True

        # store previous function evaluations (shift left, append current)
        for i in range(n):
            if i < n - 1:
                self._previous_loss[i] = self._previous_loss[i + 1]
            else:
                self._previous_loss[i] = objval

        return False

    def optimize(self, num_vars, objective_function, gradient_function=None,
                 variable_bounds=None, initial_point=None):
        super().optimize(num_vars, objective_function, gradient_function,
                         variable_bounds, initial_point)

        params = array(initial_point)
        iter_count = 0
        momentum = zeros(shape=(num_vars,))
        objval = objective_function(params)
        if self._disp:
            print("Iteration: " + str(iter_count) + " \t| Energy: " + str(objval))

        minobj = objval
        # BUG FIX: ``update`` mutates ``params`` in place, so storing a bare
        # reference here would always alias the *current* parameters and the
        # best-seen parameters would never actually be kept. Snapshot instead.
        minparams = params.copy()
        while iter_count < self._maxiter and not self.converged(objval):
            for j in range(num_vars):
                # update parameters in order based on quantum gradient
                derivative = self.deriv(j, params, objective_function)
                params, momentum[j] = self.update(j, params, derivative, momentum)

            # check the value of the objective function
            objval = objective_function(params)

            # keep the best parameters (snapshot, not alias — see above)
            if objval < minobj:
                minobj = objval
                minparams = params.copy()

            # update the iteration count
            iter_count += 1
            if self._disp:
                print("Iteration: " + str(iter_count) + " \t| Energy: " + str(objval))

        return minparams, minobj, iter_count
| 38.104712 | 97 | 0.620912 |
f39f8d815d870cd5792dbb2a04450890658e197f | 57,487 | py | Python | scripts/build_db_create_ignf_from_xml.py | asinghvi17/PROJ | a4c5cc1e42559f1d92c6b7655680d11f1eead703 | [
"MIT"
] | null | null | null | scripts/build_db_create_ignf_from_xml.py | asinghvi17/PROJ | a4c5cc1e42559f1d92c6b7655680d11f1eead703 | [
"MIT"
] | null | null | null | scripts/build_db_create_ignf_from_xml.py | asinghvi17/PROJ | a4c5cc1e42559f1d92c6b7655680d11f1eead703 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: PROJ
# Purpose: Build SRS and coordinate transform database from IGNF registry
# Author: Even Rouault <even.rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2018, Even Rouault <even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from lxml import etree
import os
import requests
import sys
# Previous location of the registry:
# url = "http://librairies.ign.fr/geoportail/resources/IGNF.xml"
url = "https://geodesie.ign.fr/contenu/fichiers/IGNF.v3.1.0.xml"

# Accept zero or one positional argument (a local registry path); anything
# else — including option-like arguments — prints usage and exits.
if len(sys.argv) not in (1, 2) or (len(sys.argv) == 2 and sys.argv[1].startswith('-')):
    print('Usage: build_db_create_ignf.py [path_to_IGNF.xml]')
    print('')
    print('If local filename is not used, then %s is downloaded.' % url)
    sys.exit(1)
def escape_literal(x):
    """Escape *x* for embedding in a single-quoted SQL string literal."""
    # SQL escapes an embedded single quote by doubling it.
    return "''".join(x.split("'"))
def strip_ns_prefix(tree):
    """Remove XML namespace prefixes from every tag and attribute of *tree*.

    Operates in place on the lxml tree and returns it for convenience, so
    later lookups can use bare tag/attribute names.
    """
    for element in tree.iter('*'):
        element.tag = etree.QName(element).localname
        # Iterate over a snapshot of the attribute names: the loop deletes
        # and re-inserts entries, and mutating a mapping while iterating
        # over it directly is unsafe.
        for att in list(element.attrib):
            if '}' not in att:
                # No namespace prefix — leave the attribute untouched
                # instead of deleting and re-adding it under the same key.
                continue
            newkey = att[att.find('}') + 1:]
            val = element.attrib[att]
            del element.attrib[att]
            element.attrib[newkey] = val
    return tree
all_sql = []          # SQL statements for the main proj.db tables
all_sql_concat = []  # for concatenated_operation

# Obtain the registry: download it unless a local XML path was supplied.
if len(sys.argv) == 1:
    r = requests.get(url)
    root = etree.fromstring(r.content)
else:
    IGNF_file = sys.argv[1]
    tree = etree.parse(open(IGNF_file, 'rt'))
    root = tree.getroot()

# Drop namespace prefixes so element lookups below can use bare tag names.
root = strip_ns_prefix(root)

# Retrieve and insert registry version metadata
version = root.find('versionNumber').find('CharacterString').text
date = root.find('versionDate').find('Date').text
all_sql.append("""INSERT INTO "metadata" VALUES('IGNF.SOURCE', '%s');""" % escape_literal(url))
all_sql.append("""INSERT INTO "metadata" VALUES('IGNF.VERSION', '%s');""" % version)
all_sql.append("""INSERT INTO "metadata" VALUES('IGNF.DATE', '%s');""" % date)
def get_epsg_code(txt):
    """Extract the numeric EPSG code from a URN such as 'urn:...:EPSG:4326'."""
    assert ':EPSG:' in txt
    # The code is whatever follows the last colon.
    return txt.rsplit(':', 1)[-1]
def ingest_ellipsoids(root, all_sql):
    """Collect ellipsoids from the registry.

    Returns a dict mapping registry ellipsoid id -> (authority, code).
    Ellipsoids carrying an EPSG alias are simply referenced; the remaining
    ones (spheres only, per the assertions) get an IGNF-owned row appended
    to all_sql.
    """
    mapEllpsId = {}
    for ellps in root.iter('ellipsoid'):
        E = ellps.find('Ellipsoid')
        id = E.attrib['id']
        names = [name.text for name in E.iter('name')]
        #print(id, names)
        if len(names) == 2:
            # Second name is an EPSG URN: reference the EPSG object.
            mapEllpsId[id] = ('EPSG', get_epsg_code(names[1]))
        else:
            assert len(names) == 1
            # Only spheres are expected among the IGNF-only ellipsoids.
            assert E.find('secondDefiningParameter').find('SecondDefiningParameter').find('isSphere').text == 'sphere'
            mapEllpsId[id] = ('IGNF', id)
            all_sql.append("""INSERT INTO "ellipsoid" VALUES('IGNF','%s','%s',NULL,'PROJ', 'EARTH', %s,'EPSG','9001',0,NULL,0);""" % (id, names[0], E.find('semiMajorAxis').text))
    return mapEllpsId
def ingest_prime_meridians(root, all_sql):
    """Map each registry prime-meridian id to its (authority, code) pair.

    ``all_sql`` is unused here: per the assertion every IGNF prime meridian
    carries an EPSG alias, so no local SQL rows are needed.
    """
    pm_by_id = {}
    for entry in root.iter('primeMeridian'):
        meridian = entry.find('PrimeMeridian')
        ident = meridian.attrib['id']
        labels = [label.text for label in meridian.iter('name')]
        # Two names are expected: the IGNF label plus an EPSG URN to alias.
        assert len(labels) == 2
        pm_by_id[ident] = ('EPSG', get_epsg_code(labels[1]))
    return pm_by_id
def extract_id_from_href(txt):
    """Return the fragment identifier (text after the last '#') of *txt*."""
    assert '#' in txt
    return txt.rsplit('#', 1)[-1]
def ingest_datums(root, all_sql, mapEllpsId, mapPmId):
    """Collect geodetic and vertical datums from the registry.

    Returns (mapDatumId, mapVerticalDatumId, invalidDatumId):
    id -> (authority, code) maps for geodetic and vertical datums, plus the
    set of geodetic datum ids that lacked a usable prime meridian.
    """
    mapDatumId = {}
    invalidDatumId = set()
    mapVerticalDatumId = {}
    for datum in root.iter('datum'):
        node = datum.find('GeodeticDatum')
        if node is not None:
            id = node.attrib['id']
            names = [_name.text for _name in node.iter('name')]
            if len(names) == 2:
                # Second name is an EPSG URN: alias the EPSG datum.
                if id == 'REG0020002':
                    # Known registry error for NTF Paris: override the code.
                    assert get_epsg_code(names[1]) == '6275'
                    print('Error in registry. it points to EPSG:6275 instead of EPSG:6807 for NTF Paris')
                    mapDatumId[id] = ('EPSG', '6807')
                else:
                    mapDatumId[id] = ('EPSG', get_epsg_code(names[1]))
            else:
                assert len(names) == 1, names
                pmNode = node.find('usesPrimeMeridian')
                if pmNode is None or 'href' not in pmNode.attrib:
                    # Unusable datum: remember it so dependent CRS get skipped.
                    print('Invalid GeodeticDatum: ' + id)
                    invalidDatumId.add(id)
                    continue
                pmCode = extract_id_from_href(pmNode.attrib['href'])
                assert pmCode in mapPmId
                ellpsCode = extract_id_from_href(node.find('usesEllipsoid').attrib['href'])
                assert ellpsCode in mapEllpsId
                # We cheat by using EPSG:1262 = World for area of use
                sql = """INSERT INTO "geodetic_datum" VALUES('IGNF','%s','%s',NULL,NULL,'%s','%s','%s','%s','EPSG','1262',NULL,0);""" % (id, names[0], mapEllpsId[ellpsCode][0], mapEllpsId[ellpsCode][1], mapPmId[pmCode][0], mapPmId[pmCode][1])
                all_sql.append(sql)
                mapDatumId[id] = ('IGNF', id)
        else:
            node = datum.find('VerticalDatum')
            if node is not None:
                id = node.attrib['id']
                names = [_name.text for _name in node.iter('name')]
                # Vertical datums always become IGNF-owned rows (world extent).
                sql = """INSERT INTO "vertical_datum" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262',NULL,0);"""% (id, names[0])
                all_sql.append(sql)
                mapVerticalDatumId[id] = ('IGNF', id)
            else:
                # A datum element must be either geodetic or vertical.
                assert False
    return mapDatumId, mapVerticalDatumId, invalidDatumId
# Run the ingestion passes in dependency order: ellipsoids and prime
# meridians first, since datums reference both.
mapEllpsId = ingest_ellipsoids(root, all_sql)
mapPmId = ingest_prime_meridians(root, all_sql)
mapDatumId, mapVerticalDatumId, invalidDatumId = ingest_datums(root, all_sql, mapEllpsId, mapPmId)
# Cache: extent description -> ['IGNF', code], shared across all CRS.
areaOfUseMap = {}

def get_area_of_use(domainOfValidity):
    """Return the (authority, code) of the area of use for a CRS extent.

    Falls back to EPSG:1262 (World) when the extent has no description or
    no bounding box.  New IGNF areas are assigned sequential codes, cached
    in areaOfUseMap and appended to all_sql as "area" rows.
    """
    extent = domainOfValidity.find('EX_Extent')
    desc = extent.find('description').find('CharacterString').text
    if desc is None:
        return 'EPSG', '1262'
    if desc in areaOfUseMap:
        return areaOfUseMap[desc]
    geographicElement = extent.find('geographicElement')
    if geographicElement is None:
        print('No geographicElement for area of use ' + desc)
        return 'EPSG', '1262'
    # Sequential IGNF area codes: 1, 2, 3, ...
    code = str(len(areaOfUseMap) + 1)
    areaOfUseMap[desc] = ['IGNF', code ]
    EX_GeographicBoundingBox = geographicElement.find('EX_GeographicBoundingBox')
    south = EX_GeographicBoundingBox.find('southBoundLatitude').find('Decimal').text
    west = EX_GeographicBoundingBox.find('westBoundLongitude').find('Decimal').text
    north = EX_GeographicBoundingBox.find('northBoundLatitude').find('Decimal').text
    east = EX_GeographicBoundingBox.find('eastBoundLongitude').find('Decimal').text
    all_sql.append("""INSERT INTO "area" VALUES('IGNF','%s','%s','%s',%s,%s,%s,%s,0);""" % (code, escape_literal(desc), escape_literal(desc), south, north, west, east))
    return areaOfUseMap[desc]
# crs id -> (authority, code) for every CRS ingested below.
mapCrsId = {}
mapGeocentricId = {}
# This is a trick to find a GeocentricCRS and its related GeographicCRS
# We could use the name, but if we use the datum code + area of use, it is
# more reliable
# We need this since the registry only exposes GeocentricCRS <--> GeocentricCRS
# transformations, and we need to port them to GeographicCRS as well
mapDatumAndAreaToGeocentricId = {}
mapGeocentricIdToDatumAndArea = {}
# crs id -> list of (authority, code) aliases for the same CRS.
aliasOfCRS = {}
# Ingest every GeocentricCRS: emit a geodetic_crs row (EPSG:6500 = X,Y,Z
# Cartesian CS), record aliases, and index each CRS by (datum, area).
for node in root.iterfind('.//GeocentricCRS'):
    id = node.attrib['id']
    names = [_name.text for _name in node.iter('name')]
    name = names[0]
    cartesianCS = extract_id_from_href(node.find('usesCartesianCS').attrib['href'])
    # The registry uses a single Cartesian CS type for geocentric CRS.
    assert cartesianCS == 'TYP_CRG10'
    datumCode = extract_id_from_href(node.find('usesGeodeticDatum').attrib['href'])
    if datumCode in invalidDatumId:
        print('Skipping GeocentricCRS %s since its datum is unknown' % id)
        continue
    assert datumCode in mapDatumId, (id, name, datumCode)
    area_of_use = get_area_of_use(node.find('domainOfValidity'))
    #sql = """INSERT INTO "crs" VALUES('IGNF','%s','geocentric');""" % (id)
    #all_sql.append(sql)
    sql = """INSERT INTO "geodetic_crs" VALUES('IGNF','%s','%s',NULL,NULL,'geocentric','EPSG','6500','%s','%s','%s','%s',NULL,0);""" % (id, name, mapDatumId[datumCode][0], mapDatumId[datumCode][1], area_of_use[0], area_of_use[1])
    all_sql.append(sql)
    mapCrsId[id] = ('IGNF', id)
    mapGeocentricId[id] = ('IGNF', id)
    # A second name holding a registry URL encodes an alias id; emit a
    # duplicate row under that alias.
    if len(names) >= 2 and names[1].startswith('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):
        alias = names[1][len('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):]
        aliasOfCRS[id] = [('IGNF', alias)]
        if id == 'WGS84':
            aliasOfCRS[id].append(('EPSG', '4978'))
        #sql = """INSERT INTO "crs" VALUES('IGNF','%s','geocentric'); -- alias of %s""" % (alias, id)
        #all_sql.append(sql)
        sql = """INSERT INTO "geodetic_crs" VALUES('IGNF','%s','%s',NULL,NULL,'geocentric','EPSG','6500','%s','%s','%s','%s',NULL,0);""" % (alias, name, mapDatumId[datumCode][0], mapDatumId[datumCode][1], area_of_use[0], area_of_use[1])
        all_sql.append(sql)
    # Index this geocentric CRS by its (datum, area) pair so the related
    # GeographicCRS can be located later.
    key = str((mapDatumId[datumCode], area_of_use))
    assert key not in mapDatumAndAreaToGeocentricId, (id, name)
    mapDatumAndAreaToGeocentricId[key] = ('IGNF', id)
    mapGeocentricIdToDatumAndArea[id] = key
# IGNF id -> (auth_name, code) for geographic CRS emitted so far.
mapGeographicId = {}
# str((datum, area_of_use)) -> list of geographic CRS ids sharing that pair
# (used later to port geocentric transformations to geographic CRS).
mapDatumAndAreaToGeographicId = {}
for node in root.iterfind('.//GeographicCRS'):
    id = node.attrib['id']
    names = [_name.text for _name in node.iter('name')]
    name = names[0]
    ellipsoidalCS = extract_id_from_href(node.find('usesEllipsoidalCS').attrib['href'])
    assert ellipsoidalCS in ('TYP_CRG24', 'TYP_CRG26', 'TYP_CRG22', 'TYP_CRG28', 'TYP_CRG29'), (id, name, ellipsoidalCS)
    datumCode = extract_id_from_href(node.find('usesGeodeticDatum').attrib['href'])
    if datumCode in invalidDatumId:
        print('Skipping GeographicCRS %s since its datum is unknown' % id)
        continue
    assert datumCode in mapDatumId, (id, name, datumCode)
    area_of_use = get_area_of_use(node.find('domainOfValidity'))
    # Map the IGNF ellipsoidal CS type to the corresponding EPSG CS code.
    csCode = None
    type = 'geographic 2D'
    if ellipsoidalCS in ('TYP_CRG24', 'TYP_CRG28'): # Long, Lat deg
        csCode = '6424'
    if ellipsoidalCS in ('TYP_CRG26', 'TYP_CRG29'): # Long, Lat deg, h m
        csCode = '6426'
        type = 'geographic 3D'
    if ellipsoidalCS == 'TYP_CRG22': # Long, Lat grad
        csCode = '6425'
    #sql = """INSERT INTO "crs" VALUES('IGNF','%s','%s');""" % (id, type)
    #all_sql.append(sql)
    sql = """INSERT INTO "geodetic_crs" VALUES('IGNF','%s','%s',NULL,NULL,'%s','EPSG','%s','%s','%s','%s','%s',NULL,0);""" % (id, name, type, csCode, mapDatumId[datumCode][0], mapDatumId[datumCode][1], area_of_use[0], area_of_use[1])
    all_sql.append(sql)
    if id == 'WGS84G':
        # IGNF WGS84G is equivalent to EPSG:4326.
        aliasOfCRS[id] = [('EPSG','4326')]
    # Registry-URL second name denotes an alias id: emit a duplicate row.
    if len(names) >= 2 and names[1].startswith('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):
        alias = names[1][len('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):]
        # WGS84G must not get both the EPSG alias above and a URL alias.
        assert id != 'WGS84G'
        aliasOfCRS[id] = [('IGNF', alias)]
        #sql = """INSERT INTO "crs" VALUES('IGNF','%s','%s'); -- alias of %s""" % (alias, type, id)
        #all_sql.append(sql)
        sql = """INSERT INTO "geodetic_crs" VALUES('IGNF','%s','%s',NULL,NULL,'%s','EPSG','%s','%s','%s','%s','%s',NULL,0);""" % (alias, name, type, csCode, mapDatumId[datumCode][0], mapDatumId[datumCode][1], area_of_use[0], area_of_use[1])
        all_sql.append(sql)
    mapCrsId[id] = ('IGNF', id)
    mapGeographicId[id] = ('IGNF', id)
    key = str((mapDatumId[datumCode], area_of_use))
    if key in mapDatumAndAreaToGeographicId:
        #print('Adding ' + id + ' to ' + str(mapDatumAndAreaToGeographicId[key]))
        mapDatumAndAreaToGeographicId[key].append(id)
    else:
        mapDatumAndAreaToGeographicId[key] = [id]
    # Create a 2D version to be able to create compoundCRS with it
    if id == 'RGWF96GEO':
        # Note: `id` is rebound here; the synthesized 2D CRS gets its own rows.
        id = 'RGWF96G'
        csCode = '6424'
        type = 'geographic 2D'
        #sql = """INSERT INTO "crs" VALUES('IGNF','%s','%s');""" % (id, type)
        #all_sql.append(sql)
        sql = """INSERT INTO "geodetic_crs" VALUES('IGNF','%s','%s',NULL,NULL,'%s','EPSG','%s','%s','%s','%s','%s',NULL,0);""" % (id, name, type, csCode, mapDatumId[datumCode][0], mapDatumId[datumCode][1], area_of_use[0], area_of_use[1])
        all_sql.append(sql)
        mapCrsId[id] = ('IGNF', id)
        mapGeographicId[id] = ('IGNF', id)
        key = str((mapDatumId[datumCode], area_of_use))
        if key in mapDatumAndAreaToGeographicId:
            #print('Adding ' + id + ' to ' + str(mapDatumAndAreaToGeographicId[key]))
            mapDatumAndAreaToGeographicId[key].append(id)
        else:
            mapDatumAndAreaToGeographicId[key] = [id]
# IGNF id -> (auth_name, code) for vertical CRS emitted so far.
mapVerticalCrsId = {}
for node in root.iterfind('.//VerticalCRS'):
    id = node.attrib['id']
    names = [_name.text for _name in node.iter('name')]
    name = names[0]
    verticalCS = extract_id_from_href(node.find('usesVerticalCS').attrib['href'])
    assert verticalCS in ('TYP_CRG92','TYP_CRG91'), verticalCS
    datumCode = extract_id_from_href(node.find('usesVerticalDatum').attrib['href'])
    assert datumCode in mapVerticalDatumId, (id, name, datumCode)
    # VerticalCRS and GeocentricCRS can have same IDs ! like 'STPM50'
    id_modified = id
    if id in mapCrsId:
        print('VerticalCRS %s conflicts with a Geodetic one of same name. Appending _V for disambiguation'% id)
        id_modified += '_V'
    #sql = """INSERT INTO "crs" VALUES('IGNF','%s','vertical');""" % (id_modified)
    #all_sql.append(sql)
    area_of_use = get_area_of_use(node.find('domainOfValidity'))
    # EPSG:6499 = vertical (gravity-related height) coordinate system.
    sql = """INSERT INTO "vertical_crs" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','6499','%s','%s','%s','%s',0);""" % (id_modified, name, mapVerticalDatumId[datumCode][0], mapVerticalDatumId[datumCode][1], area_of_use[0], area_of_use[1])
    all_sql.append(sql)
    # No URL alias is expected for vertical CRS in the registry.
    if len(names) >= 2 and names[1].startswith('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):
        assert False
    # Record under the ORIGINAL id so lookups from transformations still work.
    mapCrsId[id] = ('IGNF', id_modified)
    mapVerticalCrsId[id] = ('IGNF', id_modified)
# Some grid file URLs/names found in the registry are stale or superseded:
# map them to the currently-valid download URL before emitting SQL.
mapGridURLs = {
    # France metropole
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/RAF09.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/metropole/RAF09.mnt',
    # Corse
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/RAC09.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/metropole/RAC09.mnt',
    # Guadeloupe RGAF09
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_gtbt.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RAGTBT2016.mnt',
    'RAGTBT2016.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RAGTBT2016.mnt',
    # Les Saintes RGAF09
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_ls.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RALS2016.mnt',
    'RALS2016.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RALS2016.mnt',
    # Martinique RGAF09
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_mart.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RAMART2016.mnt',
    'RAMART2016.MNT':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RAMART2016.mnt',
    # Marie Galante RGAF09
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_mg.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RAMG2016.mnt',
    'RAMG2016.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RAMG2016.mnt',
    # Saint Barthelemy RGAF09
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_sb.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_sbv2.mnt',
    'gg10_sbv2.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_sbv2.mnt',
    # Saint Martin RGAF09
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_sm.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_smv2.mnt',
    'gg10_smv2.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_smv2.mnt',
    # La Desirade RGAF09
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/gg10_ld.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RALD2016.mnt',
    'RALD2016.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RALD2016.mnt',
    # Guadeloupe WGS84
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00.txt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00v2.mnt',
    # Les Saintes WGS84
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_ls.txt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_lsv2.mnt',
    'ggg00_lsv2.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_lsv2.mnt',
    # Martinique WGS84
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggm00.txt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggm00v2.mnt',
    # Saint Barthelemy WGS84
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_sb.txt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_sbv2.mnt',
    # Saint Martin WGS84
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_sm.txt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_smv2.mnt',
    # La Desirade WGS84
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggg00_ld.txt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RALDW842016.mnt',
    'RALDW842016.mnt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RALDW842016.mnt',
    # Guyane RGF95
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggguy00.txt':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/ggguy15.mnt',
    # Reunion grille RAR
    'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/RAR07_bl.gra':
        'http://geodesie.ign.fr/contenu/fichiers/documentation/grilles/outremer/RAR07_bl.gra',
}
# Grid filenames referenced by the emitted grid transformations.
setVerticalGrids = set()

# Return every (auth_name, code) pair under which CRS `code` is registered:
# the IGNF id itself, plus any aliases collected in aliasOfCRS.
# Hoisted out of the loop: the original re-defined it on every iteration.
def get_alias_of(code):
    if code in aliasOfCRS:
        return [ ('IGNF', code) ] + aliasOfCRS[code]
    return [ ('IGNF', code) ]

# Parse every Transformation of the registry and emit the matching
# grid_transformation / other_transformation / helmert_transformation rows,
# duplicated for every alias pair of the source and target CRS.
for node in root.iterfind('.//Transformation'):
    id = node.attrib['id']
    names = [_name.text for _name in node.iter('name')]
    name = names[0]
    sourceCRS = extract_id_from_href(node.find('sourceCRS').attrib['href'])
    if not sourceCRS in mapCrsId:
        print('Skipping ' + name + ', missing sourceCRS')
        continue
    targetCRS = node.find('targetCRS')
    if targetCRS is None or 'href' not in targetCRS.attrib:
        print('Skipping ' + name + ', missing targetCRS')
        continue
    targetCRS = extract_id_from_href(targetCRS.attrib['href'])
    if not targetCRS in mapCrsId:
        print('Skipping ' + name + ', missing targetCRS')
        continue
    operation_version = node.find('operationVersion').text
    scope = node.find('scope').text
    area_of_use = get_area_of_use(node.find('domainOfValidity'))
    usesMethod = extract_id_from_href(node.find('usesMethod').attrib['href'])
    # BUGFIX: the original wrote `usesMethod in ('Geographic3DtoGravityRelatedHeight_IGN')`.
    # Without a trailing comma that is a plain string, so `in` performed a
    # *substring* test (e.g. 'IGN' would also match). Exact equality intended.
    if usesMethod == 'Geographic3DtoGravityRelatedHeight_IGN':
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','grid_transformation');""" % id
        #all_sql.append(sql)
        usesValue = node.find('usesValue')
        paramValue = usesValue.find('ParameterValue')
        filename = paramValue.find('valueFile').text
        if filename in mapGridURLs:
            print('Fixing URL of ' + filename + ' to ' + mapGridURLs[filename])
            filename = mapGridURLs[filename]
        if not filename.endswith('RAF09.mnt') and not filename.endswith('ggspm06v1.mnt'): # no longer available
            # Sanity-check that the grid is still downloadable.
            r = requests.head(filename, allow_redirects = True )
            if r.status_code not in (200, 302):
                assert False, (r.status_code, id, name, filename)
        setVerticalGrids.add(filename)
        assert sourceCRS in mapVerticalCrsId, (id, name, sourceCRS)
        assert targetCRS in mapGeographicId, (id, name, targetCRS)
        # Switching source and target to be consistent with the EPSG practice and the naming of the method
        name_components = name.split(' vers ')
        name_inverted = name_components[1] + ' vers ' + name_components[0]
        sql = """INSERT INTO "grid_transformation" VALUES('IGNF','%s','%s',NULL,'%s','EPSG','9664','Geographic3D to GravityRelatedHeight (IGN1997)','%s','%s','%s','%s','%s','%s',NULL,'EPSG','8666','Geoid (height correction) model file','%s',NULL,NULL,NULL,NULL,NULL,NULL,'%s',0);""" % (id, name_inverted, scope, mapCrsId[targetCRS][0], mapCrsId[targetCRS][1], mapCrsId[sourceCRS][0], mapCrsId[sourceCRS][1], area_of_use[0], area_of_use[1], filename, operation_version)
        all_sql.append(sql)
        continue
    if id == 'TSG1240': # 'NTF geographiques Paris (gr) vers NTF GEOGRAPHIQUES GREENWICH (DMS)', 'from1Dto1D')
        #print('Skipping ' + str((id, name)))
        assert usesMethod == 'from1Dto1D', usesMethod
        # Paris -> Greenwich prime meridian shift, modeled as a longitude rotation.
        for src in get_alias_of(sourceCRS):
            for target in get_alias_of(targetCRS):
                custom_id = id
                if not ((src == ('IGNF', sourceCRS) and target == ('IGNF', targetCRS))):
                    custom_id = id + '_' + src[0] + '_' + src[1] + '_TO_' + target[0] + '_' + target[1]
                #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','other_transformation');""" % custom_id
                #all_sql.append(sql)
                sql = """INSERT INTO "other_transformation" VALUES('IGNF','%s','%s',NULL,'%s','EPSG','9601','Longitude rotation','%s','%s','%s','%s','%s','%s',0.0,'EPSG','8602','Longitude offset',2.5969213,'EPSG','9105',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'%s',0);""" % (custom_id, name, scope, src[0], src[1], target[0], target[1], area_of_use[0], area_of_use[1], operation_version)
                all_sql.append(sql)
        continue
    if usesMethod == 'TSGM510': # geocentric interpolation
        # Re-expressed as the well-known NTv2 grid between the geographic CRS.
        id = 'NTFG_TO_RGF93G'
        assert sourceCRS == 'NTF'
        assert targetCRS == 'RGF93'
        sourceCRS = 'NTFG'
        targetCRS = 'RGF93G'
        for src in get_alias_of(sourceCRS):
            # As the transformation from RGF93G to WGS84 is a zero-translation helmert,
            # we can also use the grid for NTF->WGS84. This makes the coordinate
            # operation finder happier
            for target in get_alias_of(targetCRS) + [('EPSG','4326')]:
                custom_id = id
                if not ((src == ('IGNF', sourceCRS) and target == ('IGNF', targetCRS))):
                    custom_id = src[0] + '_' + src[1] + '_TO_' + target[0] + '_' + target[1]
                #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','grid_transformation');""" % (custom_id)
                #all_sql.append(sql)
                sql = """INSERT INTO "grid_transformation" VALUES('IGNF','%s','%s',NULL,'%s','EPSG','9615','NTv2','%s','%s','%s','%s','%s','%s',NULL,'EPSG','8656','Latitude and longitude difference file','ntf_r93.gsb',NULL,NULL,NULL,NULL,NULL,NULL,'%s',0);""" % (custom_id, name, scope, src[0], src[1], target[0], target[1], area_of_use[0], area_of_use[1], operation_version)
                all_sql.append(sql)
        continue
    if usesMethod == 'Vfrom1Dto1D':
        # Constant vertical offset between two vertical CRS.
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','other_transformation');""" % id
        #all_sql.append(sql)
        usesValue = node.find('usesValue')
        paramValue = usesValue.find('ParameterValue')
        value = paramValue.find('value').text
        uom = paramValue.find('value').attrib['uom']
        assert uom == 'm'
        sql = """INSERT INTO "other_transformation" VALUES('IGNF','%s','%s',NULL,'%s','EPSG','9616','Vertical Offset','%s','%s','%s','%s','%s','%s',NULL,'EPSG','8603','Vertical Offset',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'%s',0);""" % (id, name, scope, mapCrsId[sourceCRS][0], mapCrsId[sourceCRS][1], mapCrsId[targetCRS][0], mapCrsId[targetCRS][1], area_of_use[0], area_of_use[1], value, operation_version)
        all_sql.append(sql)
        continue
    # Remaining methods are Helmert-style geocentric transformations,
    # either 3-parameter (translations) or 7-parameter (position vector).
    assert usesMethod in ('TSGM120', 'TSGM110', 'TSGM112', 'TSGM111'), (id, name, usesMethod)
    assert sourceCRS in mapGeocentricId
    assert targetCRS in mapGeocentricId
    vals = [val for val in node.iterfind('usesValue')]
    assert len(vals) in (3,7)
    x = vals[0].find('ParameterValue').find('value').text
    assert vals[0].find('ParameterValue').find('value').attrib['uom'] == 'm'
    y = vals[1].find('ParameterValue').find('value').text
    assert vals[1].find('ParameterValue').find('value').attrib['uom'] == 'm'
    z = vals[2].find('ParameterValue').find('value').text
    assert vals[2].find('ParameterValue').find('value').attrib['uom'] == 'm'
    if len(vals) == 3:
        rx = 'NULL'
        ry = 'NULL'
        rz = 'NULL'
        s = 'NULL'
        r_uom_auth_name = 'NULL'
        r_uom_code = 'NULL'
        s_uom_auth_name = 'NULL'
        s_uom_code = 'NULL'
        method_code = "'1031'"
        method_name = "'Geocentric translations (geocentric domain)'"
        method_geog_code = "'9603'"
        method_geog_name = "'Geocentric translations (geog2D domain)'"
    else:
        s = vals[3].find('ParameterValue').find('value').text
        assert vals[3].find('ParameterValue').find('value').attrib['uom'] == 'UNITE'
        rx = vals[4].find('ParameterValue').find('value').text
        assert vals[4].find('ParameterValue').find('value').attrib['uom'] == 'sec'
        ry = vals[5].find('ParameterValue').find('value').text
        assert vals[5].find('ParameterValue').find('value').attrib['uom'] == 'sec'
        rz = vals[6].find('ParameterValue').find('value').text
        assert vals[6].find('ParameterValue').find('value').attrib['uom'] == 'sec'
        r_uom_auth_name = "'EPSG'"
        r_uom_code = "'9104'"
        s_uom_auth_name = "'EPSG'"
        s_uom_code = "'9202'"
        method_code = "'1033'"
        method_name = "'Position Vector transformation (geocentric domain)'"
        method_geog_code = "'9606'"
        method_geog_name = "'Position Vector transformation (geog2D domain)'"
    # Emit the geocentric Helmert for every alias pair.
    for src in get_alias_of(sourceCRS):
        for target in get_alias_of(targetCRS):
            custom_id = id
            if not ((src == ('IGNF', sourceCRS) and target == ('IGNF', targetCRS))):
                custom_id += '_' + src[1] + '_' + target[1]
            #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','helmert_transformation');""" % (custom_id)
            #all_sql.append(sql)
            sql = """INSERT INTO "helmert_transformation" VALUES('IGNF','%s','%s',NULL,'%s','EPSG',%s,%s,'%s','%s','%s','%s','%s','%s',NULL,%s,%s,%s,'EPSG','9001',%s,%s,%s,%s,%s,%s,%s, %s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'%s',0);""" % (custom_id, name, scope, method_code, method_name, src[0], src[1], target[0], target[1], area_of_use[0], area_of_use[1], x, y, z, rx, ry, rz, r_uom_auth_name, r_uom_code, s, s_uom_auth_name, s_uom_code, operation_version)
            all_sql.append(sql)
    # Port the geocentric transformation to the related geographic CRS,
    # found through the shared (datum, area of use) key.
    key = mapGeocentricIdToDatumAndArea[sourceCRS]
    assert key in mapDatumAndAreaToGeographicId
    sourceGeogIdAr = mapDatumAndAreaToGeographicId[key]
    key = mapGeocentricIdToDatumAndArea[targetCRS]
    assert key in mapDatumAndAreaToGeographicId
    targetGeogIdAr = mapDatumAndAreaToGeographicId[key]
    for sourceGeogId in sourceGeogIdAr:
        for targetGeogId in targetGeogIdAr:
            for src in get_alias_of(sourceGeogId):
                for target in get_alias_of(targetGeogId):
                    id_geog = id + '_' + src[1] + '_TO_' + target[1]
                    #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','helmert_transformation');""" % (id_geog)
                    #all_sql.append(sql)
                    sql = """INSERT INTO "helmert_transformation" VALUES('IGNF','%s','%s',NULL,'%s','EPSG',%s,%s,'%s','%s','%s','%s','%s','%s',NULL,%s,%s,%s,'EPSG','9001',%s,%s,%s,%s,%s,%s,%s, %s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'%s',0);""" % (id_geog, name, scope, method_geog_code, method_geog_name, src[0], src[1], target[0], target[1], area_of_use[0], area_of_use[1], x, y, z, rx, ry, rz, r_uom_auth_name, r_uom_code, s, s_uom_auth_name, s_uom_code, operation_version)
                    all_sql.append(sql)
                    if src[1] == 'NTFG':
                        # Chain the Paris->Greenwich rotation with this Helmert
                        # so NTF Paris CRS also reach the target directly.
                        for NTFPalias, idFirstOp in (('NTFPGRAD', 'TSG1240'), ('NTFP', 'TSG1240_IGNF_NTFP_TO_IGNF_NTFG')):
                            id_concat = id + '_' + NTFPalias + '_TO_' + target[1]
                            #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','concatenated_operation');""" % (id_concat)
                            #all_sql_concat.append(sql)
                            sql = """INSERT INTO "concatenated_operation" VALUES('IGNF','%s','Nouvelle Triangulation Francaise Paris grades to %s',NULL,'%s','IGNF','%s','%s','%s','%s','%s',NULL,'%s',0);""" % (id_concat, target[1], scope, NTFPalias, target[0], target[1], area_of_use[0], area_of_use[1], operation_version)
                            all_sql_concat.append(sql)
                            sql = """INSERT INTO "concatenated_operation_step" VALUES('IGNF','%s',1,'IGNF','%s');""" % (id_concat, idFirstOp)
                            all_sql_concat.append(sql)
                            sql = """INSERT INTO "concatenated_operation_step" VALUES('IGNF','%s',2,'IGNF','%s');""" % (id_concat, id_geog)
                            all_sql_concat.append(sql)
# IGNF conversion id -> (auth_name, code) of the emitted conversion row.
mapConversionId = {}
def getParameter(node, code, expected_uom):
    """Return, as a float, the value of projection parameter *code* found
    among the 'usesValue' children of *node*.

    When *expected_uom* is 'deg' and the parameter is expressed as a
    dmsAngleValue, the degrees/minutes/seconds components are converted to
    decimal degrees (negated for 'W'/'S' directions). Otherwise the plain
    'value' element is used and its 'uom' attribute must equal
    *expected_uom*. Raises Exception when the parameter is absent.
    """
    for uses_value in node.iterfind('usesValue'):
        param_value = uses_value.find('ParameterValue')
        if extract_id_from_href(param_value.find('valueOfParameter').attrib['href']) != code:
            continue
        dms = param_value.find('dmsAngleValue')
        if expected_uom == 'deg' and dms is not None:
            degrees_elmt = dms.find('degrees')
            direction = degrees_elmt.attrib['direction']
            assert direction in ('E', 'W', 'S', 'N')
            # 'secondes' (French spelling in the registry schema) is optional.
            seconds_elmt = dms.find('secondes')
            seconds = float(seconds_elmt.text) if seconds_elmt is not None else 0
            angle = float(degrees_elmt.text) + float(dms.find('minutes').text) / 60.0 + seconds / 3600.0
            return -angle if direction in ('W', 'S') else angle
        value_elmt = param_value.find('value')
        assert value_elmt.attrib['uom'] == expected_uom
        return float(value_elmt.text)
    raise Exception('cannot find value for parameter ' + code)
# Parse every Conversion (map projection definition) of the registry and emit
# the matching "conversion" rows, recording the emitted id in mapConversionId.
for node in root.iterfind('.//Conversion'):
    id = node.attrib['id']
    names = [_name.text for _name in node.iter('name')]
    name = names[0]
    #print(id, name)
    # Disabled: reusing the EPSG conversion code taken from the second name.
    reuse_epsg_conversion = False ###
    if reuse_epsg_conversion and len(names) == 2:
        if id == 'PRC9601581': # PSEUDO MERCATOR (POPULAR VISUALISATION)
            assert get_epsg_code(names[1]) == '3857' # this is wrong, this is the projectedCRS code, not the conversion one
            mapConversionId[id] = ('EPSG', '3856')
        else:
            mapConversionId[id] = ('EPSG', get_epsg_code(names[1]))
        continue
    usesMethod = extract_id_from_href(node.find('usesMethod').attrib['href'])
    d = {}
    if usesMethod == 'PVPM001From2Dto2D': # Popular Visualisation Pseudo-Mercator
        assert len([1 for val in node.iterfind('usesValue')]) == 4
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        # Only the canonical all-zero variant is expected.
        assert d['x_0'] == 0
        assert d['y_0'] == 0
        assert d['lon_0'] == 0
        assert d['lat_0'] == 0
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','1024','Popular Visualisation Pseudo Mercator','EPSG','8801','Latitude of natural origin',0.0,'EPSG','9102','EPSG','8802','Longitude of natural origin',0.0,'EPSG','9102','EPSG','8806','False easting',0.0,'EPSG','9001','EPSG','8807','False northing',0.0,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name)
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'EQRC001from2Dto2D': # Equirectangular
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        d['lat_ts'] = getParameter(node, 'PRCP600', 'deg')
        assert d['lat_0'] == 0, (id, name, d)
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','1028','Equidistant Cylindrical','EPSG','8823','Latitude of 1st standard parallel',%s,'EPSG','9102','EPSG','8802','Longitude of natural origin',%s,'EPSG','9102','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_ts'], d['lon_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod in ('PRCM030from2Dto2D', 'PRCM020from2Dto2D', 'PRCM040from2Dto2D', 'PRCM030from3Dto2D'): # Transverse Mercator
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        d['k_0'] = getParameter(node, 'PRCP500', 'UNITE')
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1886','EPSG','9807','Transverse Mercator','EPSG','8801','Latitude of natural origin',%s,'EPSG','9102','EPSG','8802','Longitude of natural origin',%s,'EPSG','9102','EPSG','8805','Scale factor at natural origin',%s,'EPSG','9201','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['k_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'PRCM060from2Dto2D': # Bonne
        assert len([1 for val in node.iterfind('usesValue')]) == 6
        # Parameters expressed in grads (EPSG uom 9105).
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'gr')
        d['lat_0'] = getParameter(node, 'PRCP400', 'gr')
        d['k_0'] = getParameter(node, 'PRCP500', 'UNITE')
        d['lat_1'] = getParameter(node, 'PRCP600', 'gr')
        assert d['lat_0'] == d['lat_1']
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','9827','Bonne','EPSG','8801','Latitude of natural origin',%s,'EPSG','9105','EPSG','8802','Longitude of natural origin',%s,'EPSG','9105','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'PRCM015from2Dto2D': # LAEA
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        d['k_0'] = getParameter(node, 'PRCP500', 'UNITE')
        assert d['k_0'] == 1
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','9820','Lambert Azimuthal Equal Area','EPSG','8801','Latitude of natural origin',%s,'EPSG','9102','EPSG','8802','Longitude of natural origin',%s,'EPSG','9102','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'PRCM013from2Dto2D': # LCC_2SP
        assert len([1 for val in node.iterfind('usesValue')]) == 6
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        d['lat_1'] = getParameter(node, 'PRCP600', 'deg')
        d['lat_2'] = getParameter(node, 'PRCP700', 'deg')
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','9802','Lambert Conic Conformal (2SP)','EPSG','8821','Latitude of false origin',%s,'EPSG','9102','EPSG','8822','Longitude of false origin',%s,'EPSG','9102','EPSG','8823','Latitude of 1st standard parallel',%s,'EPSG','9102','EPSG','8824','Latitude of 2nd standard parallel',%s,'EPSG','9102','EPSG','8826','Easting at false origin',%s,'EPSG','9001','EPSG','8827','Northing at false origin',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['lat_1'], d['lat_2'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'PRCM070from2Dto2D': # Mercator (variant A)
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        d['k_0'] = getParameter(node, 'PRCP500', 'UNITE')
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','9804','Mercator (variant A)','EPSG','8801','Latitude of natural origin',%s,'EPSG','9102','EPSG','8802','Longitude of natural origin',%s,'EPSG','9102','EPSG','8805','Scale factor at natural origin',%s,'EPSG','9201','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['k_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod in ('PRCM012from2Dto2D', 'PRCM012from3Dto2D'): # LCC_1SP
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'gr')
        d['lat_0'] = getParameter(node, 'PRCP400', 'gr')
        d['k_0'] = getParameter(node, 'PRCP500', 'UNITE')
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','9801','Lambert Conic Conformal (1SP)','EPSG','8801','Latitude of natural origin',%s,'EPSG','9105','EPSG','8802','Longitude of natural origin',%s,'EPSG','9105','EPSG','8805','Scale factor at natural origin',%s,'EPSG','9201','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['k_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    # BUGFIX: was `usesMethod in ('PRCM014from2Dto2D')` -- a plain string, not
    # a tuple, so `in` performed a substring test. Exact equality intended.
    elif usesMethod == 'PRCM014from2Dto2D': # LCC_1SP
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        # Same method as PRCM012 but with xx10 parameter codes.
        d['x_0'] = getParameter(node, 'PRCP110', 'm')
        d['y_0'] = getParameter(node, 'PRCP210', 'm')
        d['lon_0'] = getParameter(node, 'PRCP310', 'gr')
        d['lat_0'] = getParameter(node, 'PRCP410', 'gr')
        d['k_0'] = getParameter(node, 'PRCP510', 'UNITE')
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','9801','Lambert Conic Conformal (1SP)','EPSG','8801','Latitude of natural origin',%s,'EPSG','9105','EPSG','8802','Longitude of natural origin',%s,'EPSG','9105','EPSG','8805','Scale factor at natural origin',%s,'EPSG','9201','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['k_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'PRCM053from2Dto2D': # Gauss Schreiber Transverse Mercator
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        d['k_0'] = getParameter(node, 'PRCP500', 'UNITE')
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','PROJ','gstm','Gauss Schreiber Transverse Mercator','EPSG','8801','Latitude of natural origin',%s,'EPSG','9102','EPSG','8802','Longitude of natural origin',%s,'EPSG','9102','EPSG','8805','Scale factor at natural origin',%s,'EPSG','9201','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['k_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'PRCM094from2Dto2D': # Polar Stereographic
        assert len([1 for val in node.iterfind('usesValue')]) == 5
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        d['k_0'] = getParameter(node, 'PRCP500', 'UNITE')
        # Only the Terre Adelie setup (South pole, lon 140) is expected.
        assert float(d['lat_0']) == -90
        assert float(d['lon_0']) == 140
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','EPSG','9810','Polar Stereographic (variant A)','EPSG','8801','Latitude of natural origin',%s,'EPSG','9102','EPSG','8802','Longitude of natural origin',%s,'EPSG','9102','EPSG','8805','Scale factor at natural origin',%s,'EPSG','9201','EPSG','8806','False easting',%s,'EPSG','9001','EPSG','8807','False northing',%s,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id, name, d['lat_0'], d['lon_0'], d['k_0'], d['x_0'], d['y_0'])
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'MILL001from2Dto2D': # Miller
        assert len([1 for val in node.iterfind('usesValue')]) == 4
        d['x_0'] = getParameter(node, 'PRCP100', 'm')
        d['y_0'] = getParameter(node, 'PRCP200', 'm')
        d['lon_0'] = getParameter(node, 'PRCP300', 'deg')
        d['lat_0'] = getParameter(node, 'PRCP400', 'deg')
        # Only the canonical all-zero variant is expected.
        assert d['x_0'] == 0
        assert d['y_0'] == 0
        assert d['lon_0'] == 0
        assert d['lat_0'] == 0
        #sql = """INSERT INTO "coordinate_operation" VALUES('IGNF','%s','conversion');""" % (id)
        #all_sql.append(sql)
        sql = """INSERT INTO "conversion" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','1262','PROJ','mill','PROJ mill',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (id,name)
        all_sql.append(sql)
        mapConversionId[id] = ('IGNF', id)
    elif usesMethod == 'PRCM095from2Dto2D':
        print('Unhandled conversion PRCM095from2Dto2D = Polar Sterographic (Variant C) %s' % (str((id, name, usesMethod))))
        continue
    else:
        print('Unknown conversion %s' % (str((id, name, usesMethod))))
        assert False
mapProjectedId = {}
for node in root.iterfind('.//ProjectedCRS'):
id = node.attrib['id']
names = [_name.text for _name in node.iter('name')]
name = names[0]
usesCartesianCS = extract_id_from_href(node.find('usesCartesianCS').attrib['href'])
assert usesCartesianCS in ('TYP_CRG32', 'TYP_CRG70', 'TYP_CRG34'), (id, name, usesCartesianCS)
baseGeographicCRS = extract_id_from_href(node.find('baseGeographicCRS').attrib['href'])
if baseGeographicCRS not in mapGeographicId:
print('Skipping ProjectedCRS %s since its baseGeographicCRS %s is unknown' % (id, baseGeographicCRS))
continue
definedByConversion = extract_id_from_href(node.find('definedByConversion').attrib['href'])
if definedByConversion in ('PRC0909577'):
print('Skipping ProjectedCRS %s since its definedByConversion %s is unhandled' % (id, definedByConversion))
continue
assert definedByConversion in mapConversionId, (id, name, definedByConversion)
area_of_use = get_area_of_use(node.find('domainOfValidity'))
#sql = """INSERT INTO "crs" VALUES('IGNF','%s','projected');""" % (id, )
#all_sql.append(sql)
cs_code = 4499 # TYP_CRG32
if usesCartesianCS == 'TYP_CRG70':
cs_code = 4400
if usesCartesianCS == 'TYP_CRG34':
cs_code = 4530
sql = """INSERT INTO "projected_crs" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','%s','%s','%s','%s','%s','%s','%s',NULL,0);""" % (id,name,cs_code,mapGeographicId[baseGeographicCRS][0], mapGeographicId[baseGeographicCRS][1],mapConversionId[definedByConversion][0], mapConversionId[definedByConversion][1], area_of_use[0], area_of_use[1])
all_sql.append(sql)
if len(names) >= 2 and names[1].startswith('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):
alias = names[1][len('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):]
aliasOfCRS[id] = [('IGNF', alias)]
#sql = """INSERT INTO "crs" VALUES('IGNF','%s','projected'); -- alias of %s""" % (alias, id)
#all_sql.append(sql)
sql = """INSERT INTO "projected_crs" VALUES('IGNF','%s','%s',NULL,NULL,'EPSG','%s','%s','%s','%s','%s','%s','%s',NULL,0);""" % (alias,name,cs_code,mapGeographicId[baseGeographicCRS][0], mapGeographicId[baseGeographicCRS][1],mapConversionId[definedByConversion][0], mapConversionId[definedByConversion][1], area_of_use[0], area_of_use[1])
all_sql.append(sql)
mapProjectedId[id] = ('IGNF', id)
for node in root.iterfind('.//CompoundCRS'):
id = node.attrib['id']
names = [_name.text for _name in node.iter('name')]
name = names[0]
singleCRS = [extract_id_from_href(includesSingleCRS.attrib['href']) for includesSingleCRS in node.iter('includesSingleCRS')]
assert len(singleCRS) == 2
if singleCRS[0] == 'RGWF96GEO':
singleCRS[0] = 'RGWF96G'
assert singleCRS[0] in mapProjectedId or singleCRS[0] in mapGeographicId, (id, name)
assert singleCRS[1] in mapVerticalCrsId, (id, name, singleCRS[1])
if singleCRS[0] in mapProjectedId:
horiz = mapProjectedId[singleCRS[0]]
else:
horiz = mapGeographicId[singleCRS[0]]
area_of_use = get_area_of_use(node.find('domainOfValidity'))
#sql = """INSERT INTO "crs" VALUES('IGNF','%s','compound');""" % (id, )
#all_sql.append(sql)
sql = """INSERT INTO "compound_crs" VALUES('IGNF','%s','%s',NULL,NULL,'%s','%s','%s','%s','%s','%s',0);""" % (id,name,horiz[0], horiz[1],mapVerticalCrsId[singleCRS[1]][0], mapVerticalCrsId[singleCRS[1]][1], area_of_use[0], area_of_use[1])
all_sql.append(sql)
if len(names) >= 2 and names[1].startswith('http://registre.ign.fr/ign/IGNF/crs/IGNF/'):
assert False
all_sql.append('')
all_sql.append("""--- Grid alternatives""")
all_sql.append('')
all_sql.append("""INSERT INTO grid_alternatives(original_grid_name,
proj_grid_name,
proj_grid_format,
proj_method,
inverse_direction,
package_name,
url, direct_download, open_license, directory)
VALUES ('ntf_r93.gsb', -- as referenced by the IGNF registry
'ntf_r93.gsb',
'NTv2',
'hgridshift',
0,
'proj-datumgrid',
NULL, NULL, NULL, NULL);
""")
for grid in setVerticalGrids:
original_grid_name = grid
proj_grid_name = grid[grid.rfind('/')+1:].replace('.txt', '.gtx').replace('.mnt', '.gtx').replace('.gra', '.gtx')
all_sql.append("""INSERT INTO grid_alternatives(original_grid_name,
proj_grid_name,
proj_grid_format,
proj_method,
inverse_direction,
package_name,
url, direct_download, open_license, directory)
VALUES ('%s', -- as referenced by the IGNF registry
'%s',
'GTX',
'geoid_like',
0,
'proj-datumgrid-europe',
NULL, NULL, NULL, NULL);""" % (original_grid_name, proj_grid_name))
all_sql.append('')
all_sql.append("""--- Null transformations between RRAF and WGS84 adapted from EPSG""")
all_sql.append('')
area_of_use_name = 'ANTILLES FRANCAISES'
assert area_of_use_name in areaOfUseMap
area_of_use = areaOfUseMap[area_of_use_name]
all_sql.append("""INSERT INTO "helmert_transformation" VALUES('PROJ','IGNF_RRAF_TO_EPSG_4978','RRAF to WGS 84',NULL,NULL,'EPSG','1031','Geocentric translations (geocentric domain)','IGNF','RRAF','EPSG','4978','%s','%s',1.0,0.0,0.0,0.0,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (area_of_use[0], area_of_use[1]))
all_sql.append("""INSERT INTO "helmert_transformation" VALUES('PROJ','IGNF_RRAFG_TO_EPSG_4326','RRAFG to WGS 84',NULL,NULL,'EPSG','9603','Geocentric translations (geog2D domain)','IGNF','RRAFG','EPSG','4326','%s','%s',1.0,0.0,0.0,0.0,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (area_of_use[0], area_of_use[1]))
all_sql.append("""INSERT INTO "helmert_transformation" VALUES('PROJ','IGNF_RRAFGDD_TO_EPSG_4326','RRAFGDD to WGS 84',NULL,NULL,'EPSG','9603','Geocentric translations (geog2D domain)','IGNF','RRAFGDD','EPSG','4326','%s','%s',1.0,0.0,0.0,0.0,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (area_of_use[0], area_of_use[1]))
all_sql.append('')
all_sql.append("""--- Null transformations between RGF93 and WGS84 adapted from EPSG""")
all_sql.append('')
area_of_use_name = 'FRANCE METROPOLITAINE (CORSE COMPRISE)'
assert area_of_use_name in areaOfUseMap
area_of_use = areaOfUseMap[area_of_use_name]
all_sql.append("""INSERT INTO "helmert_transformation" VALUES('PROJ','IGNF_RGF93_TO_EPSG_4978','RGF93 to WGS 84',NULL,NULL,'EPSG','1031','Geocentric translations (geocentric domain)','IGNF','RGF93','EPSG','4978','%s','%s',1.0,0.0,0.0,0.0,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (area_of_use[0], area_of_use[1]))
all_sql.append("""INSERT INTO "helmert_transformation" VALUES('PROJ','IGNF_RGF93G_TO_EPSG_4326','RGF93G to WGS 84',NULL,NULL,'EPSG','9603','Geocentric translations (geog2D domain)','IGNF','RGF93G','EPSG','4326','%s','%s',1.0,0.0,0.0,0.0,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (area_of_use[0], area_of_use[1]))
all_sql.append("""INSERT INTO "helmert_transformation" VALUES('PROJ','IGNF_RGF93GDD_TO_EPSG_4326','RGF93GDD to WGS 84',NULL,NULL,'EPSG','9603','Geocentric translations (geog2D domain)','IGNF','RGF93GDD','EPSG','4326','%s','%s',1.0,0.0,0.0,0.0,'EPSG','9001',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0);""" % (area_of_use[0], area_of_use[1]))
script_dir_name = os.path.dirname(os.path.realpath(__file__))
sql_dir_name = os.path.join(os.path.dirname(script_dir_name), 'data', 'sql')
f = open(os.path.join(sql_dir_name, 'ignf') + '.sql', 'wb')
f.write("--- This file has been generated by scripts/build_db_create_ignf_from_xml.py from the http://librairies.ign.fr/geoportail/resources/IGNF.xml definition file. DO NOT EDIT !\n\n".encode('UTF-8'))
for sql in all_sql:
f.write((sql + '\n').encode('UTF-8'))
comment = []
comment.append('')
comment.append("""--- Concatenated operations""")
comment.append('')
for sql in comment:
f.write((sql + '\n').encode('UTF-8'))
for sql in all_sql_concat:
f.write((sql + '\n').encode('UTF-8'))
f.close()
| 49.815425 | 619 | 0.623932 |
3f73b8b49f9ca7573e706080df70711d0a939ac8 | 883 | py | Python | clients/kratos/python/test/test_error_container.py | UkonnRa/sdk | 23ab5408a89cdf6ba7a6d8944f8d1b1cdc68aa4c | [
"Apache-2.0"
] | null | null | null | clients/kratos/python/test/test_error_container.py | UkonnRa/sdk | 23ab5408a89cdf6ba7a6d8944f8d1b1cdc68aa4c | [
"Apache-2.0"
] | null | null | null | clients/kratos/python/test/test_error_container.py | UkonnRa/sdk | 23ab5408a89cdf6ba7a6d8944f8d1b1cdc68aa4c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Ory Kratos
Welcome to the ORY Kratos HTTP API documentation! # noqa: E501
The version of the OpenAPI document: latest
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import ory_kratos_client
from ory_kratos_client.models.error_container import ErrorContainer # noqa: E501
from ory_kratos_client.rest import ApiException
class TestErrorContainer(unittest.TestCase):
    """Unit-test scaffolding for the ``ErrorContainer`` model."""

    def setUp(self):
        """No fixtures are required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testErrorContainer(self):
        """Smoke test for ErrorContainer construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = ory_kratos_client.models.error_container.ErrorContainer()  # noqa: E501
        pass
# Allow running this test module directly (outside a test runner):
# delegate discovery and execution to unittest.
if __name__ == '__main__':
    unittest.main()
| 22.075 | 89 | 0.711212 |
bf9c1adf9db28f0c0c8d681957ef2f58486a3ea6 | 4,692 | py | Python | models/densenet.py | YZP17121579/network-slimming | 2682de727f8cecd5b3c1472969357971bf52195a | [
"MIT"
] | 1 | 2018-10-04T17:12:52.000Z | 2018-10-04T17:12:52.000Z | models/densenet.py | YZP17121579/network-slimming | 2682de727f8cecd5b3c1472969357971bf52195a | [
"MIT"
] | null | null | null | models/densenet.py | YZP17121579/network-slimming | 2682de727f8cecd5b3c1472969357971bf52195a | [
"MIT"
] | 1 | 2018-10-04T17:14:50.000Z | 2018-10-04T17:14:50.000Z | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .channel_selection import channel_selection
# Public API of this module: only the `densenet` class is exported.
__all__ = ['densenet']
# NOTE(review): this string literal is not the module docstring (it does not
# appear as the first statement of the file); it is a no-op expression kept
# for its documentation value.
"""
densenet with basic block.
"""
class BasicBlock(nn.Module):
    """DenseNet basic layer: BN -> channel_selection -> ReLU -> 3x3 conv,
    with the input concatenated to the newly produced feature maps
    (dense connectivity).

    Parameters
    ----------
    inplanes : int
        Number of input channels before channel selection.
    cfg : int
        Number of channels actually fed into the 3x3 convolution after
        channel selection (may be smaller than ``inplanes`` in a pruned net).
    expansion : int
        Kept for interface compatibility; unused by the basic block.
    growthRate : int
        Number of new feature maps produced by the convolution.
    dropRate : float
        Dropout probability applied to the new maps when > 0.
    """

    def __init__(self, inplanes, cfg, expansion=1, growthRate=12, dropRate=0):
        super(BasicBlock, self).__init__()
        # NOTE: the original code computed `planes = expansion * growthRate`
        # but never used it; the dead local has been removed.
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.select = channel_selection(inplanes)
        self.conv1 = nn.Conv2d(cfg, growthRate, kernel_size=3,
                               padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.dropRate = dropRate

    def forward(self, x):
        out = self.bn1(x)
        out = self.select(out)
        out = self.relu(out)
        out = self.conv1(out)
        if self.dropRate > 0:
            out = F.dropout(out, p=self.dropRate, training=self.training)
        # Dense connectivity: keep the input and append the growthRate new maps.
        out = torch.cat((x, out), 1)
        return out
class Transition(nn.Module):
    """Transition layer between dense blocks:
    BN -> channel_selection -> ReLU -> 1x1 conv -> 2x2 average pooling.
    """

    def __init__(self, inplanes, outplanes, cfg):
        super(Transition, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.select = channel_selection(inplanes)
        self.conv1 = nn.Conv2d(cfg, outplanes, kernel_size=1,
                               bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Pre-activation, then project and halve the spatial resolution.
        activated = self.relu(self.select(self.bn1(x)))
        projected = self.conv1(activated)
        return F.avg_pool2d(projected, 2)
class densenet(nn.Module):
    """DenseNet (basic-block variant) for CIFAR-10/100 with a per-layer
    channel configuration `cfg`, as used by network-slimming pruning.

    Parameters
    ----------
    depth : int
        Total depth; must satisfy depth = 3n + 4.
    dropRate : float
        Dropout rate passed to every basic block.
    dataset : str
        'cifar10' or 'cifar100'; selects the classifier width.
    growthRate : int
        Feature maps added per layer.
    compressionRate : int
        Channel compression applied in transition layers.
    cfg : list[int] or None
        Per-layer input-channel counts (3n + 3 entries). When None, the
        unpruned default configuration is generated.
    """
    def __init__(self, depth=40,
                 dropRate=0, dataset='cifar10', growthRate=12, compressionRate=1, cfg = None):
        super(densenet, self).__init__()

        assert (depth - 4) % 3 == 0, 'depth should be 3n+4'
        n = (depth - 4) // 3
        block = BasicBlock

        self.growthRate = growthRate
        self.dropRate = dropRate

        if cfg == None:
            cfg = []
            start = growthRate*2
            for i in range(3):
                # Each dense block contributes n layer widths plus one
                # transition-layer width (hence n+1 entries per stage).
                # NOTE(review): the inner `i` shadows the outer loop variable,
                # and the stride 12 presumably stands for growthRate — confirm
                # before using growthRate != 12.
                cfg.append([start+12*i for i in range(n+1)])
                start += growthRate*12
            cfg = [item for sub_list in cfg for item in sub_list]

        assert len(cfg) == 3*n+3, 'length of config variable cfg should be 3n+3'

        # self.inplanes is a global variable used across multiple
        # helper functions
        self.inplanes = growthRate * 2
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
                               bias=False)

        # Three dense stages; cfg is sliced so each stage gets its n layer
        # widths and each transition gets one width.
        self.dense1 = self._make_denseblock(block, n, cfg[0:n])
        self.trans1 = self._make_transition(compressionRate, cfg[n])
        self.dense2 = self._make_denseblock(block, n, cfg[n+1:2*n+1])
        self.trans2 = self._make_transition(compressionRate, cfg[2*n+1])
        self.dense3 = self._make_denseblock(block, n, cfg[2*n+2:3*n+2])
        self.bn = nn.BatchNorm2d(self.inplanes)
        self.select = channel_selection(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)

        if dataset == 'cifar10':
            self.fc = nn.Linear(cfg[-1], 10)
        elif dataset == 'cifar100':
            self.fc = nn.Linear(cfg[-1], 100)

        # Weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Kaiming-style fan-out init for convolutions.
                # (This `n` deliberately reuses/overwrites the earlier depth-derived n.)
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                # NOTE(review): BN scale is initialized to 0.5 instead of the
                # usual 1.0 — presumably intentional for network-slimming
                # sparsity training; confirm before "fixing".
                m.weight.data.fill_(0.5)
                m.bias.data.zero_()

    def _make_denseblock(self, block, blocks, cfg):
        """Build one dense stage of `blocks` BasicBlocks with per-layer cfg."""
        layers = []
        assert blocks == len(cfg), 'Length of the cfg parameter is not right.'
        for i in range(blocks):
            # Currently we fix the expansion ratio as the default value
            layers.append(block(self.inplanes, cfg = cfg[i], growthRate=self.growthRate, dropRate=self.dropRate))
            # Dense connectivity grows the channel count by growthRate per layer.
            self.inplanes += self.growthRate

        return nn.Sequential(*layers)

    def _make_transition(self, compressionRate, cfg):
        """Build a transition layer; updates self.inplanes to the compressed width."""
        # cfg is a number in this case.
        inplanes = self.inplanes
        outplanes = int(math.floor(self.inplanes // compressionRate))
        self.inplanes = outplanes
        return Transition(inplanes, outplanes, cfg)

    def forward(self, x):
        """Run the network: stem conv, three dense stages, BN head, classifier."""
        x = self.conv1(x)

        x = self.trans1(self.dense1(x))
        x = self.trans2(self.dense2(x))
        x = self.dense3(x)
        x = self.bn(x)
        x = self.select(x)
        x = self.relu(x)
        x = self.avgpool(x)

        # Flatten to (batch, features) for the linear classifier.
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
26efa9cd9d7cae255eda7f9c5e0c5c204337c52a | 300 | py | Python | instagram/json_import.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2020-04-03T22:15:44.000Z | 2022-02-26T05:22:55.000Z | instagram/json_import.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 6 | 2021-02-08T20:40:57.000Z | 2022-02-20T07:19:56.000Z | my-project-env/lib/python3.6/site-packages/instagram/json_import.py | wizzicollo/animated-fiesta | 92a449b4b632ece5a5c73b5344cafe02d8872586 | [
"MIT"
] | null | null | null | try:
import simplejson
except ImportError:
try:
import json as simplejson
except ImportError:
try:
from django.utils import simplejson
except ImportError:
raise ImportError('A json library is required to use this python library')
| 27.272727 | 87 | 0.63 |
acf6dede8bd2e838616ca2ac9069da77b8b0af50 | 2,024 | py | Python | hax/hax/queue/cli.py | papan-singh/cortx-hare | 4d6a533750dffe0b71c633a3707da79d9883b3dd | [
"Apache-2.0"
] | null | null | null | hax/hax/queue/cli.py | papan-singh/cortx-hare | 4d6a533750dffe0b71c633a3707da79d9883b3dd | [
"Apache-2.0"
] | null | null | null | hax/hax/queue/cli.py | papan-singh/cortx-hare | 4d6a533750dffe0b71c633a3707da79d9883b3dd | [
"Apache-2.0"
] | null | null | null | import logging
import sys
from typing import NamedTuple
import click
import inject
from hax.common import di_configuration
from hax.queue.publish import BQPublisher, EQPublisher, Publisher
# Parsed CLI state: the raw payload string, the entry type, and the queue
# publisher (EQPublisher or BQPublisher) that will send it.
AppCtx = NamedTuple('AppCtx', [('payload', str), ('type', str),
                               ('publisher', Publisher)])
def _setup_logging():
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] %(message)s')
@click.command()
@click.argument('queue',
                type=click.Choice(['eq', 'bq'], case_sensitive=False),
                required=True)
@click.argument('type', type=str, required=True)
@click.argument('payload', type=str, required=True)
@click.pass_context
def parse_opts(ctx, queue: str, type: str, payload: str):
    """Send entry to target queue.

    \b
    QUEUE Name of the target queue. Supported values: "eq" (Event Queue), \
"bq" (Broadcast Queue).
    TYPE Type of the entry.
    PAYLOAD Entry payload encoded as JSON value.

    """
    ctx.ensure_object(dict)
    # Map each queue name to its publisher class. Both constructors happen to
    # take zero arguments today; if that changes, this lookup must be reworked.
    publisher_classes = {'eq': EQPublisher, 'bq': BQPublisher}
    publisher: Publisher = publisher_classes[queue.lower()]()
    ctx.obj['result'] = AppCtx(payload=payload,
                               type=type,
                               publisher=publisher)
    return ctx.obj
def main():
    """CLI entry point: configure logging and DI, parse argv, publish the
    entry, and log the resulting epoch/offset."""
    _setup_logging()
    # Configure the `inject` dependency-injection container with the
    # project-wide bindings.
    inject.configure(di_configuration)
    try:
        raw_ctx = parse_opts(args=sys.argv[1:],
                             standalone_mode=False,
                             obj={})
        # With standalone_mode=False, click may return something other than
        # our context dict (e.g. after showing --help); exit with an error
        # status in that case.
        if type(raw_ctx) is not dict:
            exit(1)
        app_context = raw_ctx['result']
        pub = app_context.publisher
        offset = pub.publish(app_context.type, app_context.payload)
        logging.info('Written to epoch: %s', offset)
    except Exception:
        # Any failure is logged with its traceback; the process ends normally.
        logging.exception('Exiting with failure')
| 31.138462 | 77 | 0.615613 |
0226e1eab3629f683c312a2f12f0fa38686de4ed | 2,197 | py | Python | nadypy/api/relay/get_relay_relay.py | Nadybot/nadypy | ae6cbb886d233fde491aee501cb72bf993f3a02f | [
"MIT"
] | null | null | null | nadypy/api/relay/get_relay_relay.py | Nadybot/nadypy | ae6cbb886d233fde491aee501cb72bf993f3a02f | [
"MIT"
] | null | null | null | nadypy/api/relay/get_relay_relay.py | Nadybot/nadypy | ae6cbb886d233fde491aee501cb72bf993f3a02f | [
"MIT"
] | null | null | null | from typing import Any, Dict, Optional, Union
import httpx
from ...client import AuthenticatedClient
from ...models.relay_config import RelayConfig
from ...types import Response
def _get_kwargs(
    relay: str,
    *,
    client: AuthenticatedClient,
) -> Dict[str, Any]:
    """Assemble the request kwargs for ``GET /relay/{relay}``."""
    return {
        "url": "{}/relay/{relay}".format(client.base_url, relay=relay),
        "headers": client.get_headers(),
        "timeout": client.get_timeout(),
    }
def _parse_response(*, response: httpx.Response) -> Optional[Union[Any, RelayConfig]]:
    """Deserialize a 200 body into RelayConfig; 404 (and anything else) maps to None."""
    if response.status_code == 200:
        return RelayConfig.from_dict(response.json())
    if response.status_code == 404:
        return None
    return None
def _build_response(*, response: httpx.Response) -> Response[Union[Any, RelayConfig]]:
    """Wrap a raw httpx response in the generated Response envelope."""
    parsed = _parse_response(response=response)
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=parsed,
    )
def sync_detailed(
    relay: str,
    *,
    client: AuthenticatedClient,
) -> Response[Union[Any, RelayConfig]]:
    """Fetch a single relay synchronously, returning the full Response envelope."""
    request_kwargs = _get_kwargs(relay=relay, client=client)
    raw_response = client.client.get(**request_kwargs)
    return _build_response(response=raw_response)
def sync(
    relay: str,
    *,
    client: AuthenticatedClient,
) -> Optional[Union[Any, RelayConfig]]:
    """Get a single relay"""
    detailed = sync_detailed(relay=relay, client=client)
    return detailed.parsed
async def asyncio_detailed(
    relay: str,
    *,
    client: AuthenticatedClient,
) -> Response[Union[Any, RelayConfig]]:
    """Fetch a single relay asynchronously, returning the full Response envelope."""
    request_kwargs = _get_kwargs(relay=relay, client=client)
    raw_response = await client.async_client.get(**request_kwargs)
    return _build_response(response=raw_response)
async def asyncio(
    relay: str,
    *,
    client: AuthenticatedClient,
) -> Optional[Union[Any, RelayConfig]]:
    """Get a single relay"""
    detailed = await asyncio_detailed(relay=relay, client=client)
    return detailed.parsed
| 20.92381 | 86 | 0.634502 |
dc82cd5ae5343f8a9ef48f55edd931c506632cfe | 229 | py | Python | setup.py | showmethedatawiki/job_posts | 2c711aaa924feac82f317fd1f7c8433705ddbcfc | [
"MIT"
] | null | null | null | setup.py | showmethedatawiki/job_posts | 2c711aaa924feac82f317fd1f7c8433705ddbcfc | [
"MIT"
] | null | null | null | setup.py | showmethedatawiki/job_posts | 2c711aaa924feac82f317fd1f7c8433705ddbcfc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from job_posts import __version__
from distutils.core import setup
# Minimal distutils packaging metadata. The version is imported from the
# package itself (job_posts.__version__) so it is defined in one place.
setup(name="Data Scrap",
      version=__version__,
      description="Job posts data scrapper",
      author='Showmethedata.wiki'
      )
| 22.9 | 44 | 0.71179 |
d2598310e852cf771be8a080cf30899ce8ddce05 | 18,623 | py | Python | benchmark/opperf/utils/op_registry_utils.py | lanking520/incubator-mxnet | 755541cfe8bd8b8ca0e91414be29345a4ffe8333 | [
"BSL-1.0",
"Apache-2.0"
] | 1 | 2021-05-07T10:49:37.000Z | 2021-05-07T10:49:37.000Z | benchmark/opperf/utils/op_registry_utils.py | lanking520/incubator-mxnet | 755541cfe8bd8b8ca0e91414be29345a4ffe8333 | [
"BSL-1.0",
"Apache-2.0"
] | null | null | null | benchmark/opperf/utils/op_registry_utils.py | lanking520/incubator-mxnet | 755541cfe8bd8b8ca0e91414be29345a4ffe8333 | [
"BSL-1.0",
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities to interact with MXNet operator registry."""
from operator import itemgetter
from mxnet import runtime
import mxnet as mx
from benchmark.opperf.rules.default_params import DEFAULTS_INPUTS, MX_OP_MODULE
def _select_ops(operator_names, filters=("_contrib", "_"), merge_op_forward_backward=True):
    """Filter a list of operator names and build an operator dictionary.

    Names starting with any of `filters` are dropped (by default contrib ops
    '_contrib*' and internal ops '_*'), as are all known-deprecated operators.
    Each kept operator maps to {'has_backward', 'nd_op_handle'}. When
    `merge_op_forward_backward` is True, '_backward*' names are folded into
    their forward counterpart by setting its 'has_backward' flag.

    Parameters
    ----------
    operator_names: List[str]
        List of operator names.
    filters: Tuple(str)
        Name prefixes to exclude.
    merge_op_forward_backward: Boolean, Default - True
        Merge forward and backward operators for a given op in to one op.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle"}}
    """
    selected_ops = {}
    backward_op_names = []

    # Deprecated operators that must never be benchmarked.
    filters += ("normal", "uniform", "BatchNorm_v1", "Flatten", "contrib_CTCLoss", "Pad", "Cast",
                "Pooling_v1", "Concat", "Reshape", "Convolution_v1", "SliceChannel", "Crop",
                "crop", "onehot_encode", "batch_take")
    if merge_op_forward_backward:
        filters += ("_backward",)

    for name in operator_names:
        if not name.startswith(filters):
            selected_ops[name] = {"has_backward": False,
                                  "nd_op_handle": getattr(MX_OP_MODULE, name)}
        if name.startswith("_backward_"):
            backward_op_names.append(name)

    if merge_op_forward_backward:
        # Mark every forward op that has a registered backward counterpart.
        for backward_name in backward_op_names:
            forward_name = backward_name.split("_backward_")[1]
            if forward_name in selected_ops:
                selected_ops[forward_name]["has_backward"] = True
    return selected_ops
def _set_op_arguments(mx_operators):
    """Attach argument metadata (narg, arg_names, arg_types) to each operator entry, in place."""
    for op_name, op_info in mx_operators.items():
        op_args = mx.operator.get_operator_arguments(op_name)
        op_info["params"] = {"narg": op_args.narg,
                             "arg_names": op_args.names,
                             "arg_types": op_args.types}
def _get_all_mxnet_operators():
    """Return all benchmarkable MXNet operators with their argument metadata."""
    # Step 1 - registered op names, filtered of contrib/internal/deprecated ops.
    selected_ops = _select_ops(mx.operator.get_all_registered_operators())
    # Step 2 - attach parameter metadata in place.
    _set_op_arguments(selected_ops)
    return selected_ops
# NOTE(review): this definition is dead code — it is immediately shadowed by
# the second `prepare_op_inputs(op, arg_params)` defined right below, so it can
# never be called through the module. Kept byte-identical; consider deleting.
def prepare_op_inputs(arg_params, arg_values):
    # For each candidate value-dict, keep only the keys that are actual
    # argument names of the operator.
    inputs = []

    for arg_value in arg_values:
        inp = {}
        for arg_name in arg_params["params"]["arg_names"]:
            if arg_name in arg_value:
                inp[arg_name] = arg_value[arg_name]
        inputs.append(inp)
    return inputs
def prepare_op_inputs(op, arg_params):
    """Build the list of concrete input dicts used to benchmark operator `op`.

    For every argument of the operator, a list of default values is looked up
    in DEFAULTS_INPUTS (with op-specific overrides); the lists are then zipped
    into one dict per input combination. The elif chain below is
    order-sensitive: earlier, more specific rules win.
    """
    inputs = []

    # 4d tensor is needed only by following two ops
    ops_4d = ['depth_to_space', 'space_to_depth']

    # 3d tensor is needed by following ops
    ops_3d = ['CTCLoss', 'ctc_loss']

    # For ops with args that need to change shape/value for different ops
    custom_data = ['Activation', 'LeakyReLU', 'Softmax', 'BilinearSampler', 'GridGenerator', 'sample_multinomial', 'linalg_maketrian']

    int_only = ['random_randint']

    # Prepare op to default input mapping
    arg_values = {}
    for arg_name, arg_type in zip(arg_params["params"]["arg_names"],
                                  arg_params["params"]["arg_types"]):
        # Due to lack of an internal API for fetching permissible dtype
        # added a logic for using float only dtype as input for ops that take only floats
        # same for randint (which is the only op that takes only int as input)
        # rest all operators take int as well as float
        if op in int_only and arg_name == "dtype":
            arg_values[arg_name] = DEFAULTS_INPUTS["dtype_int"]
        elif op.startswith(('random','sample')) and arg_name == "dtype":
            arg_values[arg_name] = DEFAULTS_INPUTS["dtype_float"]
        elif "NDArray" in arg_type and op == "ravel_multi_index":
            arg_values[arg_name] = DEFAULTS_INPUTS["ravel_data"]
        elif op in custom_data and arg_name + "_" + op.lower() in DEFAULTS_INPUTS:
            # Op-specific override keyed as "<arg>_<op-lowercase>".
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_" + op.lower()]
        elif "NDArray" in arg_type and arg_name + "_nd" in DEFAULTS_INPUTS:
            # NOTE(review): this generic "_nd" rule precedes the "_4d"/"_3d"
            # rules below, so if both keys exist for an arg the 4d/3d variants
            # are unreachable for ops_4d/ops_3d — confirm against
            # DEFAULTS_INPUTS whether that is intended.
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_nd"]
        elif "NDArray" in arg_type and op in ops_4d and arg_name + "_4d" in DEFAULTS_INPUTS:
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_4d"]
        elif "NDArray" in arg_type and op in ops_3d and arg_name + "_3d" in DEFAULTS_INPUTS:
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_3d"]
        elif "NDArray" in arg_type and op == 'softmax_cross_entropy':
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_smce"]
        elif arg_name in DEFAULTS_INPUTS:
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name]
        elif "float" in arg_type and arg_name + "_float" in DEFAULTS_INPUTS:
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_float"]
        elif "Shape" in arg_type and arg_name + "_shape" in DEFAULTS_INPUTS:
            # This is for cases where in some ops 'axis' is Int in some ops a shape tuple.
            # Ex: axis in sum is shape, axis in sort is int.
            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_shape"]

    # Number of different inputs we want to use to test
    # the operator
    # NOTE(review): max() raises ValueError if no argument matched any rule
    # (empty arg_values) — presumably every benchmarked op has at least one
    # default; confirm.
    num_input_combinations = max([len(value) for value in arg_values.values()])

    # Prepare key/value args for param to input value
    for idx in range(num_input_combinations):
        inp = {}
        for arg_name in arg_params["params"]["arg_names"]:
            if arg_name in arg_values:
                if len(arg_values[arg_name]) == num_input_combinations:
                    inp[arg_name] = arg_values[arg_name][idx]
                else:
                    # This is required when we want to use a param same across all
                    # input combination. Example: keeping low and high same for random sampling
                    # operator for all different types of Tensor shape.
                    inp[arg_name] = arg_values[arg_name][0]
        inputs.append(inp)
    return inputs
def get_all_unary_operators():
    """Gets all Unary operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    # Cast operators (cast & amp_cast are unary)
    cast_ops = ['cast', 'amp_cast']

    mx_operators = _get_all_mxnet_operators()

    unary_ops = {}
    for op_name, op_params in mx_operators.items():
        params = op_params["params"]
        takes_single_data = params["narg"] == 1 and "data" in params["arg_names"]
        if takes_single_data or op_name in cast_ops:
            unary_ops[op_name] = op_params
    return unary_ops
def get_all_broadcast_binary_operators():
    """Gets all binary broadcast operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    broadcast_ops = {}
    for op_name, op_params in _get_all_mxnet_operators().items():
        params = op_params["params"]
        if (op_name.startswith("broadcast_")
                and params["narg"] == 2
                and "lhs" in params["arg_names"]
                and "rhs" in params["arg_names"]):
            broadcast_ops[op_name] = op_params
    return broadcast_ops
def get_all_misc_binary_operators():
    """Gets all miscellaneous binary operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    # Binary ops that do not fit the broadcast_/elemwise_ naming schemes.
    misc_binary_op_names = ("choose_element_0index", "reshape_like")

    mx_operators = _get_all_mxnet_operators()

    binary_misc_mx_operators = {}
    for op_name in mx_operators:
        # Both branches of the original if/elif chain performed the same
        # assignment; a membership test is equivalent and easier to extend.
        if op_name in misc_binary_op_names:
            binary_misc_mx_operators[op_name] = mx_operators[op_name]
    return binary_misc_mx_operators
def get_all_elemen_wise_binary_operators():
    """Gets all binary elemen_wise operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    mx_operators = _get_all_mxnet_operators()

    elemwise_ops = {}
    for op_name, op_params in mx_operators.items():
        params = op_params["params"]
        has_lhs_rhs = (params["narg"] == 2
                       and "lhs" in params["arg_names"]
                       and "rhs" in params["arg_names"])
        if (op_name.startswith("elemwise_") and has_lhs_rhs) or op_name == "ElementWiseSum":
            elemwise_ops[op_name] = op_params
    return elemwise_ops
def get_all_random_sampling_operators():
    """Gets all Random Sampling operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    # Additional Random Sampling ops which do not start with "random_" or "sample_"
    extra_sampling_ops = ('GridGenerator', 'BilinearSampler')
    return {name: info for name, info in _get_all_mxnet_operators().items()
            if name.startswith(("random_", "sample_")) or name in extra_sampling_ops}
def get_all_linalg_operators():
    """Gets all Linear Algebra operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    extra_linalg_ops = ('moments',)
    # linalg_potrf already has an independent, dedicated test.
    skip_ops = ('linalg_potrf',)
    return {name: info for name, info in _get_all_mxnet_operators().items()
            if (name.startswith("linalg_") and name not in skip_ops)
            or name in extra_linalg_ops}
def get_all_reduction_operators():
    """Gets all Reduction operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    all_ops = _get_all_mxnet_operators()
    required_args = {"data", "axis", "exclude", "keepdims"}

    def _is_reduction(name, info):
        # Reductions are identified by their 4-argument signature; 'norm'
        # is included explicitly since its signature differs.
        params = info["params"]
        return ((params["narg"] == 4
                 and required_args.issubset(set(params["arg_names"])))
                or name == 'norm')

    return {name: info for name, info in all_ops.items()
            if _is_reduction(name, info)}
def get_all_nn_activation_operators():
    """Gets all NN Activation operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    nn_activation_ops = frozenset(['Softmax', 'SoftmaxActivation', 'softmin', 'Activation',
                                   'LeakyReLU', 'hard_sigmoid', 'softmax', 'log_softmax'])
    return {name: info for name, info in _get_all_mxnet_operators().items()
            if name in nn_activation_ops}
def get_all_optimizer_operators():
    """Gets all Optimizer operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    # Optimizer update ops are not discoverable via a name prefix; enumerate them.
    optimizer_ops = frozenset(['mp_sgd_update', 'signum_update', 'rmspropalex_update',
                               'ftml_update', 'rmsprop_update', 'sgd_mom_update',
                               'signsgd_update', 'mp_sgd_mom_update', 'ftrl_update',
                               'sgd_update', 'adam_update', 'mp_nag_mom_update',
                               'nag_mom_update', 'lamb_update_phase1', 'lamb_update_phase2'])
    # Iterate items once; the previous version unpacked op_params without using it.
    return {op_name: op_info for op_name, op_info in _get_all_mxnet_operators().items()
            if op_name in optimizer_ops}
def get_all_sorting_searching_operators():
    """Gets all Sorting and Searching operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    sort_search_ops = frozenset(['sort', 'argsort', 'argmax', 'argmin', 'topk'])
    # Iterate items once; the previous version unpacked op_params without using it.
    return {op_name: op_info for op_name, op_info in _get_all_mxnet_operators().items()
            if op_name in sort_search_ops}
def get_all_rearrange_operators():
    """Gets all array rearrange operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    rearrange_ops = frozenset(['transpose', 'swapaxes', 'flip',
                               'depth_to_space', 'space_to_depth'])
    # Iterate items once; the previous version unpacked op_params without using it.
    return {op_name: op_info for op_name, op_info in _get_all_mxnet_operators().items()
            if op_name in rearrange_ops}
def get_all_indexing_routines():
    """Gets all indexing routines registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    # @ChaiBapchya unravel_index errors out on certain inputs
    # tracked here https://github.com/apache/incubator-mxnet/issues/16771
    # @ChaiBapchya scatter_nd errors with core dump
    # tracked here https://github.com/apache/incubator-mxnet/issues/17480
    indexing_routines = frozenset(['slice', 'slice_axis', 'slice_like', 'take', 'one_hot',
                                   'where', 'ravel_multi_index', 'gather_nd', 'pick'])
    return {name: info for name, info in _get_all_mxnet_operators().items()
            if name in indexing_routines}
def get_all_loss_operators():
    """Gets all Neural Network loss operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    loss_ops = frozenset(['smooth_l1', 'CTCLoss', 'ctc_loss', 'MakeLoss',
                          'softmax_cross_entropy'])
    # Iterate items once; the previous version unpacked op_params without using it.
    return {op_name: op_info for op_name, op_info in _get_all_mxnet_operators().items()
            if op_name in loss_ops}
def get_operators_with_no_benchmark(operators_with_benchmark):
    """Gets all MXNet operators with no benchmark.

    Retrieve all operators registered with MXNet and prepares a list of
    operators that are not part of given operators with benchmark list.

    Parameters
    ----------
    operators_with_benchmark: list[Str]
        List of operator names that has benchmarks

    Returns
    -------
    list[Str]
        List of operator names that is registered with MXNet but has no benchmarks.
    """
    all_mxnet_operators = _get_all_mxnet_operators().keys()
    # Set difference; note the result's ordering is therefore unspecified.
    return list(set(all_mxnet_operators) - set(operators_with_benchmark))
def get_current_runtime_features():
    """Get all current runtime time flags/configuration for MXNet.

    Returns
    -------
    Map of current runtime features such as compile flags used by MXNet.
    Example: {'runtime_features': {'OPENCV' : '✔ OPENCV', 'CUDA': '✖ CUDA'}}
    """
    # Sort by feature name; dict preserves the sorted insertion order.
    feature_map = dict(sorted(runtime.Features().items(), key=itemgetter(0)))
    return {'runtime_features': feature_map}
| 38.006122 | 136 | 0.675992 |
715b353571fe54c39a602fd48d630dd9b18e0025 | 1,878 | py | Python | sopex/chunker.py | codemaniac/sopex | 64a503ea9267a5a9ca18e40b026d5ac22c0e6ae8 | [
"BSD-2-Clause"
] | 10 | 2017-02-09T17:10:32.000Z | 2021-10-20T17:03:27.000Z | machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/chunker.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | 2 | 2017-02-20T13:43:40.000Z | 2020-06-05T14:04:24.000Z | machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/chunker.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | 3 | 2017-02-20T13:44:39.000Z | 2020-06-05T13:53:10.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import simplejson as json
import pyparsing
from jpype import *
class PennTreebackChunker(object):
    """Wrapper around the Stanford lexicalized parser, driven through JPype.

    Starting an instance boots a JVM (one per process) with every jar found in
    the package-local ``jars`` directory on the classpath, then parses English
    sentences into Penn Treebank bracket structures converted to nested dicts.
    """

    def __init__(self):
        # Locate the sibling 'jars' directory next to this module and build a
        # platform-appropriate Java classpath from all files inside it.
        path = os.path.realpath(__file__)
        path = path[:path.rfind(os.sep)] + os.sep + 'jars'
        classpath = os.pathsep.join(path+os.sep+jar for jar in os.listdir(path))
        startJVM(getDefaultJVMPath(), "-Djava.class.path=%s" % classpath)
        # NOTE(review): String is bound but never used afterwards -- confirm
        # whether it can be removed.
        String = JClass("java.lang.String")
        self.StringReader = JClass("java.io.StringReader")
        self.StringWriter = JClass("java.io.StringWriter")
        self.PrintWriter = JClass("java.io.PrintWriter")
        PTBTokenizer = JClass("edu.stanford.nlp.process.PTBTokenizer")
        LexicalizedParser = JClass("edu.stanford.nlp.parser.lexparser.LexicalizedParser")
        CoreLabelTokenFactory = JClass("edu.stanford.nlp.process.CoreLabelTokenFactory")
        self.TreePrint = JClass("edu.stanford.nlp.trees.TreePrint")
        self.tokenizerFactory = PTBTokenizer.factory(CoreLabelTokenFactory(), "")
        # Loads the default English PCFG model shipped with the parser jars.
        self.lp = LexicalizedParser.loadModel()
        # Grammar for parsing the parser's own "(TAG child ...)" output.
        self.penn_treebank_expr = pyparsing.nestedExpr('(', ')')

    def _nestedlist2dict(self, d, l):
        # Recursively fold a nested [tag, child, child, ...] list into dict d
        # under key l[0].
        # NOTE(review): when a node has multiple non-list children, each
        # assignment overwrites the previous one -- confirm this is intended.
        if not l[0] in d:
            d[l[0]] = {}
        for v in l[1:]:
            if type(v) == list:
                self._nestedlist2dict(d[l[0]], v)
            else:
                d[l[0]] = v

    def chunk_string(self, sentence, json_response=False):
        """Parse *sentence* and return its phrase structure as a nested dict
        (or a JSON string when json_response is True)."""
        rawWords = self.tokenizerFactory.getTokenizer(self.StringReader(sentence)).tokenize()
        parse = self.lp.apply(rawWords)
        # Render the parse tree to a single-line Penn Treebank string.
        stringWriter = self.StringWriter()
        tp = self.TreePrint("oneline")
        tp.printTree(parse, self.PrintWriter(stringWriter))
        penn = stringWriter.toString()
        # Convert the bracketed string into nested Python lists, then dicts.
        penn = self.penn_treebank_expr.parseString(penn).asList()[0]
        penn_str = {}
        self._nestedlist2dict(penn_str, penn)
        return json.dumps(penn_str) if json_response else penn_str

    def close(self):
        # Shuts down the JVM; the instance is unusable afterwards.
        shutdownJVM()
83ec198973cff51d85f5b52cb784630791b320f5 | 3,001 | py | Python | ciara_python/ciara.py | ScialdoneLab/CIARA_python | 6fea1bd716ea2624764c0166d55b524d6d72ead6 | [
"MIT"
] | 1 | 2022-03-28T17:37:03.000Z | 2022-03-28T17:37:03.000Z | ciara_python/ciara.py | ScialdoneLab/CIARA_python | 6fea1bd716ea2624764c0166d55b524d6d72ead6 | [
"MIT"
] | null | null | null | ciara_python/ciara.py | ScialdoneLab/CIARA_python | 6fea1bd716ea2624764c0166d55b524d6d72ead6 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.stats import fisher_exact
import scipy.sparse as sp
import multiprocessing
from functools import partial
def perform_fisher(nn_gene_expression, binary_expression, p_value, odds_ratio=2):
    """Test whether a gene's expression is enriched in a cell's neighbourhood.

    Runs a one-sided (greater) Fisher exact test comparing expression inside
    the neighbourhood against expression across all cells.

    Parameters
    ----------
    nn_gene_expression : 1-D bool array
        Binarized expression restricted to the nearest neighbours of a cell.
    binary_expression : 1-D bool array
        Binarized expression of the same gene over all cells.
    p_value : float
        Significance threshold the test must beat.
    odds_ratio : float, optional
        Minimum odds ratio required to call the neighbourhood enriched.

    Returns
    -------
    float
        The Fisher p-value when the neighbourhood is significantly enriched
        (p < p_value and OR > odds_ratio), otherwise 1.
    """
    # No expressed neighbour at all: trivially not enriched.
    if np.sum(nn_gene_expression) == 0:
        return 1
    # 2x2 contingency table: expressed/not-expressed x inside/outside neighbourhood.
    contingency = np.array(
        [[np.sum(nn_gene_expression),
          np.sum(binary_expression) - np.sum(nn_gene_expression)],
         [np.sum(~nn_gene_expression),
          np.sum(~binary_expression) - np.sum(~nn_gene_expression)]])
    oddsr_test, p_test = fisher_exact(contingency, alternative='greater')
    if p_test < p_value and oddsr_test > odds_ratio:
        # Bug fix: the original also computed an unused local p_0 here.
        return p_test
    return 1
def ciara_gene(gene_idx, p_value, odds_ratio, local_region, approximation):
    """Compute the CIARA p-value for one gene.

    Relies on the module globals ``gene_expressions_g`` and ``knn_matrix_g``
    installed by :func:`ciara` (inherited by the workers through fork).
    """
    expression = gene_expressions_g[gene_idx]
    # Binarize expression around the gene's median.
    is_high = expression > np.median(expression)
    if approximation:
        # Only test neighbourhoods centered on cells that express the gene.
        cells_to_test = np.nditer(np.where(is_high))
    else:
        cells_to_test = range(knn_matrix_g.shape[0])
    neighbourhood_p_values = np.array([])
    for cell in cells_to_test:
        neighbours = knn_matrix_g[cell, :] == 1
        p_sub = perform_fisher(is_high[neighbours], is_high, p_value, odds_ratio)
        neighbourhood_p_values = np.append(neighbourhood_p_values, p_sub)
    # The gene is "local" only if enough neighbourhoods pass the threshold.
    if np.sum(neighbourhood_p_values < p_value) >= local_region:
        return np.min(neighbourhood_p_values)
    return 1
def ciara(norm_adata, n_cores, p_value, odds_ratio, local_region, approximation):
    """Run CIARA over all background genes of *norm_adata* in parallel.

    Writes the per-gene results back into ``norm_adata.var["CIARA_p_value"]``
    (genes outside the background get NaN) and returns None.

    NOTE(review): assumes *norm_adata* is an AnnData-like object with a boolean
    ``var["CIARA_background"]`` column and a KNN graph in
    ``obsp["connectivities"]`` -- confirm with callers.
    """
    # "fork" is required so workers inherit the module globals set below;
    # force=True overrides any start method chosen earlier in the process.
    multiprocessing.set_start_method("fork", force=True)
    background = norm_adata.X[:, norm_adata.var["CIARA_background"]]
    if sp.issparse(background):
        background = background.toarray()
    # Expose the per-gene expression vectors and the KNN adjacency matrix to
    # the worker processes through module globals (shared via fork).
    global gene_expressions_g
    gene_expressions_g = [background[:, i].flatten() for i in range(np.shape(background)[1])]
    global knn_matrix_g
    knn_matrix_g = norm_adata.obsp["connectivities"].toarray()
    pool = multiprocessing.Pool(n_cores)
    # Aim for roughly 4 chunks per core; round up when there is a remainder.
    chunksize, extra = divmod(len(gene_expressions_g), 4 * n_cores)
    if extra:
        chunksize += 1
    print("\n## Running on " + str(n_cores) + " cores with a chunksize of " + str(chunksize))
    # Bind the scalar parameters so workers only receive the gene index.
    temp = partial(ciara_gene, p_value=p_value, odds_ratio=odds_ratio,
                   local_region=local_region, approximation=approximation)
    results = pool.map(func=temp, iterable=range(len(gene_expressions_g)), chunksize=chunksize)
    pool.close()
    pool.join()
    # Scatter the background-gene results back to full var length; NaN elsewhere.
    p_values_output = [np.NAN for i in range(len(norm_adata.var_names))]
    for index, gene_pos in enumerate(np.where(norm_adata.var["CIARA_background"])[0]):
        p_values_output[gene_pos] = results[index]
    # Replace any previous run's column.
    norm_adata.var.drop(columns="CIARA_p_value", inplace=True, errors='ignore')
    norm_adata.var.insert(0, "CIARA_p_value", p_values_output)
    print('\n---- Finished sucessfully! ----')
    return
54c6e840b17afd384f67919ef5019680c7e68c59 | 16,884 | py | Python | src/azure-cli-core/azure/cli/core/_help.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | null | null | null | src/azure-cli-core/azure/cli/core/_help.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | 3 | 2021-03-26T00:48:20.000Z | 2022-03-29T22:05:39.000Z | src/azure-cli-core/azure/cli/core/_help.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | 1 | 2017-12-28T04:51:44.000Z | 2017-12-28T04:51:44.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import argparse
import sys
import textwrap
from azure.cli.core.help_files import _load_help_file
from azure.cli.core.commands import ExtensionCommandSource
import azure.cli.core.azlogging as azlogging
__all__ = ['print_detailed_help', 'print_welcome_message', 'GroupHelpFile', 'CommandHelpFile']
FIRST_LINE_PREFIX = ': '
PRIVACY_STATEMENT = """
Welcome to Azure CLI!
---------------------
Use `az -h` to see available commands or go to https://aka.ms/cli.
Telemetry
---------
The Azure CLI collects usage data in order to improve your experience.
The data is anonymous and does not include commandline argument values.
The data is collected by Microsoft.
You can change your telemetry settings with `az configure`.
"""
logger = azlogging.get_az_logger(__name__)
def show_privacy_statement():
    """Print the telemetry/privacy notice the first time the CLI is used."""
    from azure.cli.core._config import az_config, set_global_config_value
    ran_before = az_config.getboolean('core', 'first_run', fallback=False)
    if ran_before:
        return
    print(PRIVACY_STATEMENT, file=sys.stdout)
    # Remember that the statement was shown so it is not printed again.
    set_global_config_value('core', 'first_run', 'yes')
def show_help(nouns, parser, is_group):
    """Load and print detailed help for the command or group named by *nouns*."""
    delimiters = ' '.join(nouns)
    if is_group:
        help_file = GroupHelpFile(delimiters, parser)
    else:
        help_file = CommandHelpFile(delimiters, parser)
    help_file.load(parser)
    if not nouns:
        # Top-level help: show the version hint and drop the command banner.
        print("\nFor version info, use 'az --version'")
        help_file.command = ''
    print_detailed_help(help_file)
def show_welcome(parser):
    """Show the first-run privacy statement, the banner, and base commands."""
    show_privacy_statement()
    print_welcome_message()
    print_description_list(GroupHelpFile('', parser).children)
def print_welcome_message():
    """Print the ASCII-art banner shown when the CLI runs with no arguments."""
    # NOTE(review): the banner's internal whitespace appears mangled in this
    # copy of the file -- verify the rendered output against upstream.
    _print_indent(r"""
/\
/ \ _____ _ _ __ ___
/ /\ \ |_ / | | | \'__/ _ \
/ ____ \ / /| |_| | | | __/
/_/ \_\/___|\__,_|_| \___|
""")
    _print_indent('\nWelcome to the cool new Azure CLI!\n\nHere are the base commands:\n')
def print_detailed_help(help_file):
    """Print the complete help view for a command or group.

    Order: extension warning (if any), header, then arguments for commands or
    subgroup/command listings for groups, then examples when present.
    """
    _print_extensions_msg(help_file)
    _print_header(help_file)
    if help_file.type == 'command':
        _print_indent('Arguments')
        print_arguments(help_file)
    elif help_file.type == 'group':
        _print_groups(help_file)
    if help_file.examples:
        _print_examples(help_file)
def print_description_list(help_files):
    """Print one aligned "name : summary" line per help file, sorted by name."""
    indent = 1
    max_name_length = max((len(f.name) for f in help_files), default=0)
    for hf in sorted(help_files, key=lambda h: h.name):
        summary = FIRST_LINE_PREFIX + hf.short_summary if hf.short_summary else ''
        _print_indent('{0}{1}{2}'.format(hf.name,
                                         _get_column_indent(hf.name, max_name_length),
                                         summary),
                      indent,
                      _get_hanging_indent(max_name_length, indent))
def print_arguments(help_file):
    """Print the argument section of a command's help, grouped and aligned.

    Parameters are sorted by group priority, then required-first, then name.
    Returns the last indent level used (callers may ignore it).
    """
    indent = 1
    if not help_file.parameters:
        _print_indent('None', indent)
        _print_indent('')
        return
    # (Removed an unreachable duplicate empty-parameters check that followed
    # the early return above.)
    required_tag = ' [Required]'
    max_name_length = max(len(p.name) + (len(required_tag) if p.required else 0)
                          for p in help_file.parameters)
    last_group_name = None
    group_registry = ArgumentGroupRegistry(
        [p.group_name for p in help_file.parameters if p.group_name])

    def _get_parameter_key(parameter):
        # Composite sort key: group priority, then required before optional
        # (str(False) < str(True)), then parameter name.
        return '{}{}{}'.format(group_registry.get_group_priority(parameter.group_name),
                               str(not parameter.required),
                               parameter.name)

    for p in sorted(help_file.parameters, key=_get_parameter_key):
        indent = 1
        required_text = required_tag if p.required else ''
        short_summary = p.short_summary if p.short_summary else ''
        # Strip argparse's auto-generated "Possible values" blurb; allowed
        # values are re-added below in a consistent format.
        possible_values_index = short_summary.find(' Possible values include')
        short_summary = short_summary[0:possible_values_index
                                      if possible_values_index >= 0 else len(short_summary)]
        short_summary += _get_choices_defaults_sources_str(p)
        short_summary = short_summary.strip()
        if p.group_name != last_group_name:
            if p.group_name:
                print('')
                print(p.group_name)
            last_group_name = p.group_name
        _print_indent(
            '{0}{1}{2}{3}'.format(
                p.name,
                _get_column_indent(p.name + required_text, max_name_length),
                required_text,
                FIRST_LINE_PREFIX + short_summary if short_summary else ''
            ),
            indent,
            _get_hanging_indent(max_name_length, indent)
        )
        indent = 2
        if p.long_summary:
            _print_indent('{0}'.format(p.long_summary.rstrip()), indent)
    return indent
class ArgumentGroupRegistry(object):  # pylint: disable=too-few-public-methods
    """Assigns a stable sort priority to argument group names."""

    def __init__(self, group_list):
        # Well-known groups get fixed priorities; every other group observed in
        # group_list is slotted in alphabetically starting at 2.
        self.priorities = {
            None: 0,
            'Resource Id Arguments': 1,
            'Generic Update Arguments': 998,
            'Global Arguments': 1000,
        }
        unknown_groups = [name for name in sorted(set(group_list))
                          if name not in self.priorities]
        for offset, name in enumerate(unknown_groups):
            self.priorities[name] = 2 + offset

    def get_group_priority(self, group_name):
        """Return the group's priority as a zero-padded six-digit string."""
        return "%06d" % self.priorities.get(group_name, 0)
def _print_extensions_msg(help_file):
    """Emit a warning when the command is provided by a CLI extension."""
    if help_file.type != 'command':
        return
    source = help_file.command_source
    if source and isinstance(source, ExtensionCommandSource):
        logger.warning(source.get_command_warn_msg())
def _print_header(help_file):
    """Print the Command/Group banner with the short and long summaries."""
    _print_indent('')
    _print_indent('Command' if help_file.type == 'command' else 'Group', 0)
    banner = 'az ' + help_file.command
    if help_file.short_summary:
        banner += FIRST_LINE_PREFIX + help_file.short_summary
    _print_indent(banner, 1)
    if help_file.long_summary:
        _print_indent('{0}'.format(help_file.long_summary.rstrip()), 2)
    _print_indent('')
def _print_groups(help_file):
    """Print 'Subgroups:' and 'Commands:' listings for a command group."""
    max_name_length = max(len(c.name) for c in help_file.children) if help_file.children else 0
    indent = 1

    def _print_items(items):
        for child in sorted(items, key=lambda h: h.name):
            summary = FIRST_LINE_PREFIX + child.short_summary if child.short_summary else ''
            # Summaries are single-line in listings.
            summary = summary.replace('\n', ' ')
            _print_indent(
                '{0}{1}{2}'.format(child.name,
                                   _get_column_indent(child.name, max_name_length),
                                   summary),
                indent,
                max_name_length + indent * 4 + 2)
        _print_indent('')

    subgroups = [c for c in help_file.children if isinstance(c, GroupHelpFile)]
    subcommands = [c for c in help_file.children if c not in subgroups]
    if subgroups:
        _print_indent('Subgroups:')
        _print_items(subgroups)
    if subcommands:
        _print_indent('Commands:')
        _print_items(subcommands)
def _get_choices_defaults_sources_str(p):
choice_str = ' Allowed values: {0}.'.format(', '.join(sorted([str(x) for x in p.choices]))) \
if p.choices else ''
default_str = ' Default: {0}.'.format(p.default) \
if p.default and p.default != argparse.SUPPRESS else ''
value_sources_str = ' Values from: {0}.'.format(', '.join(p.value_sources)) \
if p.value_sources else ''
return '{0}{1}{2}'.format(choice_str, default_str, value_sources_str)
def _print_examples(help_file):
    """Print the 'Examples' section: each example's name then its text."""
    indent = 0
    print('')
    _print_indent('Examples', indent)
    for e in help_file.examples:
        indent = 1
        _print_indent('{0}'.format(e.name), indent)
        indent = 2
        _print_indent('{0}'.format(e.text), indent)
        # Blank line between examples.
        print('')
class HelpObject(object):  # pylint: disable=too-few-public-methods
    """Base class whose short/long summaries are normalized on assignment
    (trimmed, first letter capitalized, terminal punctuation added) via
    _normalize_text."""

    def __init__(self, **kwargs):
        self._short_summary = ''
        self._long_summary = ''
        super(HelpObject, self).__init__(**kwargs)

    @property
    def short_summary(self):
        return self._short_summary

    @short_summary.setter
    def short_summary(self, value):
        self._short_summary = _normalize_text(value)

    @property
    def long_summary(self):
        return self._long_summary

    @long_summary.setter
    def long_summary(self, value):
        self._long_summary = _normalize_text(value)
class HelpFile(HelpObject):  # pylint: disable=too-few-public-methods,too-many-instance-attributes
    """Help data for one command or group, merged from the parser's
    description and any authored help data."""

    def __init__(self, delimiters):
        super(HelpFile, self).__init__()
        self.delimiters = delimiters
        # The final token of the space-separated delimiters is the display name.
        self.name = delimiters.split()[-1] if delimiters else delimiters
        self.command = delimiters
        self.type = ''
        self.short_summary = ''
        self.long_summary = ''
        self.examples = []

    def load(self, options):
        """Populate summaries from the argparse description, then overlay
        authored help data (inline string or registered help file)."""
        description = getattr(options, 'description', None)
        try:
            # Split on the first '.': first sentence -> short summary,
            # remainder (joined to one line) -> long summary.
            self.short_summary = description[:description.index('.')]
            long_summary = description[description.index('.') + 1:].lstrip()
            self.long_summary = ' '.join(long_summary.splitlines())
        except (ValueError, AttributeError):
            # No '.' in the description, or no description at all.
            self.short_summary = description
        file_data = (_load_help_file_from_string(options.help_file)
                     if hasattr(options, '_defaults')
                     else None)
        if file_data:
            self._load_from_data(file_data)
        else:
            self._load_from_file()

    def _load_from_file(self):
        # Fall back to the registered help-file content for these delimiters.
        file_data = _load_help_file(self.delimiters)
        if file_data:
            self._load_from_data(file_data)

    @staticmethod
    def _should_include_example(ex):
        # Examples may be restricted to an API-profile range via the optional
        # min_profile/max_profile keys.
        min_profile = ex.get('min_profile')
        max_profile = ex.get('max_profile')
        if min_profile or max_profile:
            from azure.cli.core.profiles import supported_api_version, PROFILE_TYPE
            # yaml will load this as a datetime if it's a date, we need a string.
            min_profile = str(min_profile) if min_profile else None
            max_profile = str(max_profile) if max_profile else None
            return supported_api_version(PROFILE_TYPE,
                                         min_api=min_profile,
                                         max_api=max_profile)
        return True

    def _load_from_data(self, data):
        """Merge parsed help data (plain string or dict) into this object."""
        if not data:
            return
        if isinstance(data, str):
            # A bare string is treated as the long summary only.
            self.long_summary = data
            return
        if 'type' in data:
            self.type = data['type']
        if 'short-summary' in data:
            self.short_summary = data['short-summary']
        self.long_summary = data.get('long-summary')
        if 'examples' in data:
            self.examples = []
            for d in data['examples']:
                if HelpFile._should_include_example(d):
                    self.examples.append(HelpExample(d))
class GroupHelpFile(HelpFile):  # pylint: disable=too-few-public-methods
    """Help data for a command group; recursively loads its children."""

    def __init__(self, delimiters, parser):
        super(GroupHelpFile, self).__init__(delimiters)
        self.type = 'group'
        self.children = []
        choices = getattr(parser, 'choices', None) or {}
        for options in choices.values():
            # Drop the leading program name ('az') from the child's prog.
            child_delimiters = ' '.join(options.prog.split()[1:])
            if options.is_group():
                child = GroupHelpFile(child_delimiters, options)
            else:
                child = HelpFile(child_delimiters)
            child.load(options)
            self.children.append(child)
class CommandHelpFile(HelpFile):  # pylint: disable=too-few-public-methods
    """Help data for a leaf command, including its parameter list."""

    def __init__(self, delimiters, parser):
        super(CommandHelpFile, self).__init__(delimiters)
        self.type = 'command'
        self.command_source = getattr(parser, 'command_source', None)
        self.parameters = []
        # Build one HelpParameter per visible argparse action.
        for action in [a for a in parser._actions if a.help != argparse.SUPPRESS]:  # pylint: disable=protected-access
            self.parameters.append(HelpParameter(' '.join(sorted(action.option_strings)),
                                                 action.help,
                                                 required=action.required,
                                                 choices=action.choices,
                                                 default=action.default,
                                                 group_name=action.container.description))
        # --help/-h is always present; force it into the global group.
        help_param = next(p for p in self.parameters if p.name == '--help -h')
        help_param.group_name = 'Global Arguments'

    def _load_from_data(self, data):
        """Overlay authored per-parameter help onto the argparse-derived list."""
        super(CommandHelpFile, self)._load_from_data(data)
        if isinstance(data, str) or not self.parameters or not data.get('parameters'):
            return
        loaded_params = []
        loaded_param = {}
        for param in self.parameters:
            # Match authored entries by exact parameter name.
            loaded_param = next((n for n in data['parameters'] if n['name'] == param.name), None)
            if loaded_param:
                param.update_from_data(loaded_param)
            loaded_params.append(param)
        self.parameters = loaded_params
class HelpParameter(HelpObject):  # pylint: disable=too-few-public-methods, too-many-instance-attributes
    """Help metadata for a single command argument."""

    def __init__(self, param_name, description, required, choices=None,
                 default=None, group_name=None):
        super(HelpParameter, self).__init__()
        self.name = param_name
        self.required = required
        self.type = 'string'
        self.short_summary = description
        self.long_summary = ''
        self.value_sources = []
        self.choices = choices
        self.default = default
        self.group_name = group_name

    def update_from_data(self, data):
        """Overlay authored help data onto this parameter.

        Raises HelpAuthoringException when the authored name does not match.
        """
        if self.name != data.get('name'):
            raise HelpAuthoringException("mismatched name {0} vs. {1}"
                                         .format(self.name, data.get('name')))
        # Copy each authored field onto the matching attribute when present.
        for key, attr in (('type', 'type'),
                          ('short-summary', 'short_summary'),
                          ('long-summary', 'long_summary'),
                          ('populator-commands', 'value_sources')):
            value = data.get(key)
            if value:
                setattr(self, attr, value)
class HelpExample(object):  # pylint: disable=too-few-public-methods
    """One worked example (display name plus command text) for a help entry."""

    def __init__(self, _data):
        self.name, self.text = _data['name'], _data['text']
def _print_indent(s, indent=0, subsequent_spaces=-1):
tw = textwrap.TextWrapper(initial_indent=' ' * indent,
subsequent_indent=(' ' * indent
if subsequent_spaces == -1
else ' ' * subsequent_spaces),
replace_whitespace=False,
width=100)
paragraphs = s.split('\n')
for p in paragraphs:
try:
print(tw.fill(p), file=sys.stdout)
except UnicodeEncodeError:
print(tw.fill(p).encode('ascii', 'ignore').decode('utf-8', 'ignore'), file=sys.stdout)
def _get_column_indent(text, max_name_length):
return ' ' * (max_name_length - len(text))
def _get_hanging_indent(max_length, indent):
    """Hanging indent = name-column width + indentation + separator length."""
    return max_length + indent * 4 + len(FIRST_LINE_PREFIX)
def _normalize_text(s):
if not s or len(s) < 2:
return s or ''
s = s.strip()
initial_upper = s[0].upper() + s[1:]
trailing_period = '' if s[-1] in '.!?' else '.'
return initial_upper + trailing_period
def _load_help_file_from_string(text):
    """Parse authored help text as YAML; fall back to the raw string.

    Returns None for falsy input, the parsed structure on success, or the
    original text when parsing fails.
    """
    import yaml
    try:
        # safe_load avoids arbitrary object construction and keeps working on
        # PyYAML >= 5/6, where loader-less yaml.load() is deprecated/removed.
        return yaml.safe_load(text) if text else None
    except Exception:  # pylint: disable=broad-except
        return text
def _get_single_metadata(cmd_table):
assert len(cmd_table) == 1
return next(metadata for _, metadata in cmd_table.items())
class HelpAuthoringException(Exception):
    """Raised when authored help data does not match the command metadata."""
    pass
| 34.040323 | 118 | 0.601575 |
e3f8182d3984894a9a1fa757244178f9ce0e6846 | 1,024 | py | Python | tests/test_java.py | eerimoq/pygments | 3cd60987c27d2228ac46bfa2648e280aaaf61fc1 | [
"BSD-2-Clause"
] | 940 | 2019-08-23T13:08:46.000Z | 2022-03-31T06:40:44.000Z | tests/test_java.py | eerimoq/pygments | 3cd60987c27d2228ac46bfa2648e280aaaf61fc1 | [
"BSD-2-Clause"
] | 1,043 | 2019-08-22T12:22:28.000Z | 2022-03-31T20:26:02.000Z | tests/test_java.py | eerimoq/pygments | 3cd60987c27d2228ac46bfa2648e280aaaf61fc1 | [
"BSD-2-Clause"
] | 488 | 2019-09-19T14:27:19.000Z | 2022-03-31T17:02:44.000Z | """
Basic JavaLexer Test
~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import time
import pytest
from pygments.token import String
from pygments.lexers import JavaLexer
@pytest.fixture(scope='module')
def lexer():
    # One JavaLexer instance shared by every test in this module.
    yield JavaLexer()
@pytest.mark.parametrize(
    'text',
    (
        '""', '"abc"', '"ひらがな"', '"123"',
        # NOTE(review): '"\\t"' '"\\""' are adjacent literals that concatenate
        # into a single parameter -- confirm a comma was not intended here.
        '"\\\\"', '"\\t"' '"\\""',
    ),
)
def test_string_literals_positive_match(lexer, text):
    """Test positive matches for string literals."""
    tokens = list(lexer.get_tokens_unprocessed(text))
    # Every emitted token must be a String token...
    assert all([token is String for _, token, _ in tokens])
    # ...and their concatenated values must round-trip the input exactly.
    assert ''.join([value for _, _, value in tokens]) == text
def test_string_literals_backtracking(lexer):
    """Test catastrophic backtracking for string literals."""
    # An unterminated string followed by many backslashes can trigger
    # exponential regex backtracking; guard with a wall-clock budget.
    # NOTE(review): time-based assertions can flake on loaded CI machines.
    start_time = time.time()
    list(lexer.get_tokens_unprocessed('"' + '\\' * 100))
    assert time.time() - start_time < 1, 'possible backtracking bug'
| 24.97561 | 70 | 0.638672 |
f88f2896953faf3602311410cde2d0337795e50a | 1,362 | py | Python | portfolio/Python/scrapy/soundslive/andertons_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/soundslive/andertons_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/soundslive/andertons_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class AnderstonSpider(BaseSpider):
    """Scrapy spider that crawls every product listing on
    music.andertons.co.uk by issuing an empty site search and following
    the 'Next' pagination link."""
    name = 'andertons.co.uk'
    allowed_domains = ['music.andertons.co.uk']
    #Empty search to obtain all the articles.
    start_urls = ['http://music.andertons.co.uk/search?w=&nodet=1']

    def parse(self, response):
        """Yield one Product item per listing box, then queue the next page."""
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//a[@class="prod-box"]')
        for product in products:
            loader = ProductLoader(item=Product(), selector=product)
            # XPaths below are relative to the product's <a class="prod-box">.
            loader.add_xpath('name', 'span[@class="prod-desc-area"]/'
                             'span[@class="prod-name-row"]/strong/text()')
            loader.add_xpath('url','@href')
            loader.add_xpath('price', 'span[@class="prod-desc-area"]/'
                             'span[@class="price-prod"]/text()')
            yield loader.load_item()
        next_page = hxs.select('//*[@id="sli_pagination_footer"]/'
                               'span/a[text()="Next"]/@href').extract()
        if next_page:
            # Follow the last matching 'Next' link and parse it the same way.
            next_url = next_page[-1]
            yield Request(next_url, callback=self.parse)
8cd86446f819518c4e652a6ded64d604f5984369 | 6,933 | py | Python | Cogs/Utils.py | camielverdult/CorpBot.py | 56cf3ee736625525d05f9f447b31e34baf93596d | [
"MIT"
] | null | null | null | Cogs/Utils.py | camielverdult/CorpBot.py | 56cf3ee736625525d05f9f447b31e34baf93596d | [
"MIT"
] | null | null | null | Cogs/Utils.py | camielverdult/CorpBot.py | 56cf3ee736625525d05f9f447b31e34baf93596d | [
"MIT"
] | null | null | null | import asyncio
import discord
import re
from discord.ext import commands
from Cogs import Nullify
# bot = None
# url_regex = re.compile(r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?")
def setup(bot):
    # Extension entry point called by discord.py's load_extension.
    # This module isn't actually a cog - but it is a place
    # we can call "a trash fire"
    bot.add_cog(Utils(bot))
    # global bot
    # bot = bot_start
class Utils(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        # Matches http/https/ftp URLs.
        # NOTE(review): no use of url_regex is visible in this chunk -- it is
        # presumably consumed elsewhere; confirm before removing.
        self.url_regex = re.compile(
            r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?")
    def suppressed(self, ctx, msg, force=False):
        # Checks if the passed server is suppressing user/role mentions and adjust the msg accordingly
        # Accepts a Guild, a Context, or anything with a .guild attribute.
        guild = ctx if isinstance(ctx, discord.Guild) else ctx.guild if hasattr(
            ctx, "guild") else None
        if not guild:
            return msg
        settings = self.bot.get_cog("Settings")
        if not settings:
            return msg
        # Clean when forced, or when the guild's SuppressMentions stat is on
        # (defaults to True).
        return Nullify.clean(msg, ctx=guild) if (force or settings.getServerStat(guild, "SuppressMentions", True)) else msg
    def is_owner(self, ctx, member=None):
        # Checks if the user in the passed context is an owner
        # Defaults to the context's author when no member is given. May return
        # None when the bot is unclaimed (is_owner_reply below distinguishes
        # None from False).
        settings = self.bot.get_cog("Settings")
        if not settings:
            return False
        member = ctx.author if not member else member
        return settings.isOwner(member)
    def is_admin(self, ctx, member=None):
        # Checks if the user in the passed context is admin
        # (i.e. has the Administrator permission in the context's channel).
        member = ctx.author if not member else member
        return member.permissions_in(ctx.channel).administrator
    def is_bot_admin_only(self, ctx, member=None):
        # Checks only if we're bot admin
        settings = self.bot.get_cog("Settings")
        if not settings:
            return False
        member = ctx.author if not member else member
        if not hasattr(member, "roles"):
            return False # No roles to iterate - can't be bot admin
        # Bot admin == the member holds any role listed in the guild's
        # AdminArray setting (IDs compared as strings).
        return any(role for role in member.roles for check in settings.getServerStat(ctx.guild, "AdminArray", []) if str(role.id) == str(check["ID"]))
def is_bot_admin(self, ctx, member=None):
# Checks if the user in the passed context is admin or bot admin
member = ctx.author if not member else member
return member.permissions_in(ctx.channel).administrator or self.is_bot_admin_only(ctx, member)
async def is_owner_reply(self, ctx, member=None, not_claimed="I have not been claimed, *yet*.", not_owner="You are not the *true* owner of me. Only the rightful owner can use this command."):
# Auto-replies if the user isn't an owner
are_we = self.is_owner(ctx, member)
if are_we == None:
await ctx.send(not_claimed)
elif are_we == False:
await ctx.send(not_owner)
return are_we
async def is_admin_reply(self, ctx, member=None, message="You do not have sufficient privileges to access this command.", message_when=False):
# Auto-replies if the user doesn't have admin privs
are_we = self.is_admin(ctx, member)
if are_we == message_when:
await ctx.send(message)
return are_we
async def is_bot_admin_only_reply(self, ctx, member=None, message="You do not have sufficient privileges to access this command.", message_when=False):
# Auto-replies if the user doesn't have admin or bot admin privs
are_we = self.is_bot_admin_only(ctx, member)
if are_we == message_when:
await ctx.send(message)
return are_we
async def is_bot_admin_reply(self, ctx, member=None, message="You do not have sufficient privileges to access this command.", message_when=False):
# Auto-replies if the user doesn't have admin or bot admin privs
are_we = self.is_bot_admin(ctx, member)
if are_we == message_when:
await ctx.send(message)
return are_we
def yes_no_setting(self, ctx, display_name, setting_name, yes_no=None, default=None, is_global=False):
# Get or set a true/false value and return the resulting message
guild = ctx if isinstance(ctx, discord.Guild) else ctx.guild if isinstance(
ctx, discord.ext.commands.Context) else None
if not guild and not is_global:
return "I can't get a guild from here :("
settings = self.bot.get_cog("Settings")
if not settings:
return "Something is wrong with my settings module :("
current = settings.getGlobalStat(
setting_name, default) if is_global else settings.getServerStat(guild, setting_name, default)
if yes_no == None:
# Output what we have
return "{} currently *{}*.".format(display_name, "enabled" if current else "disabled")
elif yes_no.lower() in ["yes", "on", "true", "enabled", "enable"]:
yes_no = True
msg = "{} {} *enabled*.".format(display_name,
"remains" if current else "is now")
elif yes_no.lower() in ["no", "off", "false", "disabled", "disable"]:
yes_no = False
msg = "{} {} *disabled*.".format(display_name,
"is now" if current else "remains")
else:
msg = "That's not a valid setting."
yes_no = current
if not yes_no == current:
if is_global:
settings.setGlobalStat(setting_name, yes_no)
else:
settings.setServerStat(ctx.guild, setting_name, yes_no)
return msg
def get_urls(self, message):
# Returns a list of valid urls from a passed message/context/string
message = message.content if isinstance(message, discord.Message) else message.message.content if isinstance(
message, discord.ext.commands.Context) else str(message)
return [x.group(0) for x in re.finditer(self.url_regex, message)]
def truncate_string(self, value=None, limit=128, suffix="...", replace_newlines=True, complete_codeblocks=True):
if not isinstance(value, str):
return value
# Truncates the string to the max chars passed
if replace_newlines:
new_val = [line+"\n" if complete_codeblocks and line.startswith(
"```") and line[3:].isalpha() else line for line in value.split("\n")]
value = " ".join(new_val)
if len(value) > limit: # We need to truncate
value = value[:limit-len(suffix)]+suffix
# Check if we need to complete an orphaned codeblock
if complete_codeblocks and value.count("```") % 2:
value += "```"
return value
| 47.486301 | 197 | 0.605221 |
b6df9c15b381875c93cc6c5ff2cec4f3b94c8446 | 879 | py | Python | drfsample/ajaxsample/management/commands/sample.py | miveh/python_course | 96730deb1a5c07125916d723ae02c85074b86817 | [
"Apache-2.0"
] | 11 | 2021-07-15T11:14:43.000Z | 2022-02-08T08:19:57.000Z | drfsample/ajaxsample/management/commands/sample.py | miveh/python_course | 96730deb1a5c07125916d723ae02c85074b86817 | [
"Apache-2.0"
] | null | null | null | drfsample/ajaxsample/management/commands/sample.py | miveh/python_course | 96730deb1a5c07125916d723ae02c85074b86817 | [
"Apache-2.0"
] | 39 | 2021-07-15T10:42:31.000Z | 2021-07-25T13:44:25.000Z | from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Sample custom management command (run with ``manage.py sample``)."""

    help = 'Closes the specified poll for voting'

    def add_arguments(self, parser):
        """Register command-line options.

        NOTE: the positional 'number' argument is intentionally left
        commented out as sample material.
        """
        # parser.add_argument('number', type=int)
        parser.add_argument(
            '--delete',
            action='store_true',
            help='Delete poll instead of closing it',
        )

    def handle(self, *args, **options):
        """Echo the parsed options and report success.

        Bug fix: 'number' is never declared in add_arguments (the
        positional argument is commented out), so options['number']
        always raised KeyError; use .get() so the command completes.
        """
        print(options)
        print('run simple custom command')
        self.stdout.write(self.style.SUCCESS(
            'Successfully closed "%s"' % options.get('number')))
7bb41476d4327ea71a9c75e4085c1946f4b0771a | 8,805 | py | Python | test/test_web_tiddler_revisions.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
] | 57 | 2015-02-01T21:03:34.000Z | 2021-12-25T12:02:31.000Z | test/test_web_tiddler_revisions.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
] | 6 | 2016-02-05T11:43:32.000Z | 2019-09-05T13:38:49.000Z | test/test_web_tiddler_revisions.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
] | 17 | 2015-05-12T08:53:23.000Z | 2021-12-21T15:56:30.000Z | """
Test GETting a tiddler revision list.
"""
import simplejson
from .fixtures import (muchdata, reset_textstore, _teststore, initialize_app,
get_http)
# Tiddler payload in TiddlyWeb's plain-text serialization: header fields
# followed by the body text.  The \xbb character exercises utf-8 handling
# on the PUT path.
text_put_body = u"""modifier: JohnSmith
created:
modified: 200803030303
tags: [[tag three]]
Hello, I'm John Smith \xbb and I have something to sell.
"""
# Identical body with a different modifier, used for the final revision.
text_put_body2 = u"""modifier: Frank
created:
modified: 200803030303
tags: [[tag three]]
Hello, I'm John Smith \xbb and I have something to sell.
"""
# Shared HTTP test client from the fixtures module (presumably an
# httplib2-style object; .requestU decodes responses to unicode).
http = get_http()
def setup_module(module):
    """Initialize the app, wipe the text store, and load bulk test data."""
    initialize_app()
    reset_textstore()
    fresh_store = _teststore()
    module.store = fresh_store
    muchdata(fresh_store)
def test_put_tiddler_txt_1():
    """First PUT of the tiddler succeeds (creates revision 1)."""
    payload = text_put_body.encode('utf-8')
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne',
        method='PUT',
        headers={'Content-Type': 'text/plain'},
        body=payload)
    assert response['status'] == '204'
def test_put_tiddler_txt_2():
    """Second PUT of the same content succeeds (creates revision 2)."""
    body_bytes = text_put_body.encode('utf-8')
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne',
        method='PUT',
        headers={'Content-Type': 'text/plain'},
        body=body_bytes)
    assert response['status'] == '204'
def test_put_tiddler_txt_3():
    """Third PUT succeeds and the etag advances to revision 3."""
    payload = text_put_body.encode('utf-8')
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne',
        method='PUT',
        headers={'Content-Type': 'text/plain'},
        body=payload)
    assert response['status'] == '204'
    assert response['etag'].startswith('"bag1/TestOne/3:')
def test_put_tiddler_txt_4():
    """A PUT with a different modifier creates revision 4."""
    payload = text_put_body2.encode('utf-8')
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne',
        method='PUT',
        headers={'Content-Type': 'text/plain'},
        body=payload)
    assert response['status'] == '204'
    assert response['etag'].startswith('"bag1/TestOne/4:')
def test_get_tiddler_revision_list():
    """The revisions collection lists revision 3 and mentions revisions."""
    response, content = http.requestU(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne/revisions',
        method='GET')
    assert response['status'] == '200'
    for expected in ('3', 'revisions'):
        assert expected in content
def test_get_tiddler_revision_1():
    """Revision 1 of the tiddler is retrievable."""
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne/revisions/1',
        method='GET')
    assert response['status'] == '200'
def test_get_tiddler_revision_2():
    """Revision 2 of the tiddler is retrievable."""
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne/revisions/2',
        method='GET')
    assert response['status'] == '200'
def test_get_tiddler_revision_3():
    """Revision 3 is retrievable and its etag carries the revision number."""
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne/revisions/3',
        method='GET')
    assert response['status'] == '200'
    assert response['etag'].startswith('"bag1/TestOne/3:')
def test_get_tiddler_revision_5_fail():
    """A revision number that was never written 404s."""
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne/revisions/5',
        method='GET')
    assert response['status'] == '404'
def test_get_tiddler_revision_nonint_fail():
    """A non-integer revision identifier 404s."""
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/TestOne/revisions/four',
        method='GET')
    assert response['status'] == '404'
def test_get_tiddler_revision_list_404():
    """Get a 404 when the tiddler doesn't exist."""
    response, _ = http.request(
        'http://our_test_domain:8001/bags/bag1/tiddlers/Test99/revisions',
        method='GET')
    assert response['status'] == '404'
def test_get_tiddler_not_revision_list():
    """A plain tiddler collection must not include revision links."""
    response, listing = http.requestU(
        'http://our_test_domain:8001/bags/bag1/tiddlers',
        method='GET')
    assert response['status'] == '200'
    assert '3' in listing
    assert 'revisions' not in listing
def test_get_tiddler_revision_list_json():
    """Revision lists serialize to JSON; ?sort=revision reverses the order."""
    response, body = http.requestU(
        'http://our_test_domain:8001/recipes/long/tiddlers/TestOne/revisions.json',
        method='GET')
    default_order = simplejson.loads(body)
    assert response['status'] == '200'
    assert len(default_order) == 4
    response, body = http.requestU(
        'http://our_test_domain:8001/recipes/long/tiddlers/TestOne/revisions.json?sort=revision',
        method='GET')
    sorted_order = simplejson.loads(body)
    assert len(default_order) == 4
    assert default_order[0]['revision'] == sorted_order[-1]['revision']
def test_tiddler_revision_list_json_fat():
    """Round-trip a fat (text-included) revision list via POST.

    Fetches the full revision history as JSON, then POSTs it to a new
    bag, verifying content-type enforcement and the Location header.
    """
    response, content = http.requestU(
        'http://our_test_domain:8001/recipes/long/tiddlers/TestOne/revisions.json?fat=1',
        method='GET')
    info = simplejson.loads(content)
    assert response['status'] == '200'
    assert len(info) == 4
    # Newest revision first; all revisions were written anonymously.
    assert info[0]['revision'] == 4
    assert info[0]['modifier'] == 'GUEST'
    assert info[0]['creator'] == 'GUEST'
    assert info[-1]['modifier'] == 'GUEST'
    assert info[-1]['creator'] == 'GUEST'
    assert 'I have something to sell' in info[0]['text']
    # POSTing with a non-JSON content-type must be refused with 415.
    response, resp_content = http.requestU(
        'http://our_test_domain:8001/bags/bag28/tiddlers/tiddler0/revisions.json',
        method='POST',
        headers={'if-match': '"bag28/tiddler0/1"',
                 'content-type': 'text/plain'},
        body=content)
    assert response['status'] == '415'
    assert 'application/vnd.tiddlyweb+json required' in resp_content
    # Same POST with an acceptable JSON content-type succeeds.
    response, content = http.requestU(
        'http://our_test_domain:8001/bags/bag28/tiddlers/tiddler0/revisions.json',
        method='POST',
        headers={'if-match': '"bag28/tiddler0/1"',
                 'content-type': 'application/json'},
        body=content)
    assert response['status'] == '204'
    assert response['location'] == 'http://our_test_domain:8001/bags/bag28/tiddlers/tiddler0'
    response, content = http.requestU(
        'http://our_test_domain:8001/bags/bag28/tiddlers/tiddler0/revisions.json',
        method='GET')
    info = simplejson.loads(content)
    assert response['status'] == '200'
    # confirm new media type
    response, content = http.requestU(
        'http://our_test_domain:8001/bags/bag28/tiddlers/tiddler0/revisions.json',
        method='POST',
        headers={'if-match': '"bag28/tiddler0/5"',
                 'content-type': 'application/vnd.tiddlyweb+json'},
        body=content)
    assert response['status'] == '204'
    assert response['location'] == 'http://our_test_domain:8001/bags/bag28/tiddlers/tiddler0'
def test_etag_generation():
    """Tiddler etags embed revision 0 before storage and 1 after."""
    from tiddlyweb.config import config
    from tiddlyweb.model.bag import Bag
    from tiddlyweb.model.tiddler import Tiddler
    from tiddlyweb.web.util import tiddler_etag

    environ = {'tiddlyweb.config': config}
    tiddler = Tiddler('monkey', 'bar')
    assert tiddler_etag(environ, tiddler).startswith('"bar/monkey/0:')

    store.put(Bag('bar'))
    store.put(tiddler)
    assert tiddler_etag(environ, tiddler).startswith('"bar/monkey/1:')
def test_tiddler_revision_list_bad_ext():
    """An unknown serialization extension yields 415."""
    response, _ = http.request(
        'http://our_test_domain:8001/recipes/long/tiddlers/TestOne/revisions.monkeys',
        method='GET')
    assert response['status'] == '415'
def test_tiddler_revision_list_bad_ext_accept():
    """An unknown extension yields 415 even with an acceptable Accept."""
    response, _ = http.request(
        'http://our_test_domain:8001/recipes/long/tiddlers/TestOne/revisions.monkeys',
        method='GET',
        headers={'Accept': 'text/html'})
    assert response['status'] == '415'
def test_post_revision_etag_handling():
    """POSTing revisions requires a matching If-Match etag on the target."""
    # GET a list of revisions
    response, content = http.requestU(
        'http://our_test_domain:8001/recipes/long/tiddlers/TestOne/revisions.json?fat=1',
        method='GET')
    json_content = content
    # Without If-Match the POST fails the precondition (412).
    response, content = http.request(
        'http://our_test_domain:8001/bags/bag28/tiddlers/newone/revisions.json',
        method='POST',
        headers={'content-type': 'application/json'},
        body=json_content)
    assert response['status'] == '412'
    # With the revision-0 (not-yet-created) etag the POST succeeds.
    response, content = http.request(
        'http://our_test_domain:8001/bags/bag28/tiddlers/newone/revisions.json',
        method='POST',
        headers={'If-Match': '"bag28/newone/0"',
                 'content-type': 'application/json'},
        body=json_content)
    assert response['status'] == '204'
| 31.672662 | 101 | 0.640318 |
bd89b53ed8a19ba7b02ac4a1bf49ee30d309ea9d | 1,259 | py | Python | video_speed_benchmark.py | hajungong007/Dense-Head-Pose-Estimation | 3cdb89c1cc1289d85b1e36787223636e7e42139d | [
"MIT"
] | 1 | 2021-02-23T15:41:20.000Z | 2021-02-23T15:41:20.000Z | video_speed_benchmark.py | hajungong007/Dense-Head-Pose-Estimation | 3cdb89c1cc1289d85b1e36787223636e7e42139d | [
"MIT"
] | null | null | null | video_speed_benchmark.py | hajungong007/Dense-Head-Pose-Estimation | 3cdb89c1cc1289d85b1e36787223636e7e42139d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import numpy as np
import sys
import cv2
import service
import time
# Model / service setup: face detector, dense reconstruction, mesh renderer.
fd = service.UltraLightFaceDetecion("weights/RFB-320.tflite",
                                    conf_threshold=0.92)
fa = service.DenseFaceReconstruction("weights/dense_face.tflite")
mr = service.TrianglesMeshRender("asset/render.so", "asset/triangles.npy")

cap = cv2.VideoCapture(sys.argv[1])
# Use the named constant instead of the magic property index 5.  Fall back
# to 1 when the container reports 0 FPS so `counter % rate` below cannot
# raise ZeroDivisionError.
counter, rate = 0, cap.get(cv2.CAP_PROP_FPS) or 1

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # face detection
    start_time = time.perf_counter()
    boxes, scores = fd.inference(frame)
    detect_cost = time.perf_counter() - start_time

    # raw copy for reconstruction
    feed = frame.copy()

    start_time = time.perf_counter()
    for landmarks, pose in fa.get_landmarks(feed, boxes):
        landmarks = landmarks.astype(np.float32)
        mr.render(landmarks.T.copy(), frame)
    recon_cost = time.perf_counter() - start_time

    # Report timings periodically (roughly once per second of video).
    if counter % rate == 0:
        counter = 0
        print(f"Detection Cost: {detect_cost * 1000:.2f}ms; " +
              f"Reconstruction and Render Cost: {recon_cost * 1000:.2f}ms")
    counter += 1

    cv2.imshow("result", frame)
    if cv2.waitKey(1) == ord('q'):
        break

# Release the capture device and close the preview window — previously
# these were leaked when the loop ended.
cap.release()
cv2.destroyAllWindows()
68e83e8b628c4c68b191b7c4262388105bfc4c07 | 192 | py | Python | data/groups.py | isakhanian/python_training | 689b1b4c6381c81b09c7df152d0d5f729a5abce6 | [
"Apache-2.0"
] | null | null | null | data/groups.py | isakhanian/python_training | 689b1b4c6381c81b09c7df152d0d5f729a5abce6 | [
"Apache-2.0"
] | null | null | null | data/groups.py | isakhanian/python_training | 689b1b4c6381c81b09c7df152d0d5f729a5abce6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.group import Group
testdata = [
Group(name="name1", header="header1", footer="footer1"),
Group(name="name2", header="header2", footer="footer2")
] | 21.333333 | 60 | 0.640625 |
2cdc3baca7f4707d1ab8d5a66ca8fad1da065a2d | 52,343 | py | Python | test/requirements.py | thereisnosun/sqlalchemy | 94aed8b17d21da9a20be4b092f6a60b12f60b761 | [
"MIT"
] | null | null | null | test/requirements.py | thereisnosun/sqlalchemy | 94aed8b17d21da9a20be4b092f6a60b12f60b761 | [
"MIT"
] | null | null | null | test/requirements.py | thereisnosun/sqlalchemy | 94aed8b17d21da9a20be4b092f6a60b12f60b761 | [
"MIT"
] | 1 | 2020-12-04T14:51:39.000Z | 2020-12-04T14:51:39.000Z | """Requirements specific to SQLAlchemy's own unit tests.
"""
import sys
from sqlalchemy import exc
from sqlalchemy.sql import text
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.exclusions import against
from sqlalchemy.testing.exclusions import fails_if
from sqlalchemy.testing.exclusions import fails_on
from sqlalchemy.testing.exclusions import fails_on_everything_except
from sqlalchemy.testing.exclusions import LambdaPredicate
from sqlalchemy.testing.exclusions import NotPredicate
from sqlalchemy.testing.exclusions import only_if
from sqlalchemy.testing.exclusions import only_on
from sqlalchemy.testing.exclusions import skip_if
from sqlalchemy.testing.exclusions import SpecPredicate
from sqlalchemy.testing.exclusions import succeeds_if
from sqlalchemy.testing.requirements import SuiteRequirements
def no_support(db, reason):
    """Shorthand for a SpecPredicate that skips *db*, annotated with *reason*."""
    predicate = SpecPredicate(db, description=reason)
    return predicate
def exclude(db, op, spec, description=None):
    """Shorthand for a version-qualified SpecPredicate,
    e.g. ``exclude('mysql', '<', (5, 0, 10))``."""
    predicate = SpecPredicate(db, op, spec, description=description)
    return predicate
class DefaultRequirements(SuiteRequirements):
@property
def deferrable_or_no_constraints(self):
"""Target database must support deferrable constraints."""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("mysql", "not supported by database"),
no_support("mariadb", "not supported by database"),
no_support("mssql", "not supported by database"),
]
)
@property
def check_constraints(self):
"""Target database must support check constraints."""
return exclusions.open()
@property
def enforces_check_constraints(self):
"""Target database must also enforce check constraints."""
return self.check_constraints + fails_on(
self._mysql_check_constraints_dont_exist,
"check constraints don't enforce on MySQL, MariaDB<10.2",
)
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def implicitly_named_constraints(self):
"""target database must apply names to unnamed constraints."""
return skip_if([no_support("sqlite", "not supported by database")])
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return skip_if(no_support("sqlite", "not supported by database"))
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return skip_if(
["sqlite", "oracle"],
"target backend %(doesnt_support)s ON UPDATE CASCADE",
)
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return fails_on_everything_except("sqlite", "oracle") + skip_if(
"mssql"
)
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key"""
return skip_if(["mssql"])
@property
def deferrable_fks(self):
"""target database must support deferrable fks"""
return only_on(["oracle"])
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite", "oracle"])
@property
def fk_constraint_option_reflection_ondelete_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def fk_constraint_option_reflection_ondelete_noaction(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def fk_constraint_option_reflection_onupdate_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def comment_reflection(self):
return only_on(["postgresql", "mysql", "mariadb", "oracle"])
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return skip_if(
["firebird", "oracle", "mysql", "mariadb"],
"not supported by database",
)
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("oracle", "not supported by database"),
no_support("mssql", "not supported by database"),
no_support("sybase", "not supported by database"),
]
)
@property
def non_native_boolean_unconstrained(self):
"""target database is not native boolean and allows arbitrary integers
in it's "bool" column"""
return skip_if(
[
LambdaPredicate(
lambda config: against(config, "mssql"),
"SQL Server drivers / odbc seem to change "
"their mind on this",
),
LambdaPredicate(
lambda config: config.db.dialect.supports_native_boolean,
"native boolean dialect",
),
]
)
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver")
@property
def qmark_paramstyle(self):
return only_on(
[
"firebird",
"sqlite",
"+pyodbc",
"+mxodbc",
"mysql+oursql",
"mariadb+oursql",
]
)
@property
def named_paramstyle(self):
return only_on(["sqlite", "oracle+cx_oracle"])
@property
def format_paramstyle(self):
return only_on(
[
"mysql+mysqldb",
"mysql+pymysql",
"mysql+cymysql",
"mysql+mysqlconnector",
"mariadb+mysqldb",
"mariadb+pymysql",
"mariadb+cymysql",
"mariadb+mysqlconnector",
"postgresql+pg8000",
]
)
@property
def pyformat_paramstyle(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pypostgresql",
"postgresql+pygresql",
"mysql+mysqlconnector",
"mysql+pymysql",
"mysql+cymysql",
"mariadb+mysqlconnector",
"mariadb+pymysql",
"mariadb+cymysql",
"mssql+pymssql",
]
)
@property
def no_quoting_special_bind_names(self):
"""Target database will quote bound parameter names, doesn't support
EXPANDING"""
return skip_if(["oracle"])
@property
def identity(self):
"""Target database must support GENERATED AS IDENTITY or a facsimile.
Includes GENERATED AS IDENTITY, AUTOINCREMENT, AUTO_INCREMENT, or other
column DDL feature that fills in a DB-generated identifier at
INSERT-time without requiring pre-execution of a SEQUENCE or other
artifact.
"""
return skip_if(
["firebird", "oracle", "postgresql", "sybase"],
"not supported by database",
)
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return skip_if(["firebird", self._sqlite_file_db], "not supported (?)")
@property
def temp_table_reflection(self):
return self.temporary_tables
@property
def temp_table_reflect_indexes(self):
return skip_if(
["mssql", "firebird", self._sqlite_file_db], "not supported (?)"
)
@property
def reflectable_autoincrement(self):
"""Target database must support tables that can automatically generate
PKs assuming they were reflected.
this is essentially all the DBs in "identity" plus PostgreSQL, which
has SERIAL support. FB and Oracle (and sybase?) require the Sequence
to be explicitly added, including if the table was reflected.
"""
return skip_if(
["firebird", "oracle", "sybase"], "not supported by database"
)
@property
def insert_from_select(self):
return skip_if(["firebird"], "crashes for unknown reason")
@property
def fetch_rows_post_commit(self):
return skip_if(["firebird"], "not supported")
@property
def non_broken_binary(self):
"""target DBAPI must work fully with binary values"""
# see https://github.com/pymssql/pymssql/issues/504
return skip_if(["mssql+pymssql"])
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
# adding mssql here since it doesn't support comparisons either,
# have observed generally bad behavior with binary / mssql.
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def tuple_in(self):
def _sqlite_tuple_in(config):
return against(
config, "sqlite"
) and config.db.dialect.dbapi.sqlite_version_info >= (3, 15, 0)
return only_on(
["mysql", "mariadb", "postgresql", _sqlite_tuple_in, "oracle"]
)
@property
def tuple_in_w_empty(self):
return self.tuple_in + skip_if(["oracle"])
@property
def independent_cursors(self):
"""Target must support simultaneous, independent database cursors
on a single connection."""
return skip_if(["mssql", "mysql", "mariadb"], "no driver support")
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
# This is also true of some configurations of UnixODBC and probably
# win32 ODBC as well.
return skip_if(
[
no_support(
"sqlite",
"independent connections disabled "
"when :memory: connections are used",
),
exclude(
"mssql",
"<",
(9, 0, 0),
"SQL Server 2005+ is required for "
"independent connections",
),
]
)
@property
def memory_process_intensive(self):
"""Driver is able to handle the memory tests which run in a subprocess
and iterate through hundreds of connections
"""
return skip_if(
[
no_support("oracle", "Oracle XE usually can't handle these"),
no_support("mssql+pyodbc", "MS ODBC drivers struggle"),
self._running_on_windows(),
]
)
@property
def updateable_autoincrement_pks(self):
"""Target must support UPDATE on autoincrement/integer primary key."""
return skip_if(
["mssql", "sybase"], "IDENTITY columns can't be updated"
)
@property
def isolation_level(self):
return only_on(
("postgresql", "sqlite", "mysql", "mariadb", "mssql", "oracle"),
"DBAPI has no isolation level support",
) + fails_on(
"postgresql+pypostgresql",
"pypostgresql bombs on multiple isolation level calls",
)
def get_isolation_levels(self, config):
levels = set(config.db.dialect._isolation_lookup)
if against(config, "sqlite"):
default = "SERIALIZABLE"
levels.add("AUTOCOMMIT")
elif against(config, "postgresql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "mysql"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mariadb"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mssql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "oracle"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
else:
raise NotImplementedError()
return {"default": default, "supported": levels}
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return self.isolation_level + only_if(
lambda config: "AUTOCOMMIT"
in self.get_isolation_levels(config)["supported"]
)
@property
def row_triggers(self):
"""Target must support standard statement-running EACH ROW triggers."""
return skip_if(
[
# no access to same table
no_support("mysql", "requires SUPER priv"),
no_support("mariadb", "requires SUPER priv"),
exclude("mysql", "<", (5, 0, 10), "not supported by database"),
]
)
@property
def sequences_as_server_defaults(self):
"""Target database must support SEQUENCE as a server side default."""
return only_on(
"postgresql", "doesn't support sequences as a server side default."
)
@property
def sql_expressions_inserted_as_primary_key(self):
return only_if([self.returning, self.sqlite])
@property
def computed_columns_on_update_returning(self):
return self.computed_columns + skip_if("oracle")
@property
def correlated_outer_joins(self):
"""Target must support an outer join to a subquery which
correlates to the parent."""
return skip_if(
"oracle",
'Raises "ORA-01799: a column may not be '
'outer-joined to a subquery"',
)
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return only_on(
["postgresql", "mssql", "mysql", "mariadb"],
"Backend does not support UPDATE..FROM",
)
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return only_on(
["postgresql", "mssql", "mysql", "mariadb", "sybase"],
"Backend does not support DELETE..FROM",
)
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as::
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return fails_if(
self._mysql_not_mariadb_103,
'MySQL error 1093 "Cant specify target table '
'for update in FROM clause", resolved by MariaDB 10.3',
)
@property
def savepoints(self):
"""Target database must support savepoints."""
return skip_if(
["sqlite", "sybase", ("mysql", "<", (5, 0, 3))],
"savepoints not supported",
)
@property
def savepoints_w_release(self):
return self.savepoints + skip_if(
["oracle", "mssql"],
"database doesn't support release of savepoint",
)
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return skip_if(["firebird"], "no schema support")
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign
keys"""
return only_on(["postgresql", "mysql", "mariadb", "mssql"])
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return only_on(["postgresql"])
@property
def unique_constraint_reflection(self):
return fails_on_everything_except(
"postgresql", "mysql", "mariadb", "sqlite", "oracle"
)
@property
def unique_constraint_reflection_no_index_overlap(self):
return (
self.unique_constraint_reflection
+ skip_if("mysql")
+ skip_if("mariadb")
+ skip_if("oracle")
)
@property
def check_constraint_reflection(self):
return fails_on_everything_except(
"postgresql",
"sqlite",
"oracle",
self._mysql_and_check_constraints_exist,
)
@property
def indexes_with_expressions(self):
return only_on(["postgresql", "sqlite>=3.9.0"])
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return only_on(["sqlite", "oracle"]) + skip_if(self._sqlite_file_db)
@property
def temporary_views(self):
"""target database supports temporary views"""
return only_on(["sqlite", "postgresql"]) + skip_if(
self._sqlite_file_db
)
@property
def update_nowait(self):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return skip_if(
["firebird", "mssql", "mysql", "mariadb", "sqlite", "sybase"],
"no FOR UPDATE NOWAIT support",
)
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def ctes(self):
"""Target database supports CTEs"""
return only_on(
[
lambda config: against(config, "mysql")
and (
(
config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info
>= (10, 2)
)
or (
not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (8,)
)
),
"mariadb>10.2",
"postgresql",
"mssql",
"oracle",
"sqlite>=3.8.3",
]
)
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return only_on(
[
"postgresql",
"mssql",
# "oracle" - oracle can do this but SQLAlchemy doesn't support
# their syntax yet
]
)
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return only_if(["postgresql"])
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return only_if(
["mysql", "mariadb", "sqlite", "postgresql+psycopg2", "mssql"]
)
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for INTERSECT",
)
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for EXCEPT",
)
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
Fails on SQL Server
"""
return fails_if("mssql")
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite.
"""
return fails_if("sqlite")
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return fails_if(["sqlite", "oracle"])
@property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
return fails_if(["sybase"], "no support for OFFSET or equivalent")
@property
def sql_expression_limit_offset(self):
return (
fails_if(
["mysql", "mariadb"],
"Target backend can't accommodate full expressions in "
"OFFSET or LIMIT",
)
+ self.offset
)
@property
def window_functions(self):
return only_if(
["postgresql>=8.4", "mssql", "oracle", "sqlite>=3.25.0"],
"Backend does not support window functions",
)
@property
def two_phase_transactions(self):
        """Target database must support two-phase transactions."""
        # PostgreSQL only supports two-phase commit when the server's
        # max_prepared_transactions setting is greater than zero, so we
        # probe the live server rather than trusting the dialect alone.
        def pg_prepared_transaction(config):
            if not against(config, "postgresql"):
                # Not PG: the probe does not apply; report True so the
                # NotPredicate below never skips on other backends.
                return True
            with config.db.connect() as conn:
                try:
                    num = conn.scalar(
                        text(
                            "select cast(setting AS integer) from pg_settings "
                            "where name = 'max_prepared_transactions'"
                        )
                    )
                except exc.OperationalError:
                    # Setting could not be read; assume 2PC unavailable.
                    return False
                else:
                    return num > 0
        return skip_if(
            [
                no_support("firebird", "no SA implementation"),
                no_support("mssql", "two-phase xact not supported by drivers"),
                no_support(
                    "oracle", "two-phase xact not implemented in SQLA/oracle"
                ),
                no_support(
                    "sqlite", "two-phase xact not supported by database"
                ),
                no_support(
                    "sybase", "two-phase xact not supported by drivers/SQLA"
                ),
                # in Ia3cbbf56d4882fcc7980f90519412f1711fae74d
                # we are evaluating which modern MySQL / MariaDB versions
                # can handle two-phase testing without too many problems
                # no_support(
                #     "mysql",
                #     "recent MySQL communiity editions have too many issues "
                #     "(late 2016), disabling for now",
                # ),
                NotPredicate(
                    LambdaPredicate(
                        pg_prepared_transaction,
                        "max_prepared_transactions not available or zero",
                    )
                ),
            ]
        )
@property
def two_phase_recovery(self):
return self.two_phase_transactions + (
skip_if(
["mysql", "mariadb"],
"still can't get recover to work w/ MariaDB / MySQL",
)
)
@property
def views(self):
"""Target database must support VIEWs."""
return skip_if("drizzle", "no VIEW support")
@property
def empty_strings_varchar(self):
"""
target database can persist/return an empty string with a varchar.
"""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return fails_if(
["oracle"],
"ORA-00932: inconsistent datatypes: expected - got CLOB",
)
@property
def unicode_data(self):
"""target drive must support unicode data stored in columns."""
return skip_if([no_support("sybase", "no unicode driver support")])
@property
def unicode_connections(self):
"""
Target driver must support some encoding of Unicode across the wire.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
return skip_if(
[
no_support("oracle", "FIXME: no support in database?"),
no_support("sybase", "FIXME: guessing, needs confirmation"),
no_support("mssql+pymssql", "no FreeTDS support"),
]
)
@property
def symbol_names_w_double_quote(self):
"""Target driver can create tables with a name like 'some " table'"""
return skip_if(
[no_support("oracle", "ORA-03001: unimplemented feature")]
)
@property
def emulated_lastrowid(self):
""" "target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes.
"""
return fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"sybase",
"mssql",
)
@property
def emulated_lastrowid_even_with_sequences(self):
""" "target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes, even if the table has a
Sequence on it.
"""
return fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"sybase",
)
@property
def implements_get_lastrowid(self):
return skip_if([no_support("sybase", "not supported by database")])
@property
def dbapi_lastrowid(self):
""" "target backend includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return skip_if(
"mssql+pymssql", "crashes on pymssql"
) + fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"mssql",
)
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return fails_on_everything_except(
"postgresql", "oracle", "firebird", "sqlite >= 3.30.0"
)
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return fails_on_everything_except(
"postgresql", "oracle", "mssql", "sybase", "sqlite"
)
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate"""
return skip_if(["mssql", "sqlite"])
@property
def array_type(self):
return only_on(
[
lambda config: against(config, "postgresql")
and not against(config, "+pg8000")
]
)
@property
def json_type(self):
        # JSON arrived at different versions per backend: MySQL proper
        # 5.7, MariaDB 10.2.7, PostgreSQL 9.3, SQLite via the JSON1
        # extension (probed at runtime), and SQL Server unconditionally.
        return only_on(
            [
                lambda config: against(config, "mysql")
                and (
                    (
                        not config.db.dialect._is_mariadb
                        and against(config, "mysql >= 5.7")
                    )
                    or (
                        # NOTE(review): for non-MariaDB MySQL < 5.7 this
                        # branch still evaluates and compares
                        # _mariadb_normalized_version_info, which may be
                        # None -- confirm this cannot raise TypeError.
                        config.db.dialect._mariadb_normalized_version_info
                        >= (10, 2, 7)
                    )
                ),
                "mariadb>=10.2.7",
                "postgresql >= 9.3",
                self._sqlite_json,
                "mssql",
            ]
        )
@property
def json_index_supplementary_unicode_element(self):
# for sqlite see https://bugs.python.org/issue38749
return skip_if(
[
lambda config: against(config, "mysql")
and config.db.dialect._is_mariadb,
"mariadb",
"sqlite",
]
)
@property
def legacy_unconditional_json_extract(self):
"""Backend has a JSON_EXTRACT or similar function that returns a
valid JSON string in all cases.
Used to test a legacy feature and is not needed.
"""
return self.json_type + only_on(
["postgresql", "mysql", "mariadb", "sqlite"]
)
def _sqlite_file_db(self, config):
        # True only for SQLite databases backed by a file on disk (as
        # opposed to in-memory databases); decided from the database URL.
        return against(config, "sqlite") and config.db.dialect._is_url_file_db(
            config.db.url
        )
def _sqlite_memory_db(self, config):
        # Complement of _sqlite_file_db: True for SQLite databases that
        # are NOT backed by a file, i.e. in-memory databases.
        return against(
            config, "sqlite"
        ) and not config.db.dialect._is_url_file_db(config.db.url)
def _sqlite_json(self, config):
        # The JSON1 extension is compile-time optional in SQLite, so the
        # version number alone (>= 3.9 is only the minimum) is not enough:
        # probe the live connection with an actual json_extract() call.
        if not against(config, "sqlite >= 3.9"):
            return False
        else:
            with config.db.connect() as conn:
                try:
                    return (
                        conn.exec_driver_sql(
                            """select json_extract('{"foo": "bar"}', """
                            """'$."foo"')"""
                        ).scalar()
                        == "bar"
                    )
                except exc.DBAPIError:
                    # json_extract() unavailable -> no JSON support.
                    return False
@property
def reflects_json_type(self):
return only_on(
[
lambda config: against(config, "mysql >= 5.7")
and not config.db.dialect._is_mariadb,
"postgresql >= 9.3",
"sqlite >= 3.9",
]
)
@property
def json_array_indexes(self):
return self.json_type
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return fails_on_everything_except("sqlite")
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return skip_if(
["mssql", "mysql", "mariadb", "firebird", "oracle", "sybase"]
)
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return only_on(["oracle"])
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
# does not work as of pyodbc 4.0.22
return fails_on("mysql+mysqlconnector") + skip_if("mssql+pyodbc")
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return skip_if(["oracle"])
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return skip_if(
["mssql", "mysql", "mariadb", "firebird", "oracle", "sybase"]
)
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
# NOTE: this exclusion isn't used in current tests.
return exclusions.open()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return fails_if(
[
(
"sybase+pyodbc",
None,
None,
"Don't know how do get these values through "
"FreeTDS + Sybase",
),
("firebird", None, None, "Precision must be from 1 to 18"),
]
)
@property
def precision_numerics_many_significant_digits(self):
    """target backend supports values with many digits on both sides,
    such as 319438950232418390.273596, 87673.594069654243
    """
    # NOTE: a previous revision defined a local predicate
    # ``broken_cx_oracle`` here that was never referenced anywhere in
    # the returned expression; it was dead code and has been removed.
    return fails_if(
        [
            ("sqlite", None, None, "TODO"),
            ("firebird", None, None, "Precision must be from 1 to 18"),
            ("sybase+pysybase", None, None, "TODO"),
        ]
    )
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return fails_if(
[
("oracle", None, None, "driver doesn't do this automatically"),
(
"firebird",
None,
None,
"database and/or driver truncates decimal places.",
),
]
)
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type."""
return fails_if(
[
(
"mysql",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"mariadb",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"firebird",
None,
None,
"firebird FLOAT type isn't high precision",
),
]
)
@property
def floats_to_four_decimals(self):
return fails_if(
[
("mysql+oursql", None, None, "Floating point error"),
("mariadb+oursql", None, None, "Floating point error"),
(
"firebird",
None,
None,
"Firebird still has FP inaccuracy even "
"with only four decimal places",
),
]
)
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(
select(literal(expr))
)
assert value == expr
See :ticket:`4036`
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
return skip_if(("mssql+pyodbc", None, None, "crashes due to bug #351"))
@property
def duplicate_key_raises_integrity_error(self):
return exclusions.open()
def _has_pg_extension(self, name):
        # Build a predicate checking pg_extension for *name*.  The
        # %-interpolation into SQL is acceptable here only because *name*
        # is always an internal constant ("hstore", "btree_gist"), never
        # user input.
        def check(config):
            if not against(config, "postgresql"):
                return False
            count = (
                config.db.connect(close_with_result=True)
                .exec_driver_sql(
                    "SELECT count(*) FROM pg_extension "
                    "WHERE extname='%s'" % name
                )
                .scalar()
            )
            return bool(count)
        return only_if(check, "needs %s extension" % name)
@property
def hstore(self):
return self._has_pg_extension("hstore")
@property
def btree_gist(self):
return self._has_pg_extension("btree_gist")
@property
def range_types(self):
def check_range_types(config):
if not against(
config, ["postgresql+psycopg2", "postgresql+psycopg2cffi"]
):
return False
try:
config.db.connect(close_with_result=True).exec_driver_sql(
"select '[1,2)'::int4range;"
).scalar()
return True
except Exception:
return False
return only_if(check_range_types)
@property
def async_dialect(self):
"""dialect makes use of await_() to invoke operations on the DBAPI."""
return only_on(["postgresql+asyncpg"])
@property
def oracle_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "oracle_db_link"
),
"oracle_db_link option not specified in config",
)
@property
def postgresql_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "postgres_test_db_link"
),
"postgres_test_db_link option not specified in config",
)
@property
def postgresql_jsonb(self):
return only_on("postgresql >= 9.4") + skip_if(
lambda config: config.db.dialect.driver == "pg8000"
and config.db.dialect._dbapi_version <= (1, 10, 1)
)
@property
def psycopg2_native_hstore(self):
return self.psycopg2_compatibility
@property
def psycopg2_compatibility(self):
return only_on(["postgresql+psycopg2", "postgresql+psycopg2cffi"])
@property
def psycopg2_or_pg8000_compatibility(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pg8000",
]
)
@property
def percent_schema_names(self):
return exclusions.open()
@property
def order_by_label_with_expression(self):
return fails_if(
[
(
"firebird",
None,
None,
"kinterbasdb doesn't send full type information",
),
("postgresql", None, None, "only simple labels allowed"),
("sybase", None, None, "only simple labels allowed"),
("mssql", None, None, "only simple labels allowed"),
]
)
def get_order_by_collation(self, config):
    """Return a collation name usable in ORDER BY for the connected backend.

    Raises NotImplementedError when no collation is known for the
    current database.
    """
    collations = {
        # will raise without quoting
        "postgresql": "POSIX",
        # note MySQL databases need to be created w/ utf8mb4 charset
        # for the test suite
        "mysql": "utf8mb4_bin",
        "mariadb": "utf8mb4_bin",
        "sqlite": "NOCASE",
        # will raise *with* quoting
        "mssql": "Latin1_General_CI_AS",
    }
    backend_name = config.db.name
    try:
        return collations[backend_name]
    except KeyError:
        raise NotImplementedError()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return skip_if(
self._has_mysql_on_windows, "Not supported on MySQL + Windows"
)
@property
def mssql_freetds(self):
return only_on(["mssql+pymssql"])
@property
def legacy_engine(self):
return exclusions.skip_if(lambda config: config.db._is_future)
@property
def ad_hoc_engines(self):
return exclusions.skip_if(
["oracle"],
"works, but Oracle just gets tired with "
"this much connection activity",
)
@property
def no_mssql_freetds(self):
return self.mssql_freetds.not_()
@property
def pyodbc_fast_executemany(self):
def has_fastexecutemany(config):
if not against(config, "mssql+pyodbc"):
return False
if config.db.dialect._dbapi_version() < (4, 0, 19):
return False
with config.db.connect() as conn:
drivername = conn.connection.connection.getinfo(
config.db.dialect.dbapi.SQL_DRIVER_NAME
)
# on linux this is something like 'libmsodbcsql-13.1.so.9.2'.
# on Windows this is something like 'msodbcsql17.dll'.
return "msodbc" in drivername
return only_if(
has_fastexecutemany, "only on pyodbc > 4.0.19 w/ msodbc driver"
)
@property
def python_fixed_issue_8743(self):
return exclusions.skip_if(
lambda: sys.version_info < (2, 7, 8),
"Python issue 8743 fixed in Python 2.7.8",
)
@property
def granular_timezone(self):
"""the datetime.timezone class, or SQLAlchemy's port, supports
seconds and microseconds.
SQLAlchemy ported the Python 3.7 version for Python 2, so
it passes on that. For Python 3.6 and earlier, it is not supported.
"""
return exclusions.skip_if(
lambda: sys.version_info >= (3,) and sys.version_info < (3, 7)
)
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return skip_if(
["oracle", "firebird"], "non-standard SELECT scalar syntax"
)
@property
def mysql_for_update(self):
return skip_if(
"mysql+mysqlconnector",
"lock-sensitive operations crash on mysqlconnector",
)
@property
def mysql_fsp(self):
return only_if(["mysql >= 5.6.4", "mariadb"])
@property
def mysql_fully_case_sensitive(self):
return only_if(self._has_mysql_fully_case_sensitive)
@property
def mysql_zero_date(self):
        # Zero dates ('0000-00-00') are only accepted by MySQL when the
        # server's sql_mode does not include NO_ZERO_DATE.
        def check(config):
            if not against(config, "mysql"):
                return False
            row = (
                config.db.connect(close_with_result=True)
                .exec_driver_sql("show variables like 'sql_mode'")
                .first()
            )
            # A missing sql_mode row is treated as the permissive case.
            return not row or "NO_ZERO_DATE" not in row[1]
        return only_if(check)
@property
def mysql_non_strict(self):
def check(config):
if not against(config, "mysql"):
return False
row = (
config.db.connect(close_with_result=True)
.exec_driver_sql("show variables like 'sql_mode'")
.first()
)
return not row or "STRICT_TRANS_TABLES" not in row[1]
return only_if(check)
@property
def mysql_ngram_fulltext(self):
def check(config):
return (
against(config, "mysql")
and not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (5, 7)
)
return only_if(check)
def _mysql_80(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mysql
and config.db.dialect.server_version_info >= (8,)
)
def _mariadb_102(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info > (10, 2)
)
def _mysql_and_check_constraints_exist(self, config):
        # 1. we have mysql / mariadb and
        # 2. it enforces check constraints
        # (per the version gates below: MariaDB from 10.2, MySQL from
        # 8.0.16)
        if exclusions.against(config, ["mysql", "mariadb"]):
            if config.db.dialect._is_mariadb:
                norm_version_info = (
                    config.db.dialect._mariadb_normalized_version_info
                )
                return norm_version_info >= (10, 2)
            else:
                norm_version_info = config.db.dialect.server_version_info
                return norm_version_info >= (8, 0, 16)
        else:
            return False
def _mysql_check_constraints_exist(self, config):
# 1. we dont have mysql / mariadb or
# 2. we have mysql / mariadb that enforces check constraints
return not exclusions.against(
config, ["mysql", "mariadb"]
) or self._mysql_and_check_constraints_exist(config)
def _mysql_check_constraints_dont_exist(self, config):
# 1. we have mysql / mariadb and
# 2. they dont enforce check constraints
return not self._mysql_check_constraints_exist(config)
def _mysql_not_mariadb_102(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 2)
)
def _mysql_not_mariadb_103(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 3)
)
def _mysql_not_mariadb_104(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 4)
)
def _has_mysql_on_windows(self, config):
return (
against(config, ["mysql", "mariadb"])
) and config.db.dialect._detect_casing(config.db) == 1
def _has_mysql_fully_case_sensitive(self, config):
return (
against(config, "mysql")
and config.db.dialect._detect_casing(config.db) == 0
)
@property
def postgresql_utf8_server_encoding(self):
return only_if(
lambda config: against(config, "postgresql")
and config.db.connect(close_with_result=True)
.exec_driver_sql("show server_encoding")
.scalar()
.lower()
== "utf8"
)
@property
def cxoracle6_or_greater(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver >= (6,)
)
@property
def oracle5x(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver < (6,)
)
@property
def computed_columns(self):
return skip_if(["postgresql < 12", "sqlite < 3.31", "mysql < 5.7"])
@property
def python_profiling_backend(self):
return only_on([self._sqlite_memory_db])
@property
def computed_columns_stored(self):
return self.computed_columns + skip_if(["oracle", "firebird"])
@property
def computed_columns_virtual(self):
return self.computed_columns + skip_if(["postgresql", "firebird"])
@property
def computed_columns_default_persisted(self):
return self.computed_columns + only_if("postgresql")
@property
def computed_columns_reflect_persisted(self):
return self.computed_columns + skip_if("oracle")
@property
def regexp_match(self):
return only_on(["postgresql", "mysql", "mariadb", "oracle", "sqlite"])
@property
def regexp_replace(self):
return only_on(["postgresql", "mysql>=8", "mariadb", "oracle"])
@property
def supports_distinct_on(self):
"""If a backend supports the DISTINCT ON in a select"""
return only_if(["postgresql"])
@property
def supports_for_update_of(self):
return only_if(lambda config: config.db.dialect.supports_for_update_of)
@property
def sequences_in_other_clauses(self):
"""sequences allowed in WHERE, GROUP BY, HAVING, etc."""
return skip_if(["mssql", "oracle"])
@property
def supports_lastrowid_for_expressions(self):
"""cursor.lastrowid works if an explicit SQL expression was used."""
return only_on(["sqlite", "mysql", "mariadb"])
@property
def supports_sequence_for_autoincrement_column(self):
"""for mssql, autoincrement means IDENTITY, not sequence"""
return skip_if("mssql")
@property
def identity_columns(self):
return only_if(["postgresql >= 10", "oracle >= 12", "mssql"])
@property
def identity_columns_standard(self):
return self.identity_columns + skip_if("mssql")
@property
def index_reflects_included_columns(self):
return only_on(["postgresql >= 11", "mssql"])
# mssql>= 11 -> >= MS_2012_VERSION
@property
def fetch_first(self):
return only_on(["postgresql", "mssql >= 11", "oracle >= 12"])
@property
def fetch_percent(self):
return only_on(["mssql >= 11", "oracle >= 12"])
@property
def fetch_ties(self):
return only_on(["postgresql >= 13", "mssql >= 11", "oracle >= 12"])
@property
def fetch_no_order_by(self):
return only_on(["postgresql", "oracle >= 12"])
@property
def fetch_offset_with_options(self):
return skip_if("mssql")
| 30.753819 | 81 | 0.567182 |
deeea6ae92f000e272daa6bb9a9d0b1e7b7b69eb | 1,892 | py | Python | Tests/benchmarks/bench_microbenchmarks.py | CyberFlameGO/Pyjion | 696ccb57f56036716e553711a00060a2c94ae817 | [
"MIT"
] | null | null | null | Tests/benchmarks/bench_microbenchmarks.py | CyberFlameGO/Pyjion | 696ccb57f56036716e553711a00060a2c94ae817 | [
"MIT"
] | null | null | null | Tests/benchmarks/bench_microbenchmarks.py | CyberFlameGO/Pyjion | 696ccb57f56036716e553711a00060a2c94ae817 | [
"MIT"
] | null | null | null | def test_floats(n=100000):
""" Test float/integer arithmetic """
for y in range(n):
x = 0.1
z = y * y + x - y
x *= z
def test_ints(n=100000):
    """ Test integer arithmetic """
    # Same shape as test_floats but with an integer seed, exercising the
    # integer arithmetic path instead of floats.
    for y in range(n):
        x = 2
        z = y * y + x - y
        x *= z
def test_bigints(n=100000):
    # The seed values exceed 64 bits, forcing Python's arbitrary-precision
    # integer path rather than any machine-word fast path.
    for _ in range(n):
        x = 200_100_100_100_100_100_100_100_100
        y = 100_100_100_100_100_100_100_100_100
        z = y * y + x - y
        x *= z
def test_function_calls(n=10000):
    # Seven calls per iteration amplify pure call overhead relative to
    # the cost of the loop machinery itself.
    def f():
        pass
    for i in range(n):
        f()
        f()
        f()
        f()
        f()
        f()
        f()
def test_builtin_type_calls(n=10000):
    # Measures the overhead of repeatedly calling a builtin type (int).
    for _ in range(n):
        int(n)
        int(n)
        int(n)
        int(n)
        int(n)
        int(n)
        int(n)
def test_builtin_func_calls(n=10000):
    # Measures the overhead of repeatedly calling a builtin function
    # (sum) with a constant tuple argument.
    for _ in range(n):
        sum((1, 2))
        sum((1, 2))
        sum((1, 2))
        sum((1, 2))
        sum((1, 2))
        sum((1, 2))
def test_bytearray_slicing(n=1000):
    # Repeated len()/index arithmetic on a tiny bytearray; j is written
    # and immediately overwritten on purpose -- only the indexing cost is
    # being measured.
    for y in range(1, n):
        b = bytearray([0] * 2)
        j = b[len(b) - 1] * b[len(b) - 1]
        j = b[len(b) - 1] * b[len(b) - 1]
        j = b[len(b) - 1] * b[len(b) - 1]
        j = b[len(b) - 1] * b[len(b) - 1]
        j = b[len(b) - 1] * b[len(b) - 1]
        j = b[len(b) - 1] * b[len(b) - 1]
# Benchmark registry consumed by the harness.  Each entry is
# (callable, label, options dict, trailing integer).
# NOTE(review): the options presumably configure the JIT ("level",
# "pgc") and the final integer looks like a repetition count -- confirm
# against the benchmark runner.
__benchmarks__ = [
    (test_floats, "floatmath_micro", {"level": 2}, 10),
    (test_ints, "intmath_micro", {"level": 2, "pgc": True}, 10),
    (test_bigints, "bigintmath_micro", {"level": 2, "pgc": True}, 10),
    (test_function_calls, "function_call_micro", {"level": 2, "pgc": True}, 10),
    (test_builtin_type_calls, "type_call_micro", {"level": 2, "pgc": True}, 10),
    (test_builtin_func_calls, "builtin_call_micro", {"level": 2, "pgc": True}, 10),
    (test_bytearray_slicing, "bytearray_slicing", {"level": 2, "pgc": True}, 10)
]
| 23.073171 | 83 | 0.486786 |
c47b994b224252997cb9179bb163356f12b60851 | 250 | py | Python | code/utils/backend.py | wukailu/EDSR-PyTorch | 5625cf83ce88050b68e649beb4155b32c38018fa | [
"MIT"
] | null | null | null | code/utils/backend.py | wukailu/EDSR-PyTorch | 5625cf83ce88050b68e649beb4155b32c38018fa | [
"MIT"
] | null | null | null | code/utils/backend.py | wukailu/EDSR-PyTorch | 5625cf83ce88050b68e649beb4155b32c38018fa | [
"MIT"
] | null | null | null | import os
local_backend = os.getenv('LOCAL_BACKEND')
if local_backend:
from .local_backend import *
else:
if 'use_kubernets.backend' in os.listdir('.'):
from .kubernetes_backend import *
else:
from .atlas_backend import *
| 25 | 50 | 0.688 |
a9b182a80363b33a5780a2b66dedeadc3e2e132f | 3,392 | py | Python | plugins/community/repos/Southpole/eurorack/elements/resources/resources.py | guillaume-plantevin/VeeSeeVSTRack | 76fafc8e721613669d6f5ae82a0f58ce923a91e1 | [
"Zlib",
"BSD-3-Clause"
] | 233 | 2018-07-02T16:49:36.000Z | 2022-02-27T21:45:39.000Z | plugins/community/repos/Southpole/eurorack/elements/resources/resources.py | guillaume-plantevin/VeeSeeVSTRack | 76fafc8e721613669d6f5ae82a0f58ce923a91e1 | [
"Zlib",
"BSD-3-Clause"
] | 24 | 2018-07-09T11:32:15.000Z | 2022-01-07T01:45:43.000Z | plugins/community/repos/Southpole/eurorack/elements/resources/resources.py | guillaume-plantevin/VeeSeeVSTRack | 76fafc8e721613669d6f5ae82a0f58ce923a91e1 | [
"Zlib",
"BSD-3-Clause"
] | 24 | 2018-07-14T21:55:30.000Z | 2021-05-04T04:20:34.000Z | #!/usr/bin/python2.5
#
# Copyright 2014 Olivier Gillet.
#
# Author: Olivier Gillet (ol.gillet@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Master resources file.
header = """// Copyright 2014 Olivier Gillet.
//
// Author: Olivier Gillet (ol.gillet@gmail.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
// See http://creativecommons.org/licenses/MIT/ for more information.
//
// -----------------------------------------------------------------------------
//
// Resources definitions.
//
// Automatically generated with:
// make resources
"""
namespace = 'elements'
target = 'elements'
types = ['uint8_t', 'uint16_t']
includes = """
#include "stmlib/stmlib.h"
"""
import lookup_tables
import samples
# Emit a specialized resource-manager in the generated code.
create_specialized_manager = True
# Each entry: (table data, base name, symbol prefix, C type, Python
# cast, trailing bool).
# NOTE(review): the meaning of the final bool is not visible here --
# presumably a storage/placement flag; confirm in the resource compiler.
resources = [
  (lookup_tables.int16_lookup_tables,
   'lookup_table_int16', 'LUT', 'int16_t', int, False),
  (lookup_tables.uint32_lookup_tables,
   'lookup_table_uint32', 'LUT', 'uint32_t', int, False),
  (lookup_tables.lookup_tables,
   'lookup_table', 'LUT', 'float', float, False),
  (samples.sample_data,
   'sample', 'SMP', 'int16_t', int, False),
  (samples.boundaries,
   'sample_boundary', 'SMP', 'size_t', int, False),
  ]
| 38.988506 | 80 | 0.712264 |
1bcf00f4944af43d9c7015c05144ed56d4f6d816 | 2,041 | py | Python | pykomposter/lib.py | algoravioli/pykomposter | 84af1851477d003be4beed0cdbe0dd664ff150a1 | [
"MIT"
] | null | null | null | pykomposter/lib.py | algoravioli/pykomposter | 84af1851477d003be4beed0cdbe0dd664ff150a1 | [
"MIT"
] | null | null | null | pykomposter/lib.py | algoravioli/pykomposter | 84af1851477d003be4beed0cdbe0dd664ff150a1 | [
"MIT"
] | null | null | null | import music21
import numpy as np
import pandas as pd
# function definitions
import actions
# behaviours:
import behaviours
# metabehaviours:
import metabehaviours
# microactions
import microactions
class pykomposter:
    """A composer model whose *outlook* drives how pieces are composed.

    The outlook dict records the model's tendency, its metabehaviour,
    and its operational characteristics.
    """

    def __init__(self):
        super(pykomposter, self).__init__()
        # tendency: [secondary behaviour name, weight], e.g. ["stochastic", 0.2]
        # metabehaviour: reference deciding which actions the model takes
        # op_char: dict of time- and content-dependencies
        self.outlook = {
            "tendency": None,
            "metabehaviour": None,
            "op_char": dict(),
        }

    # -- setters ---------------------------------------------------------

    def setTendency(self, tendency_list):
        """Validate and store the tendency list (guard-clause style)."""
        if len(tendency_list) != 2:
            raise RuntimeError("ERROR: Tendency list must only contain 2 elements")
        if not isinstance(tendency_list[0], str):
            raise RuntimeError(
                "ERROR: 1st argument of tendency needs to be a string."
            )
        if not isinstance(tendency_list[1], float):
            raise RuntimeError(
                "ERROR: 2nd argument of tendency needs to be a float."
            )
        self.outlook["tendency"] = tendency_list

    def setMetaBehaviour(self, metabehaviour):
        """Store the metabehaviour reference."""
        self.outlook["metabehaviour"] = metabehaviour

    def setOpChar(self, opchardict):
        """Store the operational-characteristics dict."""
        self.outlook["op_char"] = opchardict

    ##########################
    # BEHAVIOUR INTERACTIONS #
    ##########################

    def withBehaviour(self, behaviour, compose, state_transitions=100, cubeDict=None):
        """Compose a score by delegating to *compose* with the outlook."""
        return compose(
            self.outlook["metabehaviour"],
            behaviour,
            self.outlook["op_char"],
            state_transitions,
            cubeDict,
        )
| 31.4 | 149 | 0.588927 |
2cc85d4bd905b49d4ddb9fb7094e59f5b5befe76 | 1,810 | py | Python | Caffe/server.py | jingmingcn/blood-server | 0bcc1fda5d05b8e5acca2c9ce6857c7963fb84b5 | [
"Apache-2.0"
] | null | null | null | Caffe/server.py | jingmingcn/blood-server | 0bcc1fda5d05b8e5acca2c9ce6857c7963fb84b5 | [
"Apache-2.0"
] | null | null | null | Caffe/server.py | jingmingcn/blood-server | 0bcc1fda5d05b8e5acca2c9ce6857c7963fb84b5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
import BaseHTTPServer
import json
import numpy as np
import base64
import caffe_predict
#import caffe_train
# Server-side configuration: bind address and listening port.
HOST_NAME = 'localhost'
PORT_NUMBER = 9000
class JSONHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle incoming POST requests (Python 2 BaseHTTPServer handler)."""
    def do_POST(self):
        # Read and parse the JSON request body.
        response_code = 200
        response = ""
        var_len = int(self.headers.get('Content-Length'))
        content = self.rfile.read(var_len);
        payload = json.loads(content);
        # Training request: save the uploaded (base64) image; actual training
        # is currently disabled (caffe_train call commented out).
        if payload.get('train'):
            try:
                da = base64.b64decode(payload["img"])
                with open("0.jpg", 'wb') as jpg:
                    jpg.write(da)
                #caffe_train.train()
            # NOTE(review): bare except swallows every error (including
            # SystemExit/KeyboardInterrupt) and reports only a 500 — consider
            # narrowing to Exception and logging.
            except:
                response_code = 500
        # Prediction request: save the uploaded image, run inference and
        # return the result as JSON.
        elif payload.get('pred'):
            try:
                da = base64.b64decode(payload["img"])
                with open("0.jpg", 'wb') as jpg:
                    jpg.write(da)
                result = caffe_predict.predict()
                response = {"type":"test", "result":result}
            except:
                response_code = 500
        else:
            # Neither 'train' nor 'pred' flag present: bad request.
            response_code = 400
        self.send_response(response_code)
        self.send_header("Content-type", "application/json")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()
        if response:
            self.wfile.write(json.dumps(response))
        return
if __name__ == '__main__':
    server_class = BaseHTTPServer.HTTPServer;
    httpd = server_class((HOST_NAME, PORT_NUMBER), JSONHandler)
    try:
        # Start the server; blocks until interrupted.
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    # NOTE(review): this `else` runs only when serve_forever() returns
    # WITHOUT an exception, so the message below is misleading — it was
    # probably intended as an `except Exception:` handler. Confirm intent.
    else:
        print "Unexpected server exception occurred."
    finally:
        httpd.server_close()
| 27.846154 | 63 | 0.576796 |
1f4f7fb6d213f4208c7f5f3b368be72d9ff67177 | 23 | py | Python | kb_python/__init__.py | Maarten-vd-Sande/kb_python | f9ec9c377b8e9f270a9b121285b0b6593942080f | [
"BSD-2-Clause"
] | null | null | null | kb_python/__init__.py | Maarten-vd-Sande/kb_python | f9ec9c377b8e9f270a9b121285b0b6593942080f | [
"BSD-2-Clause"
] | null | null | null | kb_python/__init__.py | Maarten-vd-Sande/kb_python | f9ec9c377b8e9f270a9b121285b0b6593942080f | [
"BSD-2-Clause"
] | null | null | null | __version__ = '0.24.4'
| 11.5 | 22 | 0.652174 |
fc445dae8405dad339ce95bf5e475c4c332ea1fc | 995 | py | Python | lms/models/enrollment_models.py | yankai14/event-management-telegram-bot-backend | c0b4b2294ab7d06100b221d9b41a8f52d500075d | [
"MIT"
] | null | null | null | lms/models/enrollment_models.py | yankai14/event-management-telegram-bot-backend | c0b4b2294ab7d06100b221d9b41a8f52d500075d | [
"MIT"
] | 6 | 2021-06-28T07:23:15.000Z | 2021-07-22T12:59:33.000Z | lms/models/enrollment_models.py | yankai14/event-management-telegram-bot-backend | c0b4b2294ab7d06100b221d9b41a8f52d500075d | [
"MIT"
] | null | null | null | from django.db import models
from . import User, EventInstance
class EventRole(models.IntegerChoices):
    """Role a user can hold within an event instance."""
    PARTICIPANT = 1
    FACILITATOR = 2
    EVENT_ADMIN = 3
    COORDINATOR = 4
    LEAD = 5
class EnrollmentStatus(models.IntegerChoices):
    """Lifecycle state of a user's enrollment in an event instance."""
    PENDING = 1
    ENROLLED = 2
    REJECTED = 3
    WITHDRAWN = 4
    AWAITING_PAYMENT = 5
class UserEnrollment(models.Model):
    ''' Store administrative information about a participant's enrollment
    in a specific event instance (payment details, role, status).'''
    # Enrolled user; nullable so the enrollment record can outlive the user.
    user = models.ForeignKey("User", on_delete=models.CASCADE, null=True)
    # External payment reference and the platform it came from (both optional).
    paymentId = models.CharField(max_length=200, blank=True, null=True)
    eventInstance = models.ForeignKey("EventInstance", on_delete=models.CASCADE, null=True)
    paymentPlatform = models.CharField(max_length=200, blank=True, null=True)
    # Role within the event (EventRole choices); status defaults to PENDING.
    role = models.IntegerField(choices=EventRole.choices, blank=False, null=True)
    status = models.IntegerField(choices=EnrollmentStatus.choices,
                                 blank=False, null=False, default=EnrollmentStatus.PENDING)
b106f30d2329afde9c89183ffa7604eb902fab96 | 4,404 | py | Python | cloudbaseinit/tests/plugins/windows/test_ntpclient.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 74 | 2015-01-07T17:03:43.000Z | 2022-02-06T17:08:54.000Z | cloudbaseinit/tests/plugins/windows/test_ntpclient.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 26 | 2015-02-13T11:32:05.000Z | 2020-11-13T15:02:03.000Z | cloudbaseinit/tests/plugins/windows/test_ntpclient.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 40 | 2015-01-22T17:12:03.000Z | 2021-12-09T20:37:35.000Z | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from oslo.config import cfg
from cloudbaseinit import exception
from cloudbaseinit.plugins.windows import ntpclient
CONF = cfg.CONF
class NTPClientPluginTests(unittest.TestCase):
    """Unit tests for NTPClientPlugin; all osutils interaction is mocked."""

    def setUp(self):
        # Fresh plugin instance for every test.
        self._ntpclient = ntpclient.NTPClientPlugin()

    def test_set_ntp_trigger_mode(self):
        """_set_ntp_trigger_mode registers network on/off triggers via sc.exe."""
        mock_osutils = mock.Mock()
        self._ntpclient._set_ntp_trigger_mode(mock_osutils)
        mock_osutils.execute_system32_process.assert_called_once_with(
            ["sc.exe", "triggerinfo", ntpclient._W32TIME_SERVICE,
             "start/networkon", "stop/networkoff"])

    @mock.patch('time.sleep')
    @mock.patch('cloudbaseinit.plugins.windows.ntpclient.NTPClientPlugin.'
                '_set_ntp_trigger_mode')
    def _test_check_w32time_svc_status(self, mock_set_ntp_trigger_mode,
                                       mock_sleep, start_mode,
                                       fail_service_start,
                                       patch_check_os_version=True):
        """Shared driver for verify_time_service scenarios.

        :param start_mode: simulated current start mode of the w32time service
        :param fail_service_start: when True the service stays "stopped" and
            verify_time_service is expected to raise CloudbaseInitException
        :param patch_check_os_version: simulated result of check_os_version(6, 0);
            trigger mode is only expected to be configured when True
        """
        # TODO(rtingirica): use _W32TIME_SERVICE when it will be moved outside
        # of method declaration
        mock_osutils = mock.MagicMock()
        mock_osutils.SERVICE_START_MODE_AUTOMATIC = "Automatic"
        mock_osutils.SERVICE_STATUS_RUNNING = "running"
        mock_osutils.SERVICE_STATUS_STOPPED = "stopped"
        mock_osutils.get_service_start_mode.return_value = start_mode
        mock_osutils.check_os_version.return_value = patch_check_os_version
        if fail_service_start:
            # Service never leaves "stopped": a hard failure is expected.
            mock_osutils.get_service_status.return_value = "stopped"
            self.assertRaises(exception.CloudbaseInitException,
                              self._ntpclient.verify_time_service,
                              mock_osutils)
        else:
            # First poll sees it stopped, second poll sees it running.
            mock_osutils.get_service_status.side_effect = [
                "stopped", mock_osutils.SERVICE_STATUS_RUNNING]
            self._ntpclient.verify_time_service(osutils=mock_osutils)
            if start_mode != mock_osutils.SERVICE_START_MODE_AUTOMATIC:
                # Plugin must switch the service to automatic start.
                mock_osutils.set_service_start_mode.assert_called_once_with(
                    ntpclient._W32TIME_SERVICE,
                    mock_osutils.SERVICE_START_MODE_AUTOMATIC)
            mock_sleep.assert_called_once_with(1)
            mock_osutils.start_service.assert_called_once_with(
                ntpclient._W32TIME_SERVICE)
        # Common expectations for both success and failure paths.
        mock_osutils.get_service_start_mode.assert_called_once_with(
            ntpclient._W32TIME_SERVICE)
        mock_osutils.get_service_status.assert_called_with(
            ntpclient._W32TIME_SERVICE)
        mock_osutils.check_os_version.assert_called_once_with(6, 0)
        if patch_check_os_version:
            mock_set_ntp_trigger_mode.assert_called_once_with(mock_osutils)
        else:
            self.assertFalse(mock_set_ntp_trigger_mode.called)

    def test_check_w32time_svc_status_other_start_mode(self):
        self._test_check_w32time_svc_status(start_mode="not automatic",
                                            fail_service_start=False)

    def test_check_w32time_svc_status_start_automatic(self):
        self._test_check_w32time_svc_status(start_mode="automatic",
                                            fail_service_start=False)

    def test_check_w32time_svc_status_exception(self):
        self._test_check_w32time_svc_status(start_mode="automatic",
                                            fail_service_start=True)

    def test_check_w32time_older_oses(self):
        self._test_check_w32time_svc_status(start_mode="automatic",
                                            fail_service_start=False,
                                            patch_check_os_version=False)
| 41.942857 | 78 | 0.673252 |
c37e4960d4597f0c0b8f279479bb8a5f9e1baec3 | 6,486 | py | Python | tests/test_integration.py | behzadhaghgoo/cml | e659c7ae10a52bbe1cbabf9d359aea43af19eb12 | [
"MIT"
] | 210 | 2018-10-17T01:04:48.000Z | 2022-03-09T16:17:06.000Z | tests/test_integration.py | Zhiwei-Z/PrompLimitTest | 9d109f1a604125411a1e7894c3222cd50a0ec975 | [
"MIT"
] | 13 | 2018-10-25T20:01:09.000Z | 2022-01-24T13:11:24.000Z | tests/test_integration.py | Zhiwei-Z/PrompLimitTest | 9d109f1a604125411a1e7894c3222cd50a0ec975 | [
"MIT"
] | 55 | 2018-10-18T22:00:51.000Z | 2021-11-24T00:06:31.000Z | from meta_policy_search.baselines.linear_baseline import LinearFeatureBaseline
from meta_policy_search.meta_algos.pro_mp import ProMP
from meta_policy_search.samplers.meta_sampler import MetaSampler
from meta_policy_search.samplers.meta_sample_processor import MetaSampleProcessor
from meta_policy_search.policies.meta_gaussian_mlp_policy import MetaGaussianMLPPolicy
import tensorflow as tf
import numpy as np
import unittest
from gym.spaces import Box
class MetaPointEnv():
    """2-D point-mass environment used by the integration test.

    Each action is clipped to [-0.1, 0.1] per axis and added to the state.
    Reward is the negative Euclidean distance of the next state from the
    origin; an episode ends when both coordinates are within 0.01 of zero.
    """

    def step(self, action):
        """Advance one timestep; returns (observation, reward, done, info)."""
        old_state = self._state
        self._state = old_state + np.clip(action, -0.1, 0.1)
        reward = self.reward(old_state, action, self._state)
        terminal = self.done(self._state)
        return np.copy(self._state), reward, terminal, {}

    def reset(self):
        """Sample a fresh start state uniformly from [-2, 2]^2 and return it."""
        self._state = np.random.uniform(-2, 2, size=(2,))
        return np.copy(self._state)

    @property
    def observation_space(self):
        # Unbounded 2-D observations.
        return Box(low=-np.inf, high=np.inf, shape=(2,))

    @property
    def action_space(self):
        # Per-axis actions limited to [-0.1, 0.1].
        return Box(low=-0.1, high=0.1, shape=(2,))

    def done(self, obs):
        """True when both coordinates are within 0.01 of the origin.

        Accepts a single observation (1-D) or a batch (2-D).
        """
        if obs.ndim == 1:
            return abs(obs[0]) < 0.01 and abs(obs[1]) < 0.01
        if obs.ndim == 2:
            return np.logical_and(np.abs(obs[:, 0]) < 0.01, np.abs(obs[:, 1]) < 0.01)

    def reward(self, obs, act, obs_next):
        """Negative Euclidean distance of obs_next from the origin."""
        if obs_next.ndim == 1:
            dist = np.sqrt(obs_next[0] ** 2 + obs_next[1] ** 2)
            return -dist
        if obs_next.ndim == 2:
            return -np.sqrt(obs_next[:, 0] ** 2 + obs_next[:, 1] ** 2)

    def log_diagnostics(self, paths):
        # No diagnostics for this toy environment.
        pass

    def sample_tasks(self, n_tasks):
        # Single implicit task: every "task" is an empty dict.
        return [{}] * n_tasks

    def set_task(self, task):
        # Nothing to configure; tasks carry no parameters.
        pass
class TestLikelihoodRation(unittest.TestCase):
    """
    Assure that the likelihood ratio at the first gradient step is approx. one
    since pi_old = pi_new.
    """

    def setUp(self):
        # Toy point environment, linear baseline and a small Gaussian MLP
        # meta-policy, wired into a ProMP training setup.
        self.env = env = MetaPointEnv()
        self.baseline = baseline = LinearFeatureBaseline()
        self.policy = policy = MetaGaussianMLPPolicy(
            name="meta-policy",
            obs_dim=np.prod(env.observation_space.shape),
            action_dim=np.prod(env.action_space.shape),
            meta_batch_size=10,
            hidden_sizes=(16, 16),
            learn_std=True,
            hidden_nonlinearity=tf.tanh,
            output_nonlinearity=None,
        )
        self.sampler = MetaSampler(
            env=env,
            policy=policy,
            rollouts_per_meta_task=2,
            meta_batch_size=10,
            max_path_length=50,
            parallel=False,
        )
        self.sample_processor = MetaSampleProcessor(
            baseline=baseline,
            discount=0.99,
            gae_lambda=1.0,
            normalize_adv=True,
            positive_adv=False,
        )
        self.algo = ProMP(
            policy=policy,
            inner_lr=0.1,
            meta_batch_size=10,
            num_inner_grad_steps=2,
            learning_rate=1e-3,
            num_ppo_steps=5,
            num_minibatches=1,
            clip_eps=0.5,
            target_inner_step=2e-2,
            init_inner_kl_penalty=1e-3,
        )

    def test_likelihood_ratio(self):
        """Sample one batch pre-update and check the likelihood ratio is 1."""
        with tf.Session() as sess:
            # initialize uninitialized vars (only initialize vars that were not loaded)
            uninit_vars = [var for var in tf.global_variables() if not sess.run(tf.is_variable_initialized(var))]
            sess.run(tf.variables_initializer(uninit_vars))
            self.sampler.update_tasks()
            self.policy.switch_to_pre_update()  # Switch to pre-update policy
            all_samples_data, all_paths = [], []
            for step in range(1):
                """ -------------------- Sampling --------------------------"""
                paths = self.sampler.obtain_samples(log_prefix=str(step))
                all_paths.append(paths)
                """ ----------------- Processing Samples ---------------------"""
                samples_data = self.sample_processor.process_samples(paths, log=False)
                all_samples_data.append(samples_data)
                """ ------------------- Inner Policy Update --------------------"""
                obs_phs, action_phs, adv_phs, dist_info_phs, all_phs = self.algo._make_input_placeholders('')
                for i in range(self.algo.meta_batch_size):
                    obs = samples_data[i]['observations']
                    actions = samples_data[i]['actions']
                    agent_infos = samples_data[i]['agent_infos']
                    param_vals = self.policy.get_param_values()
                    # Ratio of new-policy to old-policy action likelihoods;
                    # with identical parameters this must evaluate to ~1.
                    likelihood_ratio_sym = self.policy.likelihood_ratio_sym(obs_phs[i], action_phs[i],
                                                                            dist_info_phs[i],
                                                                            self.policy.policies_params_phs[i])
                    feed_dict_params = dict(zip(self.policy.policies_params_phs[i].values(), param_vals.values()))
                    feed_dict_dist_infos = dict(zip(dist_info_phs[i].values(), agent_infos.values()))
                    feed_dict = {obs_phs[i]: obs,
                                 action_phs[i]: actions
                                 }
                    feed_dict.update(feed_dict_params)
                    feed_dict.update(feed_dict_dist_infos)
                    lr = sess.run(likelihood_ratio_sym, feed_dict=feed_dict)
                    self.assertTrue(np.allclose(lr, 1))
22a37b34c6ed009b071237dad6a3c900a387589e | 3,974 | py | Python | ptsemseg/models/modelTir3D.py | donghaozhang/fast_segmentation | 6a9911e2fe7dedb6e432cb549334e7b0cd0a720d | [
"MIT"
] | 8 | 2018-09-06T05:41:55.000Z | 2018-12-25T15:57:03.000Z | ptsemseg/models/modelTir3D.py | donghaozhang/fast_segmentation | 6a9911e2fe7dedb6e432cb549334e7b0cd0a720d | [
"MIT"
] | 2 | 2018-09-29T08:56:49.000Z | 2019-06-03T09:17:17.000Z | ptsemseg/models/modelTir3D.py | donghaozhang/fast_segmentation | 6a9911e2fe7dedb6e432cb549334e7b0cd0a720d | [
"MIT"
] | 4 | 2018-09-06T05:45:11.000Z | 2019-08-26T00:48:26.000Z | import torch
import torch.nn as nn
from ptsemseg.models.layersTir3D import *
class FCDenseNet(nn.Module):
    """Fully-convolutional DenseNet for 3-D volumes.

    Built from the DenseBlock / TransitionDown / TransitionUp / Bottleneck
    layers in layersTir3D: a downsampling path that records skip connections,
    a bottleneck, and an upsampling path that consumes the skips in reverse
    order, followed by a 1x1x1 classification convolution.
    """
    def __init__(self, in_channels=3, down_blocks=(5, 5, 5, 5, 5),
                 up_blocks=(5, 5, 5, 5, 5), bottleneck_layers=5,
                 growth_rate=16, out_chans_first_conv=48, n_classes=12):
        """
        :param in_channels: channels of the input volume
        :param down_blocks: layers per dense block on the down path
        :param up_blocks: layers per dense block on the up path
        :param bottleneck_layers: layers in the bottleneck dense block
        :param growth_rate: channels added per dense layer
        :param out_chans_first_conv: channels produced by the first conv
        :param n_classes: output channels (segmentation classes)
        """
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        # Running channel count and the skip-connection widths (newest first,
        # so index i matches up-path block i).
        cur_channels_count = 0
        skip_connection_channel_counts = []
        ## First Convolution ##
        self.add_module('firstconv', nn.Conv3d(in_channels=in_channels,
                        out_channels=out_chans_first_conv, kernel_size=3,
                        stride=1, padding=1, bias=True))
        cur_channels_count = out_chans_first_conv
        #####################
        # Downsampling path #
        #####################
        self.denseBlocksDown = nn.ModuleList([])
        self.transDownBlocks = nn.ModuleList([])
        for i in range(len(down_blocks)):
            self.denseBlocksDown.append(
                DenseBlock(cur_channels_count, growth_rate, down_blocks[i]))
            # Each dense layer appends growth_rate channels.
            cur_channels_count += (growth_rate * down_blocks[i])
            skip_connection_channel_counts.insert(0, cur_channels_count)
            self.transDownBlocks.append(TransitionDown(cur_channels_count))
        #####################
        #     Bottleneck    #
        #####################
        self.add_module('bottleneck', Bottleneck(cur_channels_count,
                                                 growth_rate, bottleneck_layers))
        prev_block_channels = growth_rate * bottleneck_layers
        cur_channels_count += prev_block_channels
        #######################
        #   Upsampling path   #
        #######################
        self.transUpBlocks = nn.ModuleList([])
        self.denseBlocksUp = nn.ModuleList([])
        for i in range(len(up_blocks) - 1):
            self.transUpBlocks.append(TransitionUp(prev_block_channels, prev_block_channels))
            # Upsampled features are concatenated with the matching skip.
            cur_channels_count = prev_block_channels + skip_connection_channel_counts[i]
            self.denseBlocksUp.append(DenseBlock(
                cur_channels_count, growth_rate, up_blocks[i],
                upsample=True))
            prev_block_channels = growth_rate * up_blocks[i]
            cur_channels_count += prev_block_channels
        ## Final DenseBlock ##
        self.transUpBlocks.append(TransitionUp(
            prev_block_channels, prev_block_channels))
        cur_channels_count = prev_block_channels + skip_connection_channel_counts[-1]
        self.denseBlocksUp.append(DenseBlock(
            cur_channels_count, growth_rate, up_blocks[-1],
            upsample=False))
        cur_channels_count += growth_rate * up_blocks[-1]
        ## Softmax ##
        # 1x1x1 conv to per-class scores. Note: self.softmax is defined but
        # NOT applied in forward() (the call there is commented out), so the
        # network returns raw logits.
        self.finalConv = nn.Conv3d(in_channels=cur_channels_count,
                                   out_channels=n_classes, kernel_size=1, stride=1,
                                   padding=0, bias=True)
        self.softmax = nn.LogSoftmax(dim=1)
    def forward(self, x):
        """Run the down path (recording skips), bottleneck, and up path.

        Returns per-class logits with n_classes output channels.
        """
        # print ('first conv')
        # print('the size of input size is', x.size())
        out = self.firstconv(x)
        # print(out.size())
        skip_connections = []
        for i in range(len(self.down_blocks)):
            # print('DB',i)
            out = self.denseBlocksDown[i](out)
            # print(out.size())
            skip_connections.append(out)
            # print('TD', i)
            out = self.transDownBlocks[i](out)
            # print(out.size())
        # print ('bottleneck')
        out = self.bottleneck(out)
        # print (out.size())
        for i in range(len(self.up_blocks)):
            # Skips are consumed in reverse (deepest first).
            skip = skip_connections.pop()
            # print('TU', i)
            out = self.transUpBlocks[i](out, skip)
            # print (out.size())
            out = self.denseBlocksUp[i](out)
        out = self.finalConv(out)
        # out = self.softmax(out)
        return out
def FCDenseNet57(n_classes):
    """FCDenseNet preset: 4-layer dense blocks, growth rate 6, 4-channel input."""
    return FCDenseNet(
        in_channels=4, down_blocks=(4, 4, 4, 4, 4),
        up_blocks=(4, 4, 4, 4, 4), bottleneck_layers=4,
        growth_rate=6, out_chans_first_conv=24, n_classes=n_classes)
def FCDenseNet67(n_classes):
    """FCDenseNet preset: 5-layer dense blocks, growth rate 16, 4-channel input."""
    return FCDenseNet(
        in_channels=4, down_blocks=(5, 5, 5, 5, 5),
        up_blocks=(5, 5, 5, 5, 5), bottleneck_layers=5,
        growth_rate=16, out_chans_first_conv=48, n_classes=n_classes)
def FCDenseNet103(n_classes):
    """FCDenseNet preset: deepening block sizes (4..12), growth rate 16."""
    return FCDenseNet(
        in_channels=4, down_blocks=(4, 5, 7, 10, 12),
        up_blocks=(12, 10, 7, 5, 4), bottleneck_layers=15,
        growth_rate=16, out_chans_first_conv=48, n_classes=n_classes)
e5d0dc86537c993d5adb315799c674928aff2ec6 | 3,256 | py | Python | ultra/ultra/baselines/td3/td3/config.py | isgeles/SMARTS | 423275123ae4aab8b7d409140d82b50555a5267c | [
"MIT"
] | 554 | 2020-10-16T02:30:35.000Z | 2022-03-29T14:13:00.000Z | ultra/ultra/baselines/td3/td3/config.py | isgeles/SMARTS | 423275123ae4aab8b7d409140d82b50555a5267c | [
"MIT"
] | 917 | 2020-10-17T00:10:31.000Z | 2022-03-31T23:00:47.000Z | ultra/ultra/baselines/td3/td3/config.py | isgeles/SMARTS | 423275123ae4aab8b7d409140d82b50555a5267c | [
"MIT"
] | 135 | 2020-10-20T01:44:49.000Z | 2022-03-27T04:51:31.000Z | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import torch
from ultra.baselines.configs import Config
from ultra.baselines.common.replay_buffer import ReplayBuffer
class TD3Config(Config):
    """Baseline configuration for the TD3 agent: seeds, encoder choice,
    files to snapshot, and the full TD3 policy hyper-parameter set."""
    def __init__(self, task):
        super().__init__(task=task)
        # General experiment settings and source files to archive with runs.
        self.set_config(
            seed=2,
            social_capacity=5,
            action_size=2,
            social_vehicle_encoder="pointnet_encoder",
            save_codes=[
                "ultra/src/train.py",
                "ultra/baselines/td3/config.py",
                "ultra/baselines/td3/policy.py",
                "ultra/baselines/td3/fc_model.py",
                "ultra/utils/common.py",
                "ultra/src/adapter.py",
            ],
        )
        # Policy hyper-parameters passed to the TD3 implementation
        # (policy_delay / policy_noise / noise_clip are the TD3-specific
        # delayed-update and target-smoothing knobs).
        self.set_config(
            policy_params={
                "state_size": self.state_size,
                "action_size": self.action_size,
                "action_range": np.asarray(
                    [[-1.0, 1.0], [-1.0, 1.0]], dtype=np.float32
                ),
                "state_preprocessor": self.state_preprocessor,
                "update_rate": 5,
                "policy_delay": 2,
                "noise_clip": 0.5,
                "policy_noise": 0.2,
                "warmup": 10000,
                "actor_lr": 1e-4,
                "critic_lr": 1e-3,
                "critic_wd": 0.0,
                "actor_wd": 0.0,
                "critic_tau": 0.01,
                "actor_tau": 0.01,
                "device_name": self.device_name,
                "seed": self.seed,
                "gamma": 0.99,
                "batch_size": 128,
                "sigma": 0.3,
                "theta": 0.15,
                "dt": 1e-2,
                "replay": ReplayBuffer(
                    buffer_size=int(1e6),
                    batch_size=128,
                    state_preprocessor=self.state_preprocessor,
                    device_name=self.device_name,
                ),
                "social_feature_encoder_class": self.social_feature_encoder_class,
                "social_feature_encoder_params": self.social_feature_encoder_params,
            },
        )
| 40.197531 | 84 | 0.58231 |
99fe9bc77c2dbbf4915fc76838b07c9662c8e827 | 606 | py | Python | timer.py | kovibalu/cnntools | 083dd35dbe006d794e61a88cc6e0935a0c0298ff | [
"MIT"
] | 1 | 2019-08-27T14:23:36.000Z | 2019-08-27T14:23:36.000Z | timer.py | kovibalu/cnntools | 083dd35dbe006d794e61a88cc6e0935a0c0298ff | [
"MIT"
] | 1 | 2017-05-27T10:49:55.000Z | 2017-05-27T10:49:55.000Z | timer.py | kovibalu/cnntools | 083dd35dbe006d794e61a88cc6e0935a0c0298ff | [
"MIT"
] | null | null | null | import timeit
from django.conf import settings
class Timer:
    """Context manager that measures and reports wall-clock execution time.

    Usage::

        with Timer('loading data'):
            ...

    After the block exits, ``self.interval`` holds the elapsed seconds.
    Output is suppressed when Django's ``settings.SKIP_TIMER`` is truthy,
    unless ``force_show`` is set.
    """

    def __init__(self, message='Execution', force_show=False):
        # message: label printed alongside the elapsed time
        # force_show: print even when settings.SKIP_TIMER suppresses output
        self.message = message
        self.force_show = force_show

    def __enter__(self):
        self.start = timeit.default_timer()
        return self

    def __exit__(self, *args):
        self.end = timeit.default_timer()
        self.interval = self.end - self.start
        # Global opt-out via Django settings.
        skip_timer = hasattr(settings, 'SKIP_TIMER') and settings.SKIP_TIMER
        if not skip_timer or self.force_show:
            # Parenthesized print works under both Python 2 and 3; the
            # original bare `print` statement was Python-2-only syntax.
            print('{} took {:.3f} seconds'.format(self.message, self.interval))
4c832896fd032a16adaab55464c58871ed0f0851 | 5,636 | py | Python | gam/clustering.py | timwong101/project-gam | 6a0b87418091772517e2f3b2339e8998c43ffc54 | [
"Apache-2.0"
] | null | null | null | gam/clustering.py | timwong101/project-gam | 6a0b87418091772517e2f3b2339e8998c43ffc54 | [
"Apache-2.0"
] | null | null | null | gam/clustering.py | timwong101/project-gam | 6a0b87418091772517e2f3b2339e8998c43ffc54 | [
"Apache-2.0"
] | 1 | 2020-11-18T02:30:19.000Z | 2020-11-18T02:30:19.000Z | """
Implementation of kmedoids using custom distance metric
Adaped from https://raw.githubusercontent.com/shenxudeu/K_Medoids/master/k_medoids.py
TODO:
- refactor and test components of implementation
"""
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import pairwise_distances
def _get_init_centers(n_clusters, n_samples):
"""Return random points as initial centers"""
init_ids = []
while len(init_ids) < n_clusters:
_ = np.random.randint(0, n_samples)
if _ not in init_ids:
init_ids.append(_)
return init_ids
def _get_distance(data1, data2):
"""example distance function"""
return np.sqrt(np.sum((data1 - data2) ** 2))
def _get_cost(X, centers_id, dist_func):
    """Assign every sample to its nearest medoid and total the distances.

    Returns (members, costs, total_cost, dist_mat) where *members* maps each
    sample to its cluster index and *costs* holds the per-cluster distance sums.
    """
    # n_samples x n_centers distance matrix under the supplied metric.
    dist_mat = pairwise_distances(
        X, X[centers_id, :], metric=dist_func, n_jobs=-1
    )
    nearest = np.argmin(dist_mat, axis=1)
    members = np.zeros(len(X))
    costs = np.zeros(len(centers_id))
    for cluster in range(len(centers_id)):
        in_cluster = np.where(nearest == cluster)
        members[in_cluster] = cluster
        costs[cluster] = np.sum(dist_mat[in_cluster, cluster])
    return members, costs, np.sum(costs), dist_mat
class KMedoids:
    """
    Main API of KMedoids Clustering

    Parameters
    --------
        n_clusters: number of clusters
        dist_func : distance function
        max_iter: maximum number of iterations
        tol: tolerance

    Attributes
    --------
        centers : cluster center ids (set after fit)
        members : cluster label for each data item (set after fit)

    Methods
    -------
        fit(X): fit the model
          - X: 2-D numpy array, size = (n_sample, n_features)

        predict(X): predict cluster id given a test dataset (not implemented).
    """

    def __init__(self, n_clusters, dist_func=_get_distance, max_iter=1000, tol=0.0001):
        self.n_clusters = n_clusters
        self.dist_func = dist_func
        self.max_iter = max_iter
        self.tol = tol
        self.centers = None
        self.members = None

    def fit(self, X, plotit=False, verbose=True):
        """
        Fits kmedoids with the option for plotting (first two features only).
        """
        centers, members, _, _, _ = self.kmedoids_run(
            X,
            self.n_clusters,
            self.dist_func,
            max_iter=self.max_iter,
            tol=self.tol,
            verbose=verbose,
        )
        # set centers as instance attributes
        self.centers = centers
        self.members = members
        if plotit:
            _, ax = plt.subplots(1, 1)
            colors = ["b", "g", "r", "c", "m", "y", "k"]
            if self.n_clusters > len(colors):
                raise ValueError("we need more colors")
            for i in range(len(centers)):
                # Cluster members as small dots, the medoid as a large star.
                X_c = X[members == i, :]
                ax.scatter(X_c[:, 0], X_c[:, 1], c=colors[i], alpha=0.5, s=30)
                ax.scatter(
                    X[centers[i], 0],
                    X[centers[i], 1],
                    c=colors[i],
                    alpha=1.0,
                    s=250,
                    marker="*",
                )

    def kmedoids_run(
        self, X, n_clusters, dist_func, max_iter=1000, tol=0.001, verbose=True
    ):
        """Runs kmedoids algorithm with custom dist_func.

        Returns: centers, members, costs, tot_cost, dist_mat
        """
        # Get initial centers
        n_samples, _ = X.shape
        init_ids = _get_init_centers(n_clusters, n_samples)
        if verbose:
            print("Initial centers are ", init_ids)
        centers = init_ids
        members, costs, tot_cost, dist_mat = _get_cost(X, init_ids, dist_func)
        if verbose:
            print("Members - ", members.shape)
            print("Costs - ", costs.shape)
            print("Total cost - ", tot_cost)
        cc, swaped = 0, True
        print("Max Iterations: ", max_iter)
        # PAM-style swap phase: try replacing each medoid with each
        # non-medoid sample and keep the swap if it lowers the total cost.
        while True:
            swaped = False
            for i in range(n_samples):
                if i not in centers:
                    for j in range(len(centers)):
                        centers_ = deepcopy(centers)
                        centers_[j] = i
                        members_, costs_, tot_cost_, dist_mat_ = _get_cost(
                            X, centers_, dist_func
                        )
                        # NOTE(review): this accepts any swap whose cost
                        # increase is below tol, not only strict improvements;
                        # confirm whether `< -tol` was intended.
                        if tot_cost_ - tot_cost < tol:
                            members, costs, tot_cost, dist_mat = (
                                members_,
                                costs_,
                                tot_cost_,
                                dist_mat_,
                            )
                            centers = centers_
                            swaped = True
                            if verbose:
                                print("Change centers to ", centers)
                            self.centers = centers
                            self.members = members
            if cc > max_iter:
                if verbose:
                    print("End Searching by reaching maximum iteration", max_iter)
                break
            if not swaped:
                # Converged: a full pass produced no accepted swap.
                if verbose:
                    print("End Searching by no swaps")
                break
            cc += 1
            print("Starting Iteration: ", cc)
        return centers, members, costs, tot_cost, dist_mat

    def predict(self, X):
        raise NotImplementedError()
9392e50db017a9484708ad15a92af15c7178a059 | 7,266 | py | Python | multiinput/generate_configs.py | NeilBotelho/ibm-fl | 24acd94086f2a68b6f471c7e9fe2794f31315b5f | [
"IBM-pibs"
] | 2 | 2020-12-02T10:59:48.000Z | 2021-11-30T01:11:53.000Z | multiinput/generate_configs.py | NeilBotelho/ibm-fl | 24acd94086f2a68b6f471c7e9fe2794f31315b5f | [
"IBM-pibs"
] | null | null | null | multiinput/generate_configs.py | NeilBotelho/ibm-fl | 24acd94086f2a68b6f471c7e9fe2794f31315b5f | [
"IBM-pibs"
] | 1 | 2020-12-02T10:59:49.000Z | 2020-12-02T10:59:49.000Z | #!/usr/bin/env python3
import argparse
import os
import time
import yaml
import sys
from importlib import import_module
# Make the current working directory (repository root) importable so the
# ibmfl/research packages resolve when this script is run directly.
fl_path = os.path.abspath('.')
if fl_path not in sys.path:
    sys.path.append(fl_path)
from research.constants import GENERATE_CONFIG_DESC, NUM_PARTIES_DESC, \
PATH_CONFIG_DESC, MODEL_CONFIG_DESC, NEW_DESC, NAME_DESC, \
FL_EXAMPLES, FL_CONN_TYPES, CONNECTION_TYPE_DESC
def check_valid_folder_structure(p):
    """
    Check that the examples folder structure is valid.

    The actual validation is currently DISABLED (the original body was left
    commented out), so this is a documented no-op kept for interface
    compatibility: it returns None and never touches *p*. To re-enable,
    iterate FL_EXAMPLES and call ``p.error(...)`` when an example folder is
    missing its README.md / generate_configs.py files.

    :param p: an argument parser used to report errors via ``p.error``
    :type p: argparse.ArgumentParser
    """
    # Intentionally a no-op; see docstring.
def setup_parser():
    """
    Build the command line parser for the config-generation script.

    :return: a command line parser
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(description=GENERATE_CONFIG_DESC)
    parser.add_argument("--num_parties", "-n", type=int, required=True,
                        help=NUM_PARTIES_DESC)
    parser.add_argument("--data_path", "-p", required=True, help=PATH_CONFIG_DESC)
    parser.add_argument("--model", "-m", required=True, help=MODEL_CONFIG_DESC)
    parser.add_argument("--create_new", "-new", action="store_true", help=NEW_DESC)
    parser.add_argument("--name", help=NAME_DESC)
    return parser
def generate_connection_config(conn_type, party_id=0, is_party=False):
    """Build the 'connection' section of an aggregator or party config.

    Only the 'flask' connection type is supported; any other type yields {}.
    Parties listen on consecutive ports starting at 8085; the aggregator on 5000.
    """
    if conn_type != 'flask':
        return {}
    conn = {
        'name': 'FlaskConnection',
        'path': 'ibmfl.connection.flask_connection',
        'sync': False,
    }
    endpoint = {'ip': '127.0.0.1'}
    endpoint['port'] = 8085 + party_id if is_party else 5000
    # TLS is disabled for these local example configs.
    endpoint['tls_config'] = {'enable': False}
    conn['info'] = endpoint
    return conn
def get_aggregator_info(conn_type):
    """Return the aggregator endpoint block referenced by party configs.

    Empty for any connection type other than 'flask'.
    """
    if conn_type != 'flask':
        return {}
    return {'ip': '127.0.0.1', 'port': 5000}
def generate_ph_config(conn_type, is_party=False):
    """Return the protocol-handler section for a party or the aggregator.

    ``conn_type`` is accepted for interface symmetry but not consulted.
    """
    if is_party:
        return {
            'name': 'PartyProtocolHandler',
            'path': 'ibmfl.party.party_protocol_handler',
        }
    return {
        'name': 'ProtoHandler',
        'path': 'ibmfl.aggregator.protohandler.proto_handler',
    }
def generate_fusion_config(module):
    """Return the fusion section supplied by the model module."""
    return module.get_fusion_config()
def generate_hp_config(module, num_parties):
    """Fetch hyperparameters from the model module and pin the party count."""
    hyperparams = module.get_hyperparams()
    hyperparams['global']['parties'] = num_parties
    return hyperparams
def generate_model_config(module, folder_configs, dataset, is_agg=False, party_id=0):
    """Return the model section supplied by the model module."""
    return module.get_model_config(folder_configs, dataset, is_agg, party_id)
def generate_lt_config(module, keys, party_id=None):
    """Return the local-training section supplied by the model module.

    ``keys`` and ``party_id`` are accepted for interface compatibility but
    are not forwarded to the module.
    """
    return module.get_local_training_config()
def generate_datahandler_config(module, party_id, dataset, folder_data, is_agg=False):
    """Return the data-handler section supplied by the model module."""
    return module.get_data_handler_config(party_id, dataset, folder_data, is_agg)
def generate_agg_config(module, num_parties, conn_type, dataset, folder_data, folder_configs, keys):
    """Write the aggregator YAML config (config_agg.yml) into *folder_configs*.

    ``keys`` is accepted for interface compatibility but unused.
    """
    if not os.path.exists(folder_configs):
        os.makedirs(folder_configs)
    out_path = os.path.join(folder_configs, 'config_agg.yml')
    sections = {
        'connection': generate_connection_config(conn_type),
        'fusion': generate_fusion_config(module),
        'hyperparams': generate_hp_config(module, num_parties),
        'protocol_handler': generate_ph_config(conn_type)
    }
    model_section = generate_model_config(module, folder_configs, dataset, True)
    data_section = generate_datahandler_config(
        module, 0, dataset, folder_data, True)
    # Only emit the optional sections the model module actually provides.
    if model_section:
        sections['model'] = model_section
    if data_section:
        sections['data'] = data_section
    with open(out_path, 'w') as outfile:
        yaml.dump(sections, outfile)
    print('Finished generating config file for aggregator. Files can be found in: ',
          os.path.abspath(os.path.join(folder_configs, 'config_agg.yml')))
def generate_party_config(module, num_parties, conn_type, dataset, folder_data, folder_configs, keys):
    """Write one YAML config (config_party<i>.yml) per party into *folder_configs*.

    ``keys`` is accepted for interface compatibility but unused.
    """
    for party_id in range(num_parties):
        out_path = os.path.join(
            folder_configs, 'config_party' + str(party_id) + '.yml')
        local_training = generate_lt_config(module, None, party_id=party_id)
        sections = {
            'connection': generate_connection_config(conn_type, party_id, True),
            'data': generate_datahandler_config(module, party_id, dataset, folder_data),
            'model': generate_model_config(module, folder_configs, dataset, party_id=party_id),
            'protocol_handler': generate_ph_config(conn_type, True),
            'local_training': local_training,
            'aggregator': get_aggregator_info(conn_type)
        }
        with open(out_path, 'w') as outfile:
            yaml.dump(sections, outfile)
    print('Finished generating config file for parties. Files can be found in: ',
          os.path.abspath(os.path.join(folder_configs, 'config_party*.yml')))
if __name__ == '__main__':
    # Parse command line options
    parser = setup_parser()
    args = parser.parse_args()
    check_valid_folder_structure(parser)
    # Collect arguments
    num_parties = args.num_parties
    # NOTE(review): dataset and conn_type are hard-coded here ("l", "flask")
    # rather than taken from CLI options (the corresponding parser arguments
    # are commented out in setup_parser) — confirm this is intentional.
    dataset = "l"
    party_data_path = args.data_path
    model = args.model
    create_new = args.create_new
    exp_name = args.name
    conn_type = "flask"
    # Create folder to save configs; --create_new picks a fresh folder named
    # after --name (or the current unix timestamp), otherwise reuse the
    # model-named folder.
    folder_configs = os.path.join("multiinput", "configs")
    if create_new:
        folder_configs = os.path.join(
            folder_configs, exp_name if exp_name else str(int(time.time())))
    else:
        folder_configs = os.path.join(folder_configs, model)
    # Import the per-model generate_configs module and emit both the
    # aggregator config and one config per party.
    config_model = import_module('multiinput.{}.generate_configs'.format(model))
    generate_agg_config(config_model, num_parties, conn_type,
                        dataset, party_data_path, folder_configs,
                        None)
    generate_party_config(config_model, num_parties, conn_type,
                          dataset, party_data_path, folder_configs,
                          None)
| 30.024793 | 115 | 0.64327 |
74f385d53e5dd31bf1430082c9dcff46b4875e12 | 27,046 | py | Python | gpMgmt/bin/lib/pysync.py | nurikk/gpdb | 04fe0202c59721826d1eda2b19d73e5572893fcb | [
"PostgreSQL",
"Apache-2.0"
] | 3 | 2017-12-10T16:41:21.000Z | 2020-07-08T12:59:12.000Z | gpMgmt/bin/lib/pysync.py | guofengrichard/gpdb | 29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/lib/pysync.py | guofengrichard/gpdb | 29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3 | [
"PostgreSQL",
"Apache-2.0"
] | 4 | 2017-12-10T16:41:35.000Z | 2020-11-28T12:20:30.000Z | #!/usr/bin/env python
import os,sys
# Refuse to run on interpreters older than 2.4.4; pysync relies on features
# added in that release.
# NOTE(review): 0x2040400 is the hexversion of 2.4.4a0, not the 2.4.4 final
# release (0x020404F0) -- pre-releases of 2.4.4 are accepted; confirm intended.
if sys.hexversion<0x2040400:
    sys.stderr.write("pysync.py needs python version at least 2.4.4.\n")
    sys.stderr.write("You are using %s\n"%sys.version)
    # Help the operator locate which interpreter was actually picked up.
    sys.stderr.write("Here is a guess at where the python executable is--\n")
    os.system("/bin/sh -c 'type python>&2'");
    sys.exit(1)
import cPickle
import inspect
import hashlib
import signal
import socket
import subprocess
import threading
import zlib
import pysync_remote
from pysync_remote import Options
from pysync_remote import ProgressUpdate, ProgressCounters
from pysync_remote import statToTuple
from gppylib.commands.gp import PySync
# MPP-13617
import re
RE1=re.compile('\\[([^]]+)\\]:(.+)')
# Bootstrap program piped to the bare python interpreter started over SSH
# (see LocalPysync.run): it reads a length-prefixed payload (the source of
# pysync_remote) from stdin and executes it.
bootstrapSource="""
import os,sys
exec(sys.stdin.read(int(sys.stdin.readline())))
"""
class PysyncProxy:
'''
The PysyncProxy class is used to initiate a third-party synchronization operation.
An instance of PysyncProxy is used to start a LocalPysync instance on a remote host
to be used as the source of the synchronization operation. The "remote" LocalPysync
instance then runs RemotePysync on the destination as usual. Progress information
is fed from the destination host, through the remote LocalPysync instance an to this
instance for reporting.
Lines written by LocalPysync to stdout are recorded in the list self.stdout; lines
written by LocalPysync to stderr are recorded in self.stderr. Progress information
is handled only by the functions set for the recordProgressCallback and
recordRawProgressCallback properties.
'''
class _Quit(SystemExit):
def __init__(self, *info):
SystemExit.__init__(self, *info)
def __init__(self, sourceHost, sourceDir, destHost, destDir, syncOptions, verbose=False,
progressBytes=None, progressTime=None,
recordProgressCallback=None, recordRawProgressCallback=None, progressTimestamp=False):
'''
Initialize a new PysyncProxy instance.
sourceHost - the host from which data is to be copied.
sourceDir - the directory on sourceHost from which data is to be copied.
destHost - the host to which data is to be copied.
destDir - the directory on sourceHost to which data is to be copied.
syncOptions - a list of command-line options as described by LocalPysync.usage();
other options may be added based on the following arguments.
verbose - indicates whether or not debugging output is generated.
progressBytes - the number of bytes moved for a volume-based progress message;
maps to the LocalPysync --progress-bytes option.
progressTime - the amount of time for a time-based progress message; maps to
the LocalPysync --progress-time option.
recordProgressCallback - function to call to present a printable progress
message generated by RemotePysync; the function must accept a single
argument of type str. If not set, progress messages are ignored.
recordRawProgressCallback - function to call to handle raw progress information
generated by RemotePysync; the function must accept a single argument
of type pysync_remote.ProgressUpdate. If not set, raw progress
information is ignored.
progressTimestamp - indicates whether or not RemotePysync should include the
observation timestamp on messages it creates.
'''
self.ppid = 0
self.sourceHost = sourceHost
self.sourceDir = sourceDir
self.destHost = destHost
self.destDir = destDir
self.recordProgressCallback = recordProgressCallback
self.recordRawProgressCallback = recordRawProgressCallback
self.syncOptions = syncOptions
if verbose:
self.syncOptions += ["-v"]
if progressBytes:
self.syncOptions += ["--progress-bytes", progressBytes]
if progressTime:
self.syncOptions += ["--progress-time", progressTime]
self.syncOptions += ["--proxy"]
if not progressTimestamp:
self.syncOptions += ["--omit-progress-timestamp"]
self.stderr = []
self.stdout = []
self.cmd = None
self.returncode = None
def run(self):
'''
Initiate and wait for completion of a directory synchronization operation.
Stderr output is appended to the self.stderr list. Stdout output is appended
to the self.stdout list. Progress messages are written to stdout unless a
callback is set.
'''
pysyncCmd = PySync('pysync', self.sourceDir, self.destHost, self.destDir,
options=' '.join(self.syncOptions))
self.cmd = '. %s/greenplum_path.sh && %s' % (os.environ.get('GPHOME'), pysyncCmd.cmdStr)
# save of ppid to allow the process to be stopped.
self.ppid = os.getppid()
pidFilename = '/tmp/pysync.py.%s.%s.ppid' % (self.destHost, self.destDir.replace('/', '_'))
pidFile = open(pidFilename, 'w')
pidFile.write('%d' % (self.ppid))
pidFile.close()
code = 0
self.p = None
stderrThread = None
try:
try:
args = []
args.append("ssh")
args.extend(["-o", "BatchMode=yes"])
args.extend(["-o", "StrictHostKeyChecking=no"])
args.append(self.sourceHost)
args.append(self.cmd)
self.p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderrThread = ReaderThread("pysync_stderr", self.p.stderr, self.stderr)
stderrThread.start()
code = self._work()
except OSError, e:
self.stderr.append(str(e))
raise
finally:
os.remove(pidFilename)
if self.p:
timer = threading.Timer(2.0, (lambda: os.kill(self.p.pid, signal.SIGHUP)))
timer.start()
self.returncode = self.p.wait()
timer.cancel()
if stderrThread:
stderrThread.join(2.0)
return code
def _work(self):
'''
Wait for and process commands from the LocalPysync instance connected
to the Popened SSH process.
Command processing continues until EOF is reached on Popen.stdout (the
command input stream from LocalPysync) or a "quit" command is proocessed.
Because standard command output may be interleaved with serialized command
objects, command objects are prefixed with "pKl:<length>\n". Non-command
object lines are appended to the self.stdout buffer.
'''
while True:
try:
# check if parent still alive
os.kill(self.ppid, 0)
except:
# parent gone, exit
return 2
# Get the length of the next serialized command
a = self.p.stdout.readline()
if len(a)==0:
# End the command loop if EOF
self.stderr.append("[FATAL]:-Unexpected EOF on LocalPysync output stream")
return 3
# If not a pickled command object, just record it
if not a.startswith("pKl:"):
self.stdout.append(a.rstrip())
continue
size = int(a[4:])
# Read the serialized command and process it.
data = self.p.stdout.read(size)
assert len(data)==size
try:
self._doCommand(cPickle.loads(data))
except PysyncProxy._Quit, e:
return e.code
def _doCommand(self,what):
'''
Perform the command requested by the remote side and prepare any
result.
'''
if what[0] == 'recordProgress':
if self.recordProgressCallback:
self.recordProgressCallback(what[1].rstrip())
return None
elif what[0] == 'recordRawProgress':
if self.recordRawProgressCallback:
self.recordRawProgressCallback(what[1])
return None
elif what[0]=='quit':
raise PysyncProxy._Quit(what[1])
else:
assert 0
class ReaderThread(threading.Thread):
    '''
    Daemon thread that drains a file object, appending each line
    (right-stripped) to the supplied lines buffer.
    '''
    def __init__(self, name, file, lines):
        threading.Thread.__init__(self, name=name)
        # Daemonize so a stuck pipe never prevents interpreter exit.
        self.setDaemon(True)
        self.file = file
        self.lines = lines
    def run(self):
        append = self.lines.append
        for raw in self.file:
            append(raw.rstrip())
class LocalPysync:
'''
The LocalPysync class initiates a directory synchronization task by starting
the pysync_remote module on a target system then processes commands from that
system to accomplish directry synchronization. Once the pysync_remote module
is started on the remote system, this LocalPysync instance acts as the remote
system's agent.
When invoked through PysyncProxy, stdout is used to return pickled objects
representing status information from this LocalPysync instance.
'''
NUMBER_SCALES = { 'M':1024*1024, 'G':1024*1024*1024, 'T':1024*1024*1024*1024 }
class _Quit(SystemExit):
def __init__(self, *info):
SystemExit.__init__(self, *info)
def __init__(self, argv, recordProgressCallback=None, recordRawProgressCallback=None, progressTimestamp=False):
'''
Initialize a new LocalPysync instance.
argv - a command-line style list of arguments as described by self.usage()
recordProgressCallback - function to call to present a printable progress
message generated by RemotePysync; the function must accept a single
argument of type str.
recordRawProgressCallback - function to call to handle raw progress information
generated by RemotePysync; the function must accept a single argument
of type pysync_remote.ProgressUpdate.
progressTimestamp - indicates whether or not RemotePysync should include the
observation timestamp on messages it creates.
'''
self.options = Options()
self.usingProxy = False
self.sshargs = []
self.cache = [None]
self.exclude = set()
self.include = set()
self.recordProgressCallback = recordProgressCallback
if self.recordProgressCallback:
self.options.sendProgress = True
self.recordRawProgressCallback = recordRawProgressCallback
if self.recordRawProgressCallback:
self.options.sendRawProgress = True
self.options.progressTimestamp = progressTimestamp
a = argv[1:]
while a:
if a[0]=='-v':
self.options.verbose = True
elif a[0]=='-?':
self.usage(argv)
elif a[0]=='-compress':
self.options.compress = True
elif a[0]=='-n':
self.options.minusn = True
elif a[0]=='--insecure':
self.options.insecure = True
elif a[0]=='--ssharg':
a.pop(0)
self.sshargs.append(a[0])
elif a[0]=='--delete':
self.options.delete = True
elif a[0]=='-x':
a.pop(0)
name = a[0]
if name[0]=='/':
raise Exception('Please do not use absolute path with -x.')
if name[0:2]!='./':
name = os.path.join('.',name)
self.exclude.add(name)
elif a[0]=='-i':
a.pop(0)
name = a[0]
if name[0]=='/':
raise Exception('Please do not use absolute path with -i.')
if name[0:2]!='./':
name = os.path.join('.',name)
self.include.add(name)
elif a[0] == '--progress-bytes':
a.pop(0)
try:
scale = a[0][-1]
if scale == '%':
# Ensure number part is convertable; otherwise pass the whole value
factor = float(a[0][:-1])
self.options.progressBytes = a[0]
elif scale.upper() in LocalPysync.NUMBER_SCALES:
# Real numeric value followed by a supported scale identifier
progressBytes = int(float(a[0][:-1]) * LocalPysync.NUMBER_SCALES[scale.upper()])
self.options.progressBytes = progressBytes
else:
# If the value isn't a percent or scaled, it must be an integer number of bytes
progressBytes = int(a[0])
self.options.progressBytes = self.options.progressBytes
except ValueError:
raise ValueError("--progress-bytes value is not supported", a[0])
if type(self.options.progressBytes) != str and progressBytes < pysync_remote.SyncProgress.MINIMUM_VOLUME_INTERVAL:
raise ValueError("--progress-bytes value must be at least %d" % pysync_remote.SyncProgress.MINIMUM_VOLUME_INTERVAL, a[0])
elif a[0] == '--progress-time':
a.pop(0)
try:
progressSeconds = int(60 * float(a[0]))
self.options.progressTime = progressSeconds
except ValueError:
raise ValueError("--progress-time value is not supported", a[0])
if progressSeconds < pysync_remote.SyncProgress.MINIMUM_TIME_INTERVAL:
raise ValueError("--progress-time value must be at least %f" % (pysync_remote.SyncProgress.MINIMUM_TIME_INTERVAL / 60))
elif a[0] == '--proxy':
self.usingProxy = True
self.options.sendProgress = True
self.recordProgressCallback = self._recordProgress
self.options.sendRawProgress = True
self.recordRawProgressCallback = self._recordRawProgress
elif a[0] == '--omit-progress-timestamp':
self.options.progressTimestamp = False
else:
break
a.pop(0)
if len(a)!=2:
self.usage(argv)
self.sourceDir = os.path.abspath(a[0])
if not os.path.exists(self.sourceDir):
raise ValueError("Source path \"%s\" not found" % self.sourceDir)
if not os.path.isdir(self.sourceDir):
raise ValueError("Source path \"%s\" is not a directory" % self.sourceDir)
if not os.access(self.sourceDir, os.F_OK | os.R_OK | os.X_OK):
raise ValueError("Source path) \"%s\" is not accessible" % self.sourceDir)
dest = a[1]
# MPP-13617
m = re.match(RE1, dest)
if m:
self.userAndHost, self.destDir = m.groups()
else:
i = dest.find(':')
if i == -1:
self.usage(argv)
self.userAndHost, self.destDir = dest[:i], dest[i+1:]
self.connectAddress = None
self.sendData = None
hostname = self.userAndHost[self.userAndHost.find('@')+1:]
try:
addrinfo = socket.getaddrinfo(hostname, None)
except:
print 'dest>>%s<<' % dest, ' hostname>>%s<<' % hostname
raise
if addrinfo:
self.options.addrinfo = addrinfo[0]
else:
raise Exception("Unable to determine address for %s" % self.userAndHost)
def usage(self,argv):
sys.stderr.write("""usage:
python """+argv[0]+""" [-v] [-?] [-n]
[--ssharg arg] [-x exclude_file] [-i include_file] [--insecure] [--delete]
[--progress-time seconds] [--progress-bytes { n[.n]{% | G | T} }
[--proxy] [--omit-progress-timestamp]
sourcedir [user@]host:destdir
-v: verbose output
-?: Print this message.
--ssharg arg: pass arg to ssh. Use many times to pass many args.
-n: Do not do any work. Just print how many bytes will need to be
transferred over the network per file and a total.
-x name: Do not transfer named file or directory. Don't be too
creative with the name. For example, "directory/./file" will not
work--use "directory/file". Name is relative to sourcedir.
-i name: Only transfer named file or directory. Don't be too
creative with the name. For example, "directory/./file" will not
work--use "directory/file". Name is relative to sourcedir.
--insecure: Do not check MD5 digest after transfering data.
This makes pysync.py run faster, but a bad guy can forge TCP
packets and put junk of his choice into your files.
--delete: Delete things in dst that do not exist in src.
--progress-time minutes: the number of minutes to elapse before a
time-based progress message is issued. Progress messages may
appear more frequently than specified due to the --progress-bytes
value.
--progress-bytes count: the number of bytes processed before a
volume-based progress message is issued. The count may be a
number followed by 'G' or 'T' or number followed by '%'. If
specified as a percent, the count is calculated as the specified
percent of the total bytes expected to be processed.
--proxy: Internal option indicating a call from PysyncProxy.
--omit-progress-timestamp: Omit the timestamp from progress messages.
""")
sys.exit(1)
def readFile(self,filename,offset,size):
'''
Read a chunk of the specified size at the specified offset from the
file identified. The last chunk read is cached for possible re-reading.
The file is opened only for the duration of the seek and read operations.
'''
key = (filename,offset,size)
if self.cache[0]==key:
return self.cache[1]
absfilename = os.path.join(self.sourceDir, filename)
f = open(absfilename,'rb')
f.seek(offset)
a = f.read(size)
f.close()
assert len(a)==size
self.cache = (key,a)
return a
def getList(self):
'''
Gets a map of {name:stat} pairs to be processed. The stat value
is generally the tuple returned from pysync_remote.statToTuple.
Hard links (an entry with an inode equal to another in the list)
are represented by a ('L', linked_name) tuple.
'''
list = dict()
inomap = dict()
for root,dirs,files in os.walk(self.sourceDir):
for i in dirs+files:
absname = os.path.join(root, i)
relname = '.' + absname[len(self.sourceDir):]
if relname in self.exclude:
if i in dirs:
dirs.remove(i)
continue
if len(self.include) > 0:
""" Check if the file or dir is in the include list """
if relname in self.include:
pass
else:
""" Make sure we include any files or dirs under a dir in the include list."""
foundPrefix = False
for j in self.include:
if relname.startswith(j + '/') == True:
foundPrefix = True
continue
if foundPrefix == False:
if i in dirs:
dirs.remove(i)
continue
s = os.lstat(absname)
if s.st_ino in inomap:
list[relname] = ('L',inomap[s.st_ino])
continue
inomap[s.st_ino] = relname
list[relname] = statToTuple(s,absname)
return list
def doCommand(self,what):
'''
Perform the command requested by the remote side and prepare any
result.
'''
if what[0]=='connect':
self.connectAddress = what[1]
elif what[0]=='getOptions':
return self.options
elif what[0]=='getDestDir':
return self.destDir
elif what[0]=='getList':
return self.getList()
elif what[0]=='getDigest':
m = hashlib.md5()
m.update(self.readFile(what[1],what[2],what[3]))
return m.digest()
elif what[0]=='getData':
self.sendData = self.readFile(what[1],what[2],what[3])
if self.options.compress:
self.sendData = zlib.compress(self.sendData,1)
return len(self.sendData)
elif what[0] == 'recordProgress':
if self.recordProgressCallback:
self.recordProgressCallback(what[1].rstrip())
else:
sys.stdout.write(what[1].rstrip())
sys.stdout.write('\n')
return None
elif what[0] == 'recordRawProgress':
if self.recordRawProgressCallback:
self.recordRawProgressCallback(what[1])
else:
sys.stdout.write("raw: " + str(what[1]))
sys.stdout.write('\n')
return None
elif what[0]=='quit':
raise LocalPysync._Quit(what[1])
else:
assert 0
def _recordProgress(self, message):
'''
Send progress information to associated PysyncProxy instance.
'''
if message:
self._sendCommand('recordProgress', message)
def _recordRawProgress(self, progressUpdate):
'''
Send raw progress data to associated PysyncProxy instance.
'''
if progressUpdate:
self._sendCommand('recordRawProgress', progressUpdate)
def _sendCommand(self, *args):
'''
Serialize the command & arguments using cPickle and send write to stdout.
This method is used for communication with the initiating PysyncProxy
instance.
'''
a = cPickle.dumps(args)
sys.stdout.write('pKl:%d\n%s'%(len(a),a))
sys.stdout.flush()
def work(self):
'''
Wait for and process commands from the RemotePysync instance connected
to the Popened SSH process.
Command processing continues until EOF is reached on Popen.stdout (the
command input stream from RemotePysync) or a "quit" command is proocessed.
Command response objects are serialized and written to Popen.stdin (the
command output stream to RemotePysync).
'''
while True:
try:
# check if parent still alive
os.kill(os.getppid(), 0)
except:
# parent gone, exit
return 2
# Get the length of the next serialized command
a = self.p.stdout.readline()
if len(a)==0:
# End the command loop if EOF
print >> sys.stderr, "[FATAL]:-Unexpected EOF on RemotePysync output stream"
return 3
size = int(a)
# Read the serialized command and process it.
data = self.p.stdout.read(size)
assert len(data)==size
try:
answer = cPickle.dumps(self.doCommand(cPickle.loads(data)))
except LocalPysync._Quit, e:
return e.code
# Send the serialized command response
self.p.stdin.write("%d\n%s"%(len(answer),answer))
self.p.stdin.flush()
# If the command was a connect order, open a socket to
# the remote side for data transfer
if self.connectAddress!=None:
self.socket = socket.socket(self.options.addrinfo[0])
self.socket.connect(self.connectAddress)
self.connectAddress = None
# If the command was a getData order, send the prepared
# data over the socket.
if self.sendData!=None:
self.socket.sendall(self.sendData)
self.sendData = None
def run(self):
'''
Start the pysync_remote module on the remote host and call self.work() to process
commands presented by the remote host.
'''
# save of ppid to allow the process to be stopped.
os.system('echo %d > /tmp/pysync.py.%s.ppid' % (os.getppid(), self.destDir.replace('/', '_')))
PATH = os.environ.get('PATH') or '.'
LIBPATH = os.environ.get('LD_LIBRARY_PATH') or '.'
cmd = ('''. %s/greenplum_path.sh && bash -c "python -u -c '%s'"'''
% (os.environ.get('GPHOME'),
bootstrapSource))
args = []
args.append('ssh')
args.extend(["-o", "BatchMode=yes"])
args.extend(["-o", "StrictHostKeyChecking=no"])
args.extend(self.sshargs)
args.append(self.userAndHost)
args.append(cmd)
code = 0
self.p = None
try:
try:
pysyncSource = inspect.getsource(pysync_remote)
self.p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.p.stdin.write("%d\n%s"%(len(pysyncSource),pysyncSource))
code = self.work()
except OSError, e:
sys.stderr.write(str(e))
raise
finally:
os.remove('/tmp/pysync.py.%s.ppid' % (self.destDir.replace('/','_')))
if self.p:
timer = threading.Timer(2.0, (lambda: os.kill(self.p.pid, signal.SIGHUP)))
timer.start()
rc = self.p.wait()
timer.cancel()
if self.usingProxy:
self._sendCommand('quit', code)
return code
# NOTE(review): this check runs at import time, not only when executed as a
# script, so merely importing this module without GPHOME set exits the process.
if os.environ.get('GPHOME') is None:
    print >> sys.stderr, '[FATAL]:- Please specify environment variable GPHOME'
    sys.exit(1)
if __name__ == '__main__':
    # Command-line entry point; timestamps are always included in progress messages.
    sys.exit(LocalPysync(sys.argv, progressTimestamp=True).run())
| 40.367164 | 141 | 0.562819 |
e9f33b392dfc2b5d78d00857fa40e7cb613fecf2 | 1,162 | py | Python | via_cms/model/feed/feed_dao.py | jeanjacquesp/via-cms | 12b212f8005e3d667c23ffc4da831e4d3e653999 | [
"MIT"
] | null | null | null | via_cms/model/feed/feed_dao.py | jeanjacquesp/via-cms | 12b212f8005e3d667c23ffc4da831e4d3e653999 | [
"MIT"
] | null | null | null | via_cms/model/feed/feed_dao.py | jeanjacquesp/via-cms | 12b212f8005e3d667c23ffc4da831e4d3e653999 | [
"MIT"
] | null | null | null | # Copyright 2020 Pax Syriana Foundation. Licensed under the Apache License, Version 2.0
#
from via_cms.extension import db
from via_cms.model._database import Model
ID_NONE = 0
ID_NEWS = 1
ID_FINANCE = 2
ID_DOCUMENT = 3
class Feed(Model):
    """Lookup table of content feed types (e.g. news, finance, document).

    Rows are keyed by an externally assigned integer id (``autoincrement=False``)
    with a unique human-readable name.
    """
    __tablename__ = 'feed_tbl'
    # flask packages: required to name the identifier column: id.
    id = db.Column(db.Integer, primary_key=True, autoincrement=False)
    name = db.Column(db.Unicode(64), nullable=False, unique=True)

    @staticmethod
    def import_from_csv(file_path):
        """Bulk-load feed rows from a CSV file.

        The file is expected to have a header row (which is skipped) followed
        by ``id,name`` rows. All rows are inserted in a single transaction,
        committed once at the end.

        :param file_path: path to the CSV file to import.
        """
        import csv
        with open(file_path, encoding='utf-8') as csvfile:
            reader = csv.reader(csvfile, quoting=csv.QUOTE_NONE)
            # Skip the header row (idiomatic replacement for the old
            # first-iteration boolean flag).
            next(reader, None)
            for row in reader:
                # Avoid shadowing the builtins `id` and the column name;
                # convert the id once instead of per use.
                feed_id = int(row[0])
                feed_name = row[1]
                print('Inserting Feed(id={}, name={})'.format(feed_id, feed_name))
                feed = Feed(id=feed_id, name=feed_name)
                feed.save(commit=False)
            db.session.commit()
| 29.05 | 88 | 0.571429 |
ead6b47c0ad3f8458192fe9f4850033f9acd6d48 | 16,284 | py | Python | src/compas/geometry/shapes/box.py | franaudo/compas | 8b2982a1c31e87d1a6740864476d6242612dc3dd | [
"MIT"
] | 2 | 2021-03-17T18:14:22.000Z | 2021-09-19T13:50:02.000Z | src/compas/geometry/shapes/box.py | franaudo/compas | 8b2982a1c31e87d1a6740864476d6242612dc3dd | [
"MIT"
] | null | null | null | src/compas/geometry/shapes/box.py | franaudo/compas | 8b2982a1c31e87d1a6740864476d6242612dc3dd | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import centroid_points
from compas.geometry import transform_points
from compas.geometry import Transformation
from compas.geometry import Frame
from compas.geometry import Vector
from ._shape import Shape
__all__ = ['Box']
class Box(Shape):
"""A box is defined by a frame and its dimensions along the frame's x-, y- and z-axes.
The center of the box is positioned at the origin of the
coordinate system defined by the frame. The box is axis-aligned to the frame.
A box is a three-dimensional geometric shape with 8 vertices, 12 edges and 6
faces. The edges of a box meet at its vertices at 90 degree angles. The
faces of a box are planar. Faces which do not share an edge are parallel.
Parameters
----------
frame : :class:`compas.geometry.Frame`
The frame of the box.
xsize : float
The size of the box in the box frame's x direction.
ysize : float
The size of the box in the box frame's y direction.
zsize : float
The size of the box in the box frame's z direction.
Attributes
----------
frame : compas.geometry.Frame
The local coordinate system of the box.
xsize : float
The size of the box in the local X direction.
ysize : float
The size of the box in the local Y direction.
zsize : float
The size of the box in the local Z direction.
width (read-only) : float
Alias for ``xsize``.
depth (read-only) : float
Alias for ``ysize``.
height (read-only) : float
Alias for ``zsize``.
diagonal (read-only) : tuple of compas.geometry.Point
The start and end point of the main diagonal of the box.
dimensions (read-only) : list of float
List of sizes in local coordinate directions.
area (read-only) : float
The surface area of the box.
volume (read-only) : float
The volume of the box.
vertices (read-only) : list of list
The XYZ coordinates of the corners of the box.
With respect to the local Z axis, the vertices of the bottom
face are listed first in clockwise direction, starting at the bottom left corner.
The vertices of the top face are listed in counterclockwise direction.
faces (read-only) : list of list
The vertices of the faces of the box.
The cycle directions of the faces are such that face normals point outwards.
Examples
--------
>>> box = Box(Frame.worldXY(), 1.0, 2.0, 3.0)
"""
    def __init__(self, frame, xsize, ysize, zsize):
        super(Box, self).__init__()
        # Backing attributes; the public assignments below go through the
        # property setters so the same coercion (Frame construction, float
        # conversion) is applied at construction time as on later assignment.
        self._frame = None
        self._xsize = None
        self._ysize = None
        self._zsize = None
        self.frame = frame
        self.xsize = xsize
        self.ysize = ysize
        self.zsize = zsize
@property
def data(self):
"""Returns the data dictionary that represents the box.
Returns
-------
dict
The box data.
Examples
--------
>>> frame = Frame.worldXY()
>>> box = Box(frame, 1.0, 2.0, 3.0)
>>> bdict = {'frame': frame.data, 'xsize': 1.0, 'ysize': 2.0, 'zsize': 3.0}
>>> bdict == box.to_data()
True
"""
return {'frame': self.frame.data,
'xsize': self.xsize,
'ysize': self.ysize,
'zsize': self.zsize}
@data.setter
def data(self, data):
self.frame = Frame.from_data(data['frame'])
self.xsize = data['xsize']
self.ysize = data['ysize']
self.zsize = data['zsize']
@property
def frame(self):
"""Frame: The box's frame."""
return self._frame
@frame.setter
def frame(self, frame):
self._frame = Frame(frame[0], frame[1], frame[2])
@property
def xsize(self):
"""float: The size of the box in the box frame's x direction."""
return self._xsize
@xsize.setter
def xsize(self, xsize):
self._xsize = float(xsize)
@property
def ysize(self):
"""float: The size of the box in the box frame's y direction."""
return self._ysize
@ysize.setter
def ysize(self, ysize):
self._ysize = float(ysize)
@property
def zsize(self):
"""float: The size of the box in the box frame's z direction."""
return self._zsize
@zsize.setter
def zsize(self, zsize):
self._zsize = float(zsize)
@property
def xmin(self):
return self.frame.point.x - 0.5 * self.xsize
@property
def xmax(self):
return self.frame.point.x + 0.5 * self.xsize
@property
def ymin(self):
return self.frame.point.y - 0.5 * self.ysize
@property
def ymax(self):
return self.frame.point.y + 0.5 * self.ysize
@property
def zmin(self):
return self.frame.point.z - 0.5 * self.zsize
@property
def zmax(self):
return self.frame.point.z + 0.5 * self.zsize
@property
def width(self):
"""float: The width of the box in x direction."""
return self.xsize
@property
def depth(self):
"""float: The depth of the box in y direction."""
return self.ysize
@property
def height(self):
"""float: The height of the box in z direction."""
return self.zsize
@property
def diagonal(self):
vertices = self.vertices
return vertices[0], vertices[-2]
@property
def dimensions(self):
return [self.xsize, self.ysize, self.zsize]
@property
def area(self):
"""float: The surface area of the box."""
return 2 * self.xsize * self.ysize + 2 * self.ysize * self.zsize + 2 * self.zsize * self.xsize
@property
def volume(self):
"""float: The volume of the box."""
return self.xsize * self.ysize * self.zsize
@property
def points(self):
return self.vertices
    @property
    def vertices(self):
        """list of point: The XYZ coordinates of the vertices of the box.

        The first four entries (a-d) are the corners of the bottom face,
        the last four (e-h) the corners of the top face.
        """
        point = self.frame.point
        xaxis = self.frame.xaxis
        yaxis = self.frame.yaxis
        zaxis = self.frame.zaxis
        width, depth, height = self.xsize, self.ysize, self.zsize
        # Bottom face: offset half a size from the center along each local axis.
        a = point + (xaxis * (-0.5 * width) + yaxis * (-0.5 * depth) + zaxis * (-0.5 * height))
        b = point + (xaxis * (-0.5 * width) + yaxis * (+0.5 * depth) + zaxis * (-0.5 * height))
        c = point + (xaxis * (+0.5 * width) + yaxis * (+0.5 * depth) + zaxis * (-0.5 * height))
        d = point + (xaxis * (+0.5 * width) + yaxis * (-0.5 * depth) + zaxis * (-0.5 * height))
        # Top face: the bottom corners a, d, c, b translated up by the height.
        e = a + zaxis * height
        f = d + zaxis * height
        g = c + zaxis * height
        h = b + zaxis * height
        return [a, b, c, d, e, f, g, h]
@property
def faces(self):
"""list of list: The faces of the box defined as lists of vertex indices."""
return [self.bottom,
self.front,
self.right,
self.back,
self.left,
self.top]
@property
def bottom(self):
return [0, 1, 2, 3]
@property
def front(self):
return [0, 3, 5, 4]
@property
def right(self):
return [3, 2, 6, 5]
@property
def back(self):
return [2, 1, 7, 6]
@property
def left(self):
return [1, 0, 4, 7]
@property
def top(self):
return [4, 5, 6, 7]
@property
def edges(self):
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
edges += [(4, 5), (5, 6), (6, 7), (7, 4)]
edges += [(0, 4), (1, 7), (2, 6), (3, 5)]
return edges
# ==========================================================================
# customisation
# ==========================================================================
def __repr__(self):
return 'Box({0}, {1}, {2}, {3})'.format(self.frame, self.xsize, self.ysize, self.zsize)
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return self.frame
elif key == 1:
return self.xsize
elif key == 2:
return self.ysize
elif key == 3:
return self.zsize
else:
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.frame = value
elif key == 1:
self.xsize = value
elif key == 2:
self.ysize = value
elif key == 3:
self.zsize = value
else:
raise KeyError
def __iter__(self):
return iter([self.frame, self.xsize, self.ysize, self.zsize])
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_data(cls, data):
"""Construct a box from its data representation.
Parameters
----------
data : :obj:`dict`
The data dictionary.
Returns
-------
Box
The constructed box.
Examples
--------
>>> data = {'frame': Frame.worldXY().data, 'xsize': 1.0, 'ysize': 1.0, 'zsize': 1.0}
>>> box = Box.from_data(data)
"""
return cls(Frame.from_data(data['frame']), data['xsize'], data['ysize'], data['zsize'])
@classmethod
def from_width_height_depth(cls, width, height, depth):
"""Construct a box from its width, height and depth.
Note that width is along the X-axis, height along Z-axis, and depth along the Y-axis.
Parameters
----------
width : float
Width of the box.
height : float
Height of the box.
depth : float
Depth of the box.
Returns
-------
Box
The resulting box.
Notes
-----
The box is axis-aligned to the world coordinate system and centered at the origin.
Examples
--------
>>> box = Box.from_width_height_depth(1.0, 2.0, 3.0)
"""
width = float(width)
height = float(height)
depth = float(depth)
if width == 0.0:
raise Exception('Width cannot be zero.')
if height == 0.0:
raise Exception('Height cannot be zero.')
if depth == 0.0:
raise Exception('Depth cannot be zero.')
return cls(Frame.worldXY(), width, depth, height)
@classmethod
def from_bounding_box(cls, bbox):
"""Construct a box from the result of a bounding box calculation.
Parameters
----------
bbox : list
A list of 8 point locations, representing the corners of the bounding box.
Positions 0, 1, 2, 3 are the bottom corners.
Positions 4, 5, 6, 7 are the top corners.
Both the top and bottom face are oriented in CCW direction, starting at the bottom, left-most point.
Returns
-------
Box
The box shape.
Examples
--------
>>> from compas.geometry import bounding_box
>>> bbox = bounding_box([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
>>> box = Box.from_bounding_box(bbox)
>>> box.width
1.0
>>> box.height
1.0
>>> box.depth
1.0
"""
a = bbox[0]
b = bbox[1]
d = bbox[3]
e = bbox[4]
xaxis = Vector.from_start_end(a, b)
yaxis = Vector.from_start_end(a, d)
zaxis = Vector.from_start_end(a, e)
xsize = xaxis.length
ysize = yaxis.length
zsize = zaxis.length
frame = Frame(centroid_points(bbox), xaxis, yaxis)
return cls(frame, xsize, ysize, zsize)
@classmethod
def from_corner_corner_height(cls, corner1, corner2, height):
"""Construct a box from the opposite corners of its base and its height.
Parameters
----------
corner1 : point
The XYZ coordinates of the bottom left corner of the base of the box.
corner2 : point
The XYZ coordinates of the top right corner of the base of the box.
height : float
The height of the box.
Returns
-------
Box
The resulting box.
Examples
--------
>>> box = Box.from_corner_corner_height([0.0, 0.0, 0.0], [1.0, 1.0, 0.0], 1.0)
"""
if height == 0:
raise Exception('The box should have a height.')
x1, y1, z1 = corner1
x2, y2, z2 = corner2
if z1 != z2:
raise Exception('Corners should be in the same horizontal plane.')
xaxis = Vector(x2 - x1, 0, 0)
yaxis = Vector(0, y2 - y1, 0)
width = xaxis.length
depth = yaxis.length
point = [0.5 * (x1 + x2), 0.5 * (y1 + y2), z1 + 0.5 * height]
frame = Frame(point, xaxis, yaxis)
return cls(frame, width, depth, height)
@classmethod
def from_diagonal(cls, diagonal):
"""Construct a box from its main diagonal.
Parameters
----------
diagonal : segment
The diagonal of the box, represented by a pair of points in space.
Returns
-------
Box
The resulting box.
Examples
--------
>>> diagonal = [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]
>>> box = Box.from_diagonal(diagonal)
"""
# this should put the frame at the centroid of the box
# not at the bottom left corner
d1, d2 = diagonal
x1, y1, z1 = d1
x2, y2, z2 = d2
if z1 == z2:
raise Exception('The box has no height.')
xaxis = Vector(x2 - x1, 0, 0)
yaxis = Vector(0, y2 - y1, 0)
zaxis = Vector(0, 0, z2 - z1)
width = xaxis.length
depth = yaxis.length
height = zaxis.length
point = [0.5 * (x1 + x2), 0.5 * (y1 + y2), 0.5 * (z1 + z2)]
frame = Frame(point, xaxis, yaxis)
return cls(frame, width, depth, height)
# ==========================================================================
# methods
# ==========================================================================
def contains(self, point):
"""Verify if the box contains a given point.
Parameters
----------
point : :class:`compas.geometry.Point` or (float, float, float)
Returns
-------
bool
"""
T = Transformation.from_change_of_basis(Frame.worldXY(), self.frame)
point = transform_points([point], T)[0]
if -0.5 * self.xsize < point[0] < + 0.5 * self.xsize:
if -0.5 * self.ysize < point[1] < +0.5 * self.ysize:
if -0.5 * self.zsize < point[2] < +0.5 * self.zsize:
return True
return False
    def to_vertices_and_faces(self):
        """Returns a list of vertices and faces.
        Returns
        -------
        (vertices, faces)
            A list of vertex locations and a list of faces,
            with each face defined as a list of indices into the list of vertices.
        """
        # Delegates to the box's ``vertices`` and ``faces`` attributes.
        return self.vertices, self.faces
    def transform(self, transformation):
        """Transform the box.
        Parameters
        ----------
        transformation : :class:`Transformation`
            The transformation used to transform the Box.
        Examples
        --------
        >>> box = Box(Frame.worldXY(), 1.0, 2.0, 3.0)
        >>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> T = Transformation.from_frame(frame)
        >>> box.transform(T)
        """
        # Only the frame is transformed in place; the box dimensions are
        # intrinsic and stay unchanged.
        self.frame.transform(transformation)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
    import doctest
    # Transformation is imported only so the doctests above can resolve the
    # name when run with globs=globals().
    from compas.geometry import Transformation # noqa : F401
    doctest.testmod(globs=globals())
| 28.468531 | 112 | 0.522046 |
7195d9be67cb05960bc9134551214e2155f85a44 | 758 | py | Python | breveIDE_windows_2.7.2_2/breveIDE_2.7.2/demos/Programming-Examples/SoundExample.py | Lamouse/Evolutionary-Creativity | 9e9a4094285241d0541e0b87a3bd2c5e4ba804d3 | [
"MIT"
] | null | null | null | breveIDE_windows_2.7.2_2/breveIDE_2.7.2/demos/Programming-Examples/SoundExample.py | Lamouse/Evolutionary-Creativity | 9e9a4094285241d0541e0b87a3bd2c5e4ba804d3 | [
"MIT"
] | null | null | null | breveIDE_windows_2.7.2_2/breveIDE_2.7.2/demos/Programming-Examples/SoundExample.py | Lamouse/Evolutionary-Creativity | 9e9a4094285241d0541e0b87a3bd2c5e4ba804d3 | [
"MIT"
] | null | null | null |
# Note: this file was automatically converted to Python from the
# original steve-language source code. Please see the original
# file for more detailed comments and documentation.
import breve
class myController(breve.Control):
    """breve controller that occasionally plays a cat sound."""

    def __init__(self):
        breve.Control.__init__(self)
        self.sound = None
        myController.init(self)

    def init(self):
        # Load the sound effect once, up front.
        self.sound = breve.createInstances(breve.Sound, 1)
        self.sound.load('sounds/cat.wav')

    def iterate(self):
        # On average once every 1000 iterations, play the sound with a
        # randomized rate argument (0.8 plus a random offset up to 1.2 —
        # exact semantics depend on breve.randomExpression).
        if breve.randomExpression(1000) == 0:
            self.sound.play(0.800000 + breve.randomExpression(1.200000))
        breve.Control.iterate(self)
# Expose the class on the breve module — presumably so the breve runtime can
# look the controller up by name; confirm against the breve API.
breve.myController = myController
# Create an instance of our controller object to initialize the simulation
myController()
| 22.294118 | 74 | 0.725594 |
89f5acef06ff4721bfb6076de2293a4e4e1c0a0a | 7,035 | py | Python | pipeline/recon/amass.py | bamhm182/recon-pipeline | 7659658ec706ff7a523231ca5bf04ec464b5ae49 | [
"MIT"
] | 352 | 2020-01-22T13:36:11.000Z | 2022-03-22T19:37:24.000Z | pipeline/recon/amass.py | bamhm182/recon-pipeline | 7659658ec706ff7a523231ca5bf04ec464b5ae49 | [
"MIT"
] | 72 | 2020-01-24T04:53:52.000Z | 2021-07-14T19:23:29.000Z | pipeline/recon/amass.py | bamhm182/recon-pipeline | 7659658ec706ff7a523231ca5bf04ec464b5ae49 | [
"MIT"
] | 86 | 2020-01-23T09:20:51.000Z | 2022-03-03T08:04:37.000Z | import json
import subprocess
from pathlib import Path
import luigi
from luigi.util import inherits
from luigi.contrib.sqla import SQLAlchemyTarget
import pipeline.models.db_manager
from ..tools import tools
from .targets import TargetList
from .helpers import meets_requirements
from ..models.target_model import Target
@inherits(TargetList)
class AmassScan(luigi.Task):
    """ Run ``amass`` scan to perform subdomain enumeration of given domain(s).

    Note:
        Expects **TARGET_FILE.domains** file to be a text file with one top-level domain per line.

    Install:
        .. code-block:: console

            sudo apt-get install -y -q amass

    Basic Example:
        .. code-block:: console

            amass enum -ip -brute -active -min-for-recursive 3 -df tesla -json amass.tesla.json

    Luigi Example:
        .. code-block:: console

            PYTHONPATH=$(pwd) luigi --local-scheduler --module recon.amass AmassScan --target-file tesla

    Args:
        exempt_list: Path to a file providing blacklisted subdomains, one per line.
        db_location: specifies the path to the database used for storing results *Required by upstream Task*
        target_file: specifies the file on disk containing a list of ips or domains *Required by upstream Task*
        results_dir: specifies the directory on disk to which all Task results are written *Required by upstream Task*
    """

    exempt_list = luigi.Parameter(default="")
    # External tools that must be present for this task.
    requirements = ["go", "amass"]
    # Passed to meets_requirements() in requires(); presumably makes missing
    # requirements fatal rather than a warning — confirm in helpers.
    exception = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.db_mgr = pipeline.models.db_manager.DBManager(db_location=self.db_location)
        self.results_subfolder = (Path(self.results_dir) / "amass-results").expanduser().resolve()

    def requires(self):
        """ AmassScan depends on TargetList to run.

        TargetList expects target_file as a parameter.

        Returns:
            luigi.ExternalTask - TargetList
        """
        meets_requirements(self.requirements, self.exception)
        args = {"target_file": self.target_file, "results_dir": self.results_dir, "db_location": self.db_location}
        return TargetList(**args)

    def output(self):
        """ Returns the target output for this task.

        Naming convention for the output file is amass.json.

        Returns:
            luigi.local_target.LocalTarget
        """
        results_subfolder = Path(self.results_dir) / "amass-results"
        new_path = results_subfolder / "amass.json"
        return luigi.LocalTarget(new_path.expanduser().resolve())

    def run(self):
        """ Writes the hostnames to scan to a temp file and runs ``amass`` on it,
        producing the JSON output file returned by ``output()``.
        """
        self.results_subfolder.mkdir(parents=True, exist_ok=True)

        hostnames = self.db_mgr.get_all_hostnames()

        if hostnames:
            # TargetList generated some domains for us to scan with amass
            amass_input_file = self.results_subfolder / "input-from-targetlist"
            with open(amass_input_file, "w") as f:
                for hostname in hostnames:
                    f.write(f"{hostname}\n")
        else:
            # No hostnames: create an empty output file so luigi marks the
            # task complete without invoking amass.
            return subprocess.run(f"touch {self.output().path}".split())

        command = [
            tools.get("amass").get("path"),
            "enum",
            "-active",
            "-ip",
            "-brute",
            "-min-for-recursive",
            "3",
            "-df",
            str(amass_input_file),
            "-json",
            self.output().path,
        ]

        if self.exempt_list:
            command.append("-blf")  # Path to a file providing blacklisted subdomains
            command.append(self.exempt_list)

        subprocess.run(command)

        # Clean up the temporary input file once the scan completes.
        amass_input_file.unlink()
@inherits(AmassScan)
class ParseAmassOutput(luigi.Task):
    """ Read amass JSON results and create categorized entries into ip|subdomain files.

    Args:
        db_location: specifies the path to the database used for storing results *Required by upstream Task*
        target_file: specifies the file on disk containing a list of ips or domains *Required by upstream Task*
        exempt_list: Path to a file providing blacklisted subdomains, one per line. *Optional by upstream Task*
        results_dir: specifies the directory on disk to which all Task results are written *Required by upstream Task*
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.db_mgr = pipeline.models.db_manager.DBManager(db_location=self.db_location)
        self.results_subfolder = (Path(self.results_dir) / "amass-results").expanduser().resolve()

    def requires(self):
        """ ParseAmassOutput depends on AmassScan to run.

        TargetList expects target_file as a parameter.
        AmassScan accepts exempt_list as an optional parameter.

        Returns:
            luigi.ExternalTask - TargetList
        """
        args = {
            "target_file": self.target_file,
            "exempt_list": self.exempt_list,
            "results_dir": self.results_dir,
            "db_location": self.db_location,
        }
        return AmassScan(**args)

    def output(self):
        """ Returns the target output files for this task.

        Returns:
            luigi.contrib.sqla.SQLAlchemyTarget
        """
        return SQLAlchemyTarget(
            connection_string=self.db_mgr.connection_string, target_table="target", update_id=self.task_id
        )

    def run(self):
        """ Parse the json file produced by AmassScan and categorize the results into ip|subdomain files.

        An example (prettified) entry from the json file is shown below

        {
            "Timestamp": "2019-09-22T19:20:13-05:00",
            "name": "beta-partners.tesla.com",
            "domain": "tesla.com",
            "addresses": [
                {
                    "ip": "209.133.79.58",
                    "cidr": "209.133.79.0/24",
                    "asn": 394161,
                    "desc": "TESLA - Tesla"
                }
            ],
            "tag": "ext",
            "source": "Previous Enum"
        }
        """
        self.results_subfolder.mkdir(parents=True, exist_ok=True)

        if Path(self.input().path).stat().st_size == 0:
            # Upstream scan produced nothing; mark the task complete.
            self.output().touch()
            return

        amass_json = self.input().open()
        with amass_json as amass_json_file:
            # The amass output is one JSON object per line.
            for line in amass_json_file:
                entry = json.loads(line)

                tgt = self.db_mgr.get_or_create(Target, hostname=entry.get("name"), is_web=True)

                # Associate every resolved address with the target record.
                for address in entry.get("addresses"):
                    ipaddr = address.get("ip")
                    tgt = self.db_mgr.add_ipv4_or_v6_address_to_target(tgt, ipaddr)

                self.db_mgr.add(tgt)

        self.output().touch()
        self.db_mgr.close()
| 33.660287 | 117 | 0.612367 |
4fce1782666b62940b9d0f65857c8b9d8725f82b | 9,138 | py | Python | resource/lib/python2.7/site-packages/Crypto/SelfTest/Hash/test_keccak.py | claudiopastorini/geofire-python | 274e1b1d733a1158e4f36de40f0349dbc1ff6c34 | [
"MIT"
] | 5 | 2018-01-18T11:39:23.000Z | 2022-01-01T11:38:40.000Z | resource/lib/python2.7/site-packages/Crypto/SelfTest/Hash/test_keccak.py | claudiopastorini/geofire-python | 274e1b1d733a1158e4f36de40f0349dbc1ff6c34 | [
"MIT"
] | 4 | 2017-02-20T02:33:48.000Z | 2017-02-20T04:10:16.000Z | resource/lib/python2.7/site-packages/Crypto/SelfTest/Hash/test_keccak.py | claudiopastorini/geofire-python | 274e1b1d733a1158e4f36de40f0349dbc1ff6c34 | [
"MIT"
] | 4 | 2018-01-18T11:32:38.000Z | 2021-11-25T23:28:04.000Z | # ===================================================================
#
# Copyright (c) 2015, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""Self-test suite for Crypto.Hash.keccak"""
import unittest
from binascii import hexlify, unhexlify
from Crypto.SelfTest.loader import load_tests
from Crypto.SelfTest.st_common import list_test_cases
from StringIO import StringIO
from Crypto.Hash import keccak
from Crypto.Util.py3compat import b, tobytes, bchr
class KeccakTest(unittest.TestCase):
    """Unit tests for the keccak hash object API.

    Uses only the non-deprecated unittest assertion names: ``failUnless``
    and ``assertEquals`` (deprecated aliases, removed in Python 3.12) are
    replaced by ``assertTrue`` and ``assertEqual``.
    """

    def test_new_positive(self):
        # The digest size can be given in bits or bytes; digest_size (bytes)
        # must reflect it, also on objects cloned via new().
        for digest_bits in (224, 256, 384, 512):
            hobj = keccak.new(digest_bits=digest_bits)
            self.assertEqual(hobj.digest_size, digest_bits // 8)

            hobj2 = hobj.new()
            self.assertEqual(hobj2.digest_size, digest_bits // 8)

        for digest_bytes in (28, 32, 48, 64):
            hobj = keccak.new(digest_bytes=digest_bytes)
            self.assertEqual(hobj.digest_size, digest_bytes)

            hobj2 = hobj.new()
            self.assertEqual(hobj2.digest_size, digest_bytes)

    def test_new_positive2(self):
        # Passing data to new() is equivalent to a subsequent update().
        digest1 = keccak.new(data=b("\x90"), digest_bytes=64).digest()
        digest2 = keccak.new(digest_bytes=64).update(b("\x90")).digest()
        self.assertEqual(digest1, digest2)

    def test_new_negative(self):
        # keccak.new needs digest size
        self.assertRaises(TypeError, keccak.new)

        h = keccak.new(digest_bits=512)

        # Either bits or bytes can be specified, but not both
        self.assertRaises(TypeError, keccak.new,
                          digest_bytes=64,
                          digest_bits=512)

        # Range checks
        self.assertRaises(ValueError, keccak.new, digest_bytes=0)
        self.assertRaises(ValueError, keccak.new, digest_bytes=1)
        self.assertRaises(ValueError, keccak.new, digest_bytes=65)
        self.assertRaises(ValueError, keccak.new, digest_bits=0)
        self.assertRaises(ValueError, keccak.new, digest_bits=1)
        self.assertRaises(ValueError, keccak.new, digest_bits=513)

    def test_update(self):
        # Hashing in two chunks must equal hashing the concatenation.
        pieces = [bchr(10) * 200, bchr(20) * 300]
        h = keccak.new(digest_bytes=64)
        h.update(pieces[0]).update(pieces[1])
        digest = h.digest()
        h = keccak.new(digest_bytes=64)
        h.update(pieces[0] + pieces[1])
        self.assertEqual(h.digest(), digest)

    def test_update_negative(self):
        # Text (unicode) input is rejected; only byte strings are hashable.
        h = keccak.new(digest_bytes=64)
        self.assertRaises(TypeError, h.update, u"string")

    def test_digest(self):
        h = keccak.new(digest_bytes=64)
        digest = h.digest()

        # digest does not change the state
        self.assertEqual(h.digest(), digest)
        # digest returns a byte string
        self.assertTrue(isinstance(digest, type(b("digest"))))

    def test_hex_digest(self):
        mac = keccak.new(digest_bits=512)
        digest = mac.digest()
        hexdigest = mac.hexdigest()

        # hexdigest is equivalent to digest
        self.assertEqual(hexlify(digest), tobytes(hexdigest))
        # hexdigest does not change the state
        self.assertEqual(mac.hexdigest(), hexdigest)
        # hexdigest returns a string
        self.assertTrue(isinstance(hexdigest, type("digest")))

    def test_update_after_digest(self):
        msg = b("rrrrttt")

        # Normally, update() cannot be done after digest()
        h = keccak.new(digest_bits=512, data=msg[:4])
        dig1 = h.digest()
        self.assertRaises(TypeError, h.update, msg[4:])
        dig2 = keccak.new(digest_bits=512, data=msg).digest()

        # With the proper flag, it is allowed
        h = keccak.new(digest_bits=512, data=msg[:4], update_after_digest=True)
        self.assertEqual(h.digest(), dig1)

        # ... and the subsequent digest applies to the entire message
        # up to that point
        h.update(msg[4:])
        self.assertEqual(h.digest(), dig2)
class KeccakVectors(unittest.TestCase):
    # Test methods are attached dynamically below via setattr(), one per
    # known-answer vector.
    pass

# TODO: add ExtremelyLong tests

# --- Keccak-224 known-answer tests ---
test_vectors_224 = load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                              "ShortMsgKAT_224.txt",
                              "Short Messages KAT 224",
                              { "len" : lambda x: int(x) } )

test_vectors_224 += load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                               "LongMsgKAT_224.txt",
                               "Long Messages KAT 224",
                               { "len" : lambda x: int(x) } )

for idx, tv in enumerate(test_vectors_224):
    if tv.len == 0:
        data = b("")
    else:
        data = tobytes(tv.msg)

    # data/result are bound as default arguments so each generated test
    # captures the current vector (avoids the late-binding closure pitfall).
    def new_test(self, data=data, result=tv.md):
        hobj = keccak.new(digest_bits=224, data=data)
        self.assertEqual(hobj.digest(), result)

    setattr(KeccakVectors, "test_224_%d" % idx, new_test)

# ---

# --- Keccak-256 known-answer tests ---
test_vectors_256 = load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                              "ShortMsgKAT_256.txt",
                              "Short Messages KAT 256",
                              { "len" : lambda x: int(x) } )

test_vectors_256 += load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                               "LongMsgKAT_256.txt",
                               "Long Messages KAT 256",
                               { "len" : lambda x: int(x) } )

for idx, tv in enumerate(test_vectors_256):
    if tv.len == 0:
        data = b("")
    else:
        data = tobytes(tv.msg)

    def new_test(self, data=data, result=tv.md):
        hobj = keccak.new(digest_bits=256, data=data)
        self.assertEqual(hobj.digest(), result)

    setattr(KeccakVectors, "test_256_%d" % idx, new_test)

# ---

# --- Keccak-384 known-answer tests ---
test_vectors_384 = load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                              "ShortMsgKAT_384.txt",
                              "Short Messages KAT 384",
                              { "len" : lambda x: int(x) } )

test_vectors_384 += load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                               "LongMsgKAT_384.txt",
                               "Long Messages KAT 384",
                               { "len" : lambda x: int(x) } )

for idx, tv in enumerate(test_vectors_384):
    if tv.len == 0:
        data = b("")
    else:
        data = tobytes(tv.msg)

    def new_test(self, data=data, result=tv.md):
        hobj = keccak.new(digest_bits=384, data=data)
        self.assertEqual(hobj.digest(), result)

    setattr(KeccakVectors, "test_384_%d" % idx, new_test)

# ---

# --- Keccak-512 known-answer tests ---
test_vectors_512 = load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                              "ShortMsgKAT_512.txt",
                              "Short Messages KAT 512",
                              { "len" : lambda x: int(x) } )

test_vectors_512 += load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "keccak"),
                               "LongMsgKAT_512.txt",
                               "Long Messages KAT 512",
                               { "len" : lambda x: int(x) } )

for idx, tv in enumerate(test_vectors_512):
    if tv.len == 0:
        data = b("")
    else:
        data = tobytes(tv.msg)

    def new_test(self, data=data, result=tv.md):
        hobj = keccak.new(digest_bits=512, data=data)
        self.assertEqual(hobj.digest(), result)

    setattr(KeccakVectors, "test_512_%d" % idx, new_test)
def get_tests(config={}):
    """Assemble the Keccak self-test suite as a flat list of test cases.

    ``config`` is accepted for API compatibility with the other ``get_tests``
    entry points in this package and is ignored here.
    """
    return list_test_cases(KeccakTest) + list_test_cases(KeccakVectors)
if __name__ == '__main__':
    import unittest  # re-imported locally; harmless, unittest is already imported above
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
| 36.261905 | 88 | 0.598271 |
f3fe7b95d5b3d39e931c0cd153fadaa94b7a1368 | 5,987 | py | Python | _build/jupyter_execute/ch05/ch5_1.py | liuzhengqi1996/math452 | 635b6ce53cb792e316abf4f47396f2e4f0686815 | [
"MIT"
] | null | null | null | _build/jupyter_execute/ch05/ch5_1.py | liuzhengqi1996/math452 | 635b6ce53cb792e316abf4f47396f2e4f0686815 | [
"MIT"
] | null | null | null | _build/jupyter_execute/ch05/ch5_1.py | liuzhengqi1996/math452 | 635b6ce53cb792e316abf4f47396f2e4f0686815 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # 5.1 Weierstrass Theorem
#
# To approximate any continuous function, a very simple idea is to
# approximate the function in a polynomial space. An important property of
# this space is that polynomials can approximate any reasonable function!
#
# - $P_n(\mathbb{R}^d)$ is dense in $C(\Omega)$ \[Weierstrass theorem\]
#
# - $P_n(\mathbb{R}^d)$ is dense in all Sobolev spaces:
# $L^2(\Omega), W^{m,p}(\Omega), \ldots$
#
# ```{prf:theorem}
# Let $\Omega\subset R^n$ be a closed and bounded set. Given any
# continuous function $f(x)$ on $\Omega$, there exists a sequence of
# polynomials $\{p_n(x)\}$ such that
# $$\displaystyle \lim_{n\rightarrow \infty} \max_{x\in \Omega}|f(x)-p_n(x)|=0$$
# ```
#
# ```{prf:proof}
# *Proof.* Let us first give the proof for $d=1$ and $\Omega=[0,1]$. Given
# $f:[0,1]\rightarrow R$ be a continuous function.
#
# Let
#
# $$
# \tilde f(x)=f(x)-l(x)
# $$
#
# where $l(x)=f(0)+x(f(1)-f(0))$. Then
# $\tilde f(0)=\tilde f(1)=0$. Noting that $l(x)$ is a linear function,
# hence without loss of generality, we can only consider the case
# $f:[0,1]\rightarrow R$ with $f(0)=f(1)=0$. Since $f$ is continuous on
# the closed interval $[0,1]$, then $f$ is uniformly continuous on
# $[0,1]$.
#
# First we extend $f$ to be zero outside of $[0,1]$ and obtain
# $f: R\rightarrow R$, then it is obvious that $f$ is still uniformly
# continuous.
#
# Next for $0\le x\le 1$, we construct
#
# $$
# p_n(x)=\int_{-1}^1f(x+t)Q_n(t)dt=\int_{-x}^{1-x}f(x+t)Q_n(t)dt=\int_{0}^{1}f(t)Q_n(t-x)dt
# $$
#
# where $Q_n(x)=c_n(1-x^2)^n$ and
#
# $$
# \int_{-1}^1 Q_n(x) dx=1.
# $$ (intq)
#
# Thus $\{p_n(x)\}$ is a sequence of polynomials.
#
# Since
#
# $$
# \begin{aligned}
# \int_{-1}^1 (1-x^2)^n dx&=2\int_{0}^1 (1-x^2)^n dx= 2\int_{0}^1 (1-x)^n(1+x)^n dx\\
# &\ge 2\int_{0}^1 (1-x)^n dx=\frac{2}{n+1}> \frac{1}{n}.\end{aligned}
# $$
#
# Combining with $\int_{-1}^1 Q_n(x) dx=1$, we obtain $c_n< n$ implying that
# for any $\delta>0$
#
# $$
# 0\le Q_n(x)\le n(1-\delta^2)^n \quad (\delta\le |x|\le 1),
# $$ (qest)
#
# so that
# $Q_n\rightarrow 0$ uniformly in $\delta\le |x|\le 1$ as
# $n\rightarrow \infty$.
#
# Given any $\epsilon >0$, since $f$ in uniformly continuous, there exists
# $\delta>0$ such that for any $|y-x|<\delta$, we have
#
# $$
# |f(y)-f(x)|< \frac{\epsilon}{2}.
# $$ (fcont)
#
# Finally, let $M=\max |f(x)|$, using {eq}`fcont` , {eq}`intq` , {eq}`qest` , we have
#
# $$
# \begin{aligned}
# \big| p_n(x)-f(x)\big|&=\big|\int_{-1}^1(f(x+t)-f(t))Q_n(t)dt\big|\le \int_{-1}^1 \big| f(x+t)-f(t)\big| Q_n(t)dt\\
# &\le 2M \int_{-1}^{-\delta} Q_n(t)dt+ \frac{\epsilon}{2}\int_{-\delta}^{\delta} Q_n(t)dt+ 2M\int_{\delta}^1 Q_n(t)dt\\
# &\le 4M n(1-\delta^2)^n + \frac{\epsilon}{2}< \epsilon\end{aligned}
# $$
#
# for all large enough $n$, which proves the theorem.
#
# The above proof generalizes to the high-dimensional case easily. We consider
# the case that
#
# $$
# \Omega=[0,1]^d.
# $$
# By extension and using a cutoff function, we may assume W.L.O.G. that $f=0$ on the boundary of $\Omega$
# and then extend this function to be zero outside of $\Omega$.
#
# Let us consider the special polynomial functions
#
# $$
# Q_n(x)=c_n\prod_{k=1}^d(1-x_k^2)^n
# $$ (Qn)
#
# Similar proof can then be applied. ◻
# ```
#
#
# ## 5.1.1 Curse of dimensionality
#
# Number of coefficients for polynomial space $P_n(\mathbb{R}^d)$ is
# $$N = \binom{d+n}{n} = \frac{(n+d)!}{d!n!}.$$ For example $n = 100$:
#
# $d =$ $2$ $4$ $8$
# ------- --------------- ----------------- --------------------
# $N=$ $5\times10^3$ $4.6\times10^6$ $3.5\times10^{11}$
#
# As the this table shows, the dimension of the polynomial space
# $P_n(\mathbb{R}^d)$ increases rapidly as the degree $n$ increases. This
# leads to an extremely large space therefore very expensive to
# approximate functions in polynomial spaces in high dimensions.
# ## 5.1.2 Runge's phenomenon
#
# A natural way to approximate a given function on any interval $[a,b]$ is
# to use an $n$-degree polynomial $p_n(x)$ by $n+1$ equispaced points,
# namely
#
# $$
# x_i=a+{b-a\over n},\quad i=0,1,2,\cdots,n.
# $$
#
# By Weierstrass' theorem, we expect a more accurate reconstruction of $f(x)$ by using
# more points. But this is not always true as shown in the following
# example.
#
# Consider the Runge function (a scaled version of the Witch of Agnesi)
#
# $$
# f(x)=\frac{1}{1+25x^{2}}.
# $$
#
# Runge found that if this function is interpolated at equidistant points $x_i$ between $-1$ and $1$ such that:
#
# $$
# x_{i}={\frac{2i}{n}}-1,\quad i\in \left\{0,1,\dots ,n\right\}
# $$
#
# with a polynomial $p_n(x)$ of degree $\leq n$, the resulting interpolation
# oscillates toward the ends of the interval, i.e. close to $-1$ and $1$.
# It can even be proven that the interpolation error increases (without
# bound) when the degree of the polynomial is increased:
#
# $$
# \lim_{{n\rightarrow \infty }}\left(\max_{{-1\leq x\leq 1}}|f(x)-p_{n}(x)|\right)=+\infty.
# $$
# This shows that high-degree polynomial interpolation at equidistant
# points can be troublesome.
#
#
# 
#
#
# The experiment shows that the polynomials $p_n(x)$ produced in this
# manner may in fact diverge away from $f(x)$ as $n$ increases. This
# typically occurs in an oscillating pattern that magnifies near the ends
# of the interpolation points. This phenomenon is attributed to Runge.
#
# Thus, this particular set of polynomial functions $p_n(x)$ is not
# guaranteed to have the property of uniform convergence. In other words,
# Weierstrass' theorem guarantees the existence of the polynomial
# functions, but how to find such polynomials is not provided.
#
# In[ ]:
| 32.362162 | 124 | 0.611993 |
9f7afe044bc1f51397a1dcedf5104c7edf98933b | 2,612 | py | Python | schengen.py | ThiccPadx/despanatun_project | 2a8210879a4bbd93f311ed99658057820032f681 | [
"MIT"
] | 1 | 2022-02-26T21:21:26.000Z | 2022-02-26T21:21:26.000Z | schengen.py | ThiccPadx/despanatun_project | 2a8210879a4bbd93f311ed99658057820032f681 | [
"MIT"
] | null | null | null | schengen.py | ThiccPadx/despanatun_project | 2a8210879a4bbd93f311ed99658057820032f681 | [
"MIT"
] | null | null | null | import os
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import subprocess
# Uncomment to check if alerting works
# playAlert()
# If it doesn't work, install mpg123
# sudo apt install mpg123
# log file location
logfile = "logs/log.txt"
# webdriver file location
webdriverfile = "/home/cgev/PycharmProjects/despanatun_script/chromedriver"
# Max page load wait time
delay = 10
# Alert audio file
alert = "alert.mp3"
def playAlert():
    """Play the alert sound file via the external ``mpg123`` player."""
    # Requires mpg123 to be installed (see the hint near the top of the file).
    os.system("mpg123 " + alert)
def bash(bashCommand):
    """Run *bashCommand* through ``bash -c`` and return its captured stdout.

    The original implementation captured the output but discarded it;
    returning the bytes is backward compatible (existing callers ignore the
    return value) and lets new callers inspect command results.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    return subprocess.check_output(['bash', '-c', bashCommand])
def say(text):
    """Speak *text* aloud using the ``spd-say`` speech synthesizer."""
    # NOTE(review): *text* is interpolated unquoted into a shell command line,
    # so a multi-word message is split into separate arguments by the shell —
    # consider shlex.quote() if full messages should be spoken.
    bash("spd-say " + text)
def log(text):
    """Speak *text* and append it, followed by a HH:MM:SS timestamp, to the log file."""
    bash("spd-say " + text)
    # Append the message, then the current time on the following line.
    bash("echo '" + text + "' >> " + logfile)
    bash("date '+%H:%M:%S' >> " + logfile)
options = Options()
# Re-use a persistent Chrome profile directory so cookies/session survive
# between runs.
options.add_argument("user-data-dir=selenium")
browser = webdriver.Chrome(webdriverfile, 0, options)
print("Make sure website is reachable and language is selected to English")
input("Press Enter when ready")
browser.set_page_load_timeout(delay)
# Write a session-start marker to the log file.
bash("echo >> " + logfile)
bash("echo Started at: >> " + logfile)
bash("date '+%H:%M:%S' >> " + logfile)
# Poll the booking page forever until a slot is found or the user stops it.
while (1 < 2):
    try:
        # Open Web page
        browser.get('https://secure.e-konsulat.gov.pl/Uslugi/RejestracjaTerminu.aspx?IDUSLUGI=8&IDPlacowki=134')
        # Polish for "you exceeded the allowed (number of attempts)".
        if ("Przekroczyłeś dopuszczalną" in browser.page_source):
            log("Too many attempts ")
            say("Too many attempts")
            continue
        # Check if the page did load
        if not ("System Zdalnej Rejestracji" in browser.title):
            log("Website didnt load")
            continue
        # Run anticaptcha — the hotkey presumably triggers a captcha-solving
        # browser extension bound to Ctrl+Shift+6; confirm the extension setup.
        box = browser.find_element_by_id("cp_Captcha_ctl01_tbAnswer")
        box.send_keys(Keys.LEFT_CONTROL + Keys.LEFT_SHIFT + '6')
        time.sleep(20)
        # Click Continue button
        cont = browser.find_element_by_id('cp_btnDalej')
        cont.click()
        if ("Invalid image verification" in browser.page_source):
            log("Captcha failed")
            continue
        # Check place availability
        if ("Lack of available dates to 2019-08-29" in browser.page_source):
            log("Lack of available dates")
        elif ("Your application has been cancelled" in browser.page_source):
            log("Caught by website ")
        else:
            # A slot appears to be available: alert the user and pause so the
            # booking can be completed manually.
            playAlert()
            log("Place found!!!")
            input("Press Enter to continue")
    except TimeoutException:
        log("Website timeout")
| 30.372093 | 112 | 0.655436 |
07aa2cb23ed59451dc98842777c78e4697b66124 | 667 | py | Python | Recommender/recommender/test_functions.py | jesierski/early-sunrise | b39b983fdbd67cd1c264773b1a511b8b76d3346d | [
"MIT"
] | 3 | 2021-03-01T09:41:11.000Z | 2021-10-31T20:55:42.000Z | Recommender/recommender/test_functions.py | jesierski/early-sunrise | b39b983fdbd67cd1c264773b1a511b8b76d3346d | [
"MIT"
] | null | null | null | Recommender/recommender/test_functions.py | jesierski/early-sunrise | b39b983fdbd67cd1c264773b1a511b8b76d3346d | [
"MIT"
] | null | null | null | import pandas as pd
from functions import get_ratings, get_movies, make_df, impute
from for_model import get_scores
from recommender_cosim import get_scores2
def test_rating_in():
    """The ratings loader must hand back a pandas DataFrame."""
    assert isinstance(get_ratings(), pd.DataFrame)
def test_movies_in():
    """The movies loader must hand back a pandas DataFrame."""
    assert isinstance(get_movies(), pd.DataFrame)
def test_recommender_nmf():
    """The NMF recommender returns its scores as a pandas DataFrame."""
    scores = get_scores('Silver Spoon (2014)', 'Flint (2017)', 'Jumanji (1995)', 1, 4.9, 5.0)
    assert isinstance(scores, pd.DataFrame)
def test_recommender_cosim():
    """The cosine-similarity recommender returns its scores as a pandas DataFrame."""
    scores = get_scores2('Silver Spoon (2014)', 'Flint (2017)', 'Jumanji (1995)', 1, 4.9, 5.0)
    assert isinstance(scores, pd.DataFrame)
bd6f10680719a2488234d45b72c55e2b7fa57565 | 2,046 | py | Python | NewsRecommendation/src/model/NAML/fm.py | ustcsky/recommenders | 64f0dd4653cf7f1d687c45c61807841415e111c7 | [
"MIT"
] | null | null | null | NewsRecommendation/src/model/NAML/fm.py | ustcsky/recommenders | 64f0dd4653cf7f1d687c45c61807841415e111c7 | [
"MIT"
] | null | null | null | NewsRecommendation/src/model/NAML/fm.py | ustcsky/recommenders | 64f0dd4653cf7f1d687c45c61807841415e111c7 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
class FM_Layer(nn.Module):
    """Degree-2 Factorization Machine layer.

    For an input of shape ``(batch, n)`` it produces ``(batch, 1)`` scores:
    a first-order linear term plus ``0.5 * sum_f [(x^2 @ V^2) - (x @ V)^2]_f``.

    NOTE(review): the interaction term is the negative of the textbook FM
    form ``0.5 * ((xV)^2 - x^2 V^2)`` — confirm the sign is intended.

    Args:
        n: number of input features.
        k: dimensionality of the latent factor vectors.
    """

    def __init__(self, n=10, k=5):
        super(FM_Layer, self).__init__()
        self.n = n
        self.k = k
        # First-order (linear + bias) part of the model.
        self.linear = nn.Linear(self.n, 1)
        # Latent factor matrix for the pairwise-interaction part.  randn is
        # kept before uniform_ so the RNG stream matches the original init.
        self.V = nn.Parameter(torch.randn(self.n, self.k))
        nn.init.uniform_(self.V, -0.1, 0.1)

    def fm_layer(self, x):
        """Score ``x`` of shape (batch, n); returns (batch, 1)."""
        first_order = self.linear(x)
        # (x @ V)^2 per latent factor.
        squared_projection = torch.pow(torch.matmul(x, self.V), 2)
        # x^2 @ V^2 per latent factor.
        projected_squares = torch.matmul(torch.pow(x, 2), torch.pow(self.V, 2))
        pairwise = torch.sum(projected_squares - squared_projection, -1, keepdim=True)
        return first_order + 0.5 * pairwise

    def forward(self, x):
        return self.fm_layer(x)
# Module-level smoke test: a tiny forward pass through the layer.
# NOTE(review): this executes at import time; consider guarding it with
# ``if __name__ == '__main__':``.
fm = FM_Layer(6, 1)
x = torch.randn(400, 6)
output = fm(x)  # (400, 1)
class Attention(torch.nn.Module):
    """Additive attention pooling over a bag of vectors.

    Each input vector is projected, passed through tanh, and scored against a
    learned query vector; the softmax-normalized scores weight the original
    vectors into a single pooled vector per batch element.
    """

    def __init__(self, query_vector_dim, input_vector_dim):
        super(Attention, self).__init__()
        self.linear = nn.Linear(input_vector_dim, query_vector_dim)
        self.query_vector = nn.Parameter(torch.empty(query_vector_dim).uniform_(-0.1, 0.1))

    def forward(self, input):
        """Pool ``input`` of shape (batch, n_input_vector, input_vector_dim)
        down to (batch, input_vector_dim).
        """
        # (batch, n_input_vector, query_vector_dim)
        projected = torch.tanh(self.linear(input))
        # Attention weights over the vector axis: (batch, n_input_vector).
        attention = F.softmax(torch.matmul(projected, self.query_vector), dim=1)
        # Weighted sum of the original vectors: (batch, input_vector_dim).
        pooled = torch.bmm(attention.unsqueeze(dim=1), input).squeeze(dim=1)
        return pooled
43db65ecfa2da59037af4de4e36f1c29739626dc | 2,246 | py | Python | data_scripts/cache_to_cache.py | GabeWeiss/break_data_bank | cfd97a436270768aade5aaf529e4871164ac4f9c | [
"Apache-2.0"
] | null | null | null | data_scripts/cache_to_cache.py | GabeWeiss/break_data_bank | cfd97a436270768aade5aaf529e4871164ac4f9c | [
"Apache-2.0"
] | null | null | null | data_scripts/cache_to_cache.py | GabeWeiss/break_data_bank | cfd97a436270768aade5aaf529e4871164ac4f9c | [
"Apache-2.0"
] | null | null | null | import argparse
import sys
from google.cloud import firestore
parser = argparse.ArgumentParser()
# TODO: Add ability to set seed and transfer directly to production
#parser.add_argument("-s", "--seed")
parser.add_argument("--seed", required=True)
args = parser.parse_args()

db = firestore.Client()

try:
    SEED = int(args.seed)
except ValueError:
    # Catch only the integer-parse failure; the original bare ``except:``
    # would also have swallowed KeyboardInterrupt/SystemExit.
    print("You need to specify an integer for the seed.")
    sys.exit(1)
# Firestore document IDs for every database instance exercised by the demo.
# The pattern appears to be "<db-flavor>-<tier>-<variant>" across Cloud SQL,
# replicated Cloud SQL, and Spanner — confirm against the event schema.
instance_keys = [
    "sql-1-1", "sql-1-2", "sql-1-3",
    "sql-2-1", "sql-2-2", "sql-2-3",
    "sql-3-1", "sql-3-2", "sql-3-3",
    "sqlrep-1-1", "sqlrep-1-2", "sqlrep-1-3",
    "sqlrep-2-1", "sqlrep-2-2", "sqlrep-2-3",
    "sqlrep-3-1", "sqlrep-3-2", "sqlrep-3-3",
    "spanner-1-1", "spanner-1-2", "spanner-1-3",
    "spanner-2-1", "spanner-2-2", "spanner-2-3",
    "spanner-3-1", "spanner-3-2", "spanner-3-3"
]
# Traffic-pattern document IDs; presumably "<level>-<level>" combinations.
traffic_keys = [
    "1-1", "1-2", "1-3", "2-1", "2-2", "2-3", "3-1", "3-2", "3-3"
]
def transfer():
    """Copy every staged transaction into the live cache, stamping each with SEED."""
    def _transactions_ref(stage, instance, traffic):
        # Both trees share the same layout; only the stage collection differs.
        return (db.collection("events").document("next2020")
                  .collection(stage).document(instance)
                  .collection("patterns").document(traffic)
                  .collection("transactions"))

    for instance in instance_keys:
        print(f"Running for instance: {instance}")
        for traffic in traffic_keys:
            source = _transactions_ref("cached_staged", instance, traffic)
            target = _transactions_ref("cached", instance, traffic)
            for snapshot in source.stream():
                record = snapshot.to_dict()
                record['seed'] = SEED
                target.add(record)
def will_overwrite():
    """Return True if the live cache already holds a transaction for SEED.

    Only the "1-1" traffic pattern of each instance is probed — a cheap
    sentinel check rather than an exhaustive scan.
    """
    for instance in instance_keys:
        ref = (db.collection("events").document("next2020")
                 .collection("cached").document(instance)
                 .collection("patterns").document("1-1")
                 .collection("transactions"))
        hits = ref.limit(1).where('seed', '==', SEED).stream()
        if any(True for _ in hits):
            return True
    return False
# Refuse to run when data for this seed already exists, so a rerun cannot
# duplicate transactions in the live cache.
if will_overwrite():
    print("This would overwrite existing data in the database. Please delete any transactions in the way before continuing.")
    sys.exit(1)
transfer()
| 35.09375 | 195 | 0.645147 |
e823619137cc103ba915a752539dd2229d123100 | 45 | py | Python | __init__.py | caokai1073/UTA | 352719118cdebb25ccde6e22928676b83b38cd41 | [
"MIT"
] | 18 | 2020-02-05T00:49:19.000Z | 2022-01-12T03:32:31.000Z | __init__.py | caokai1073/UTA | 352719118cdebb25ccde6e22928676b83b38cd41 | [
"MIT"
] | 3 | 2020-12-29T07:18:50.000Z | 2022-01-19T04:17:41.000Z | __init__.py | caokai1073/UTA | 352719118cdebb25ccde6e22928676b83b38cd41 | [
"MIT"
] | 4 | 2020-06-04T10:00:25.000Z | 2020-11-01T11:50:34.000Z | #!/usr/bin/env python
# encoding=utf-8
| 9 | 22 | 0.6 |
1a228c63aa7e1b96d18566073239739e8d0dcaf0 | 5,223 | py | Python | samples/dnn/segmentation.py | artun3e/opencv | 524a2fffe96195b906a95b548b0a185d3251eb7e | [
"BSD-3-Clause"
] | 5 | 2015-04-13T01:45:39.000Z | 2017-03-07T10:35:28.000Z | samples/dnn/segmentation.py | artun3e/opencv | 524a2fffe96195b906a95b548b0a185d3251eb7e | [
"BSD-3-Clause"
] | 4 | 2019-02-18T14:09:16.000Z | 2019-09-30T11:01:03.000Z | samples/dnn/segmentation.py | artun3e/opencv | 524a2fffe96195b906a95b548b0a185d3251eb7e | [
"BSD-3-Clause"
] | 3 | 2015-02-09T08:29:48.000Z | 2016-06-28T07:47:04.000Z | import cv2 as cv
import argparse
import numpy as np
import sys
from common import *
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
help='An optional path to file with preprocessing parameters.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet'],
help='Optional name of an origin framework of the model. '
'Detect it automatically if it does not set.')
parser.add_argument('--colors', help='Optional path to a text file with colors for an every class. '
'An every color is represented with three values from 0 to 255 in BGR channels order.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: VPU' % targets)
args, _ = parser.parse_known_args()
add_preproc_args(args.zoo, parser, 'segmentation')
parser = argparse.ArgumentParser(parents=[parser],
description='Use this script to run semantic segmentation deep learning networks using OpenCV.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()
args.model = findFile(args.model)
args.config = findFile(args.config)
args.classes = findFile(args.classes)
np.random.seed(324)
# Load names of classes
classes = None
if args.classes:
with open(args.classes, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Load colors
colors = None
if args.colors:
with open(args.colors, 'rt') as f:
colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')]
legend = None
def showLegend(classes):
    """Render a one-time 'Legend' window mapping each class name to its color.

    Uses the module-level ``colors`` list and the ``legend`` global as a
    build-once cache; does nothing when class names were not provided or the
    legend was already drawn.
    """
    global legend
    if classes is not None and legend is None:
        blockHeight = 30
        # One color per class name is required to draw the legend rows.
        assert(len(classes) == len(colors))
        legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8)
        for i in range(len(classes)):
            block = legend[i * blockHeight:(i + 1) * blockHeight]
            block[:,:] = colors[i]
            cv.putText(block, classes[i], (0, blockHeight//2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
        cv.namedWindow('Legend', cv.WINDOW_NORMAL)
        cv.imshow('Legend', legend)
        # BUGFIX: removed the trailing `classes = None`, which only rebound the
        # local name (no `global classes`) and therefore had no effect.
# BUGFIX: removed a stray `classes = None` here that clobbered the class
# names loaded from --classes above, leaving showLegend() permanently inert.
# Load a network
net = cv.dnn.readNet(args.model, args.config, args.framework)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)
winName = 'Deep learning semantic segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
# Fall back to the default camera when no --input file was given.
cap = cv.VideoCapture(args.input if args.input else 0)
legend = None
# Main loop: grab frames until a key is pressed or the stream ends.
while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        # End of stream: keep the last frame on screen until a key press.
        cv.waitKey()
        break
    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]
    # Create a 4D blob from a frame.
    inpWidth = args.width if args.width else frameWidth
    inpHeight = args.height if args.height else frameHeight
    blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=False)
    # Run a model
    net.setInput(blob)
    score = net.forward()
    # Network output is indexed [batch, class, y, x].
    numClasses = score.shape[1]
    height = score.shape[2]
    width = score.shape[3]
    # Draw segmentation
    if not colors:
        # Generate colors
        # Lazily build one pseudo-random color per class (class 0 is black);
        # deterministic because np.random was seeded above.
        colors = [np.array([0, 0, 0], np.uint8)]
        for i in range(1, numClasses):
            colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2)
    # Per-pixel argmax over the class axis, then map class ids to colors.
    classIds = np.argmax(score[0], axis=0)
    segm = np.stack([colors[idx] for idx in classIds.flatten()])
    segm = segm.reshape(height, width, 3)
    segm = cv.resize(segm, (frameWidth, frameHeight), interpolation=cv.INTER_NEAREST)
    # Blend the segmentation map over the original frame.
    frame = (0.1 * frame + 0.9 * segm).astype(np.uint8)
    # Put efficiency information.
    t, _ = net.getPerfProfile()
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
    showLegend(classes)
    cv.imshow(winName, frame)
| 40.804688 | 130 | 0.640245 |
023e8a5a01d92a08250536cfd4a17119758299ac | 3,674 | py | Python | reana_cluster/cli/__init__.py | BenGalewsky/reana-cluster | b631360d92886b760d56d47686774537aa4b1db5 | [
"MIT"
] | 1 | 2019-06-27T01:21:02.000Z | 2019-06-27T01:21:02.000Z | reana_cluster/cli/__init__.py | BenGalewsky/reana-cluster | b631360d92886b760d56d47686774537aa4b1db5 | [
"MIT"
] | null | null | null | reana_cluster/cli/__init__.py | BenGalewsky/reana-cluster | b631360d92886b760d56d47686774537aa4b1db5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA cluster command line interface."""
import logging
import sys
import click
from . import cluster
from ..config import cluster_spec_default_file_path, supported_backends
from ..utils import load_spec_file
# Verbose format used at DEBUG level: timestamp, pid, and source location.
DEBUG_LOG_FORMAT = '[%(asctime)s] p%(process)s ' \
                   '{%(pathname)s:%(lineno)d} ' \
                   '%(levelname)s - %(message)s'
# Compact default format for INFO/WARNING output.
LOG_FORMAT = '[%(levelname)s] %(message)s'
class Config(object):
    """Shared state handed from the CLI group down to its subcommands."""

    def __init__(self):
        """Start with no backend and no cluster specification selected."""
        self.backend = self.cluster_spec = None
@click.group()
@click.option(
    '--loglevel',
    '-l',
    help='Sets log level',
    type=click.Choice(['DEBUG', 'INFO', 'WARNING']),
    default='WARNING')
@click.option(
    '-f',
    '--file',
    type=click.Path(exists=True, resolve_path=True),
    default=cluster_spec_default_file_path,
    help='REANA cluster specifications file describing configuration '
         'for the cluster and for REANA components')
@click.option(
    '-s',
    '--skip-validation', is_flag=True,
    help='If set, specifications file is not validated before '
         'starting the initialization.')
@click.option(
    '--cephfs', is_flag=True,
    help='Set cephfs volume for cluster storage.')
@click.option(
    '--cephfs-volume-size',
    type=int,
    help='Set cephfs volume size in GB.')
@click.option(
    '--debug', is_flag=True,
    help='If set, deploy REANA in debug mode.')
@click.option(
    '-u',
    '--url',
    help='Set REANA cluster URL')
@click.pass_context
def cli(ctx, loglevel, skip_validation, file,
        cephfs, cephfs_volume_size, debug, url):
    """Command line application for managing a REANA cluster.

    Loads the cluster specification file, instantiates the matching backend,
    and stores it on the click context for the subcommands.
    """
    logging.basicConfig(
        # BUGFIX: the choices are upper-case ('DEBUG'), but the old comparison
        # tested the lower-case 'debug', so the verbose format was unreachable.
        format=DEBUG_LOG_FORMAT if loglevel == 'DEBUG' else LOG_FORMAT,
        stream=sys.stderr,
        level=loglevel)
    try:
        cluster_spec = load_spec_file(click.format_filename(file),
                                      skip_validation)
        if cephfs_volume_size and not cephfs:
            # Volume size only makes sense together with the cephfs flag.
            cephfs_volume_size = None
            click.echo(click.style('CEPHFS volume size will not be set because'
                                   ' missing `--cephfs` flag', fg='yellow'))
        ctx.obj = Config()
        cluster_type = cluster_spec['cluster']['type']
        logging.info("Cluster type specified in cluster "
                     "specifications file is '{}'"
                     .format(cluster_type))
        ctx.obj.backend = supported_backends[cluster_type](
            cluster_spec,
            cephfs=cephfs,
            cephfs_volume_size=cephfs_volume_size,
            debug=debug,
            url=url)
    # This might be unnecessary since validation of cluster specifications
    # file is done against schema and schema should include the supported
    # cluster (backend) types.
    # On the other hand there is --skip-validation flag.
    except KeyError as e:
        # BUGFIX: the old handler formatted `cluster_type`, which is unbound
        # when the KeyError comes from the spec lookup itself; that raised a
        # NameError masking the real problem. Log the missing key instead and
        # re-raise without resetting the traceback.
        logging.info('Unsupported value for cluster type in '
                     'reana cluster specifications file: {}'
                     .format(e))
        raise
    except Exception as e:
        # Best-effort: unexpected errors are only surfaced at debug level.
        logging.debug(str(e))
# Register every cluster subcommand on the top-level command group.
cli.add_command(cluster.init)
cli.add_command(cluster.verify)
cli.add_command(cluster.down)
cli.add_command(cluster.restart)
cli.add_command(cluster.get)
cli.add_command(cluster.env)
cli.add_command(cluster.status)
| 30.363636 | 79 | 0.637452 |
b23cb0829cc5f3bc93031c9d6b9de2aaf7826180 | 20,647 | py | Python | tests/plugins/different_versions/rdflib3_1_0/env/lib/python2.7/site-packages/rdflib/plugins/memory.py | EliAndrewC/sideboard | 81f0099f4c03e7abb5856e046539aa033ecf04f9 | [
"BSD-3-Clause"
] | 3 | 2015-01-12T16:16:50.000Z | 2020-03-20T03:22:36.000Z | lib/rdflib/plugins/memory.py | mhausenblas/madr | aaae3fa6a68cc56978873e329f4886ec0bc11b9c | [
"Apache-2.0"
] | null | null | null | lib/rdflib/plugins/memory.py | mhausenblas/madr | aaae3fa6a68cc56978873e329f4886ec0bc11b9c | [
"Apache-2.0"
] | null | null | null | from __future__ import generators
from rdflib.term import BNode
from rdflib.store import Store, NO_STORE, VALID_STORE
ANY = Any = None
class Memory(Store):
    """\
    An in memory implementation of a triple store.
    This triple store uses nested dictionaries to store triples. Each
    triple is stored in two such indices as follows spo[s][p][o] = 1 and
    pos[p][o][s] = 1.
    Authors: Michel Pelletier, Daniel Krech, Stefan Niederhauser
    NOTE(review): legacy Python 2 code (tuple parameters, iteritems);
    kept byte-identical apart from comments.
    """
    def __init__(self, configuration=None, identifier=None):
        super(Memory, self).__init__(configuration)
        self.identifier = identifier
        # indexed by [subject][predicate][object]
        self.__spo = {}
        # indexed by [predicate][object][subject]
        self.__pos = {}
        # indexed by [object][subject][predicate]
        self.__osp = {}
        # prefix <-> namespace maps backing the bind/namespace/prefix API
        self.__namespace = {}
        self.__prefix = {}
    def add(self, (subject, predicate, object), context, quoted=False):
        """\
        Add a triple to the store of triples.
        Inserts the triple into all three nested-dict indices; the context
        argument is accepted for API compatibility but not stored.
        """
        # add dictionary entries for spo[s][p][p] = 1 and pos[p][o][s]
        # = 1, creating the nested dictionaries where they do not yet
        # exits.
        # NOTE(review): the bare try/excepts below are a legacy
        # create-on-miss idiom; they would also hide unrelated errors.
        spo = self.__spo
        try:
            po = spo[subject]
        except:
            po = spo[subject] = {}
        try:
            o = po[predicate]
        except:
            o = po[predicate] = {}
        o[object] = 1
        pos = self.__pos
        try:
            os = pos[predicate]
        except:
            os = pos[predicate] = {}
        try:
            s = os[object]
        except:
            s = os[object] = {}
        s[subject] = 1
        osp = self.__osp
        try:
            sp = osp[object]
        except:
            sp = osp[object] = {}
        try:
            p = sp[subject]
        except:
            p = sp[subject] = {}
        p[predicate] = 1
    def remove(self, (subject, predicate, object), context=None):
        # Expand wildcard (None) terms via triples(), then delete every
        # match from all three indices.
        for (subject, predicate, object), c in self.triples(
            (subject, predicate, object)):
            del self.__spo[subject][predicate][object]
            del self.__pos[predicate][object][subject]
            del self.__osp[object][subject][predicate]
    def triples(self, (subject, predicate, object), context=None):
        """A generator over all the triples matching """
        # Each branch picks the index whose leading keys are bound, so the
        # unbound terms are enumerated with as few scans as possible.
        if subject!=ANY: # subject is given
            spo = self.__spo
            if subject in spo:
                subjectDictionary = spo[subject]
                if predicate!=ANY: # subject+predicate is given
                    if predicate in subjectDictionary:
                        if object!=ANY: # subject+predicate+object is given
                            if object in subjectDictionary[predicate]:
                                yield (subject, predicate, object), \
                                      self.__contexts()
                            else: # given object not found
                                pass
                        else: # subject+predicate is given, object unbound
                            for o in subjectDictionary[predicate].keys():
                                yield (subject, predicate, o), \
                                      self.__contexts()
                    else: # given predicate not found
                        pass
                else: # subject given, predicate unbound
                    for p in subjectDictionary.keys():
                        if object!=ANY: # object is given
                            if object in subjectDictionary[p]:
                                yield (subject, p, object), self.__contexts()
                            else: # given object not found
                                pass
                        else: # object unbound
                            for o in subjectDictionary[p].keys():
                                yield (subject, p, o), self.__contexts()
            else: # given subject not found
                pass
        elif predicate!=ANY: # predicate is given, subject unbound
            pos = self.__pos
            if predicate in pos:
                predicateDictionary = pos[predicate]
                if object!=ANY: # predicate+object is given, subject unbound
                    if object in predicateDictionary:
                        for s in predicateDictionary[object].keys():
                            yield (s, predicate, object), self.__contexts()
                    else: # given object not found
                        pass
                else: # predicate is given, object+subject unbound
                    for o in predicateDictionary.keys():
                        for s in predicateDictionary[o].keys():
                            yield (s, predicate, o), self.__contexts()
        elif object!=ANY: # object is given, subject+predicate unbound
            osp = self.__osp
            if object in osp:
                objectDictionary = osp[object]
                for s in objectDictionary.keys():
                    for p in objectDictionary[s].keys():
                        yield (s, p, object), self.__contexts()
        else: # subject+predicate+object unbound
            spo = self.__spo
            for s in spo.keys():
                subjectDictionary = spo[s]
                for p in subjectDictionary.keys():
                    for o in subjectDictionary[p].keys():
                        yield (s, p, o), self.__contexts()
    def __len__(self, context=None):
        # Full scan; see the optimization marker below.
        #@@ optimize
        i = 0
        for triple in self.triples((None, None, None)):
            i += 1
        return i
    def bind(self, prefix, namespace):
        # Register the mapping in both directions.
        self.__prefix[namespace] = prefix
        self.__namespace[prefix] = namespace
    def namespace(self, prefix):
        # Namespace bound to `prefix`, or None.
        return self.__namespace.get(prefix, None)
    def prefix(self, namespace):
        # Prefix bound to `namespace`, or None.
        return self.__prefix.get(namespace, None)
    def namespaces(self):
        # Generator of all (prefix, namespace) bindings.
        for prefix, namespace in self.__namespace.iteritems():
            yield prefix, namespace
    def __contexts(self):
        # This store is not context-aware, so every triple reports an
        # empty context generator.
        return (c for c in []) # TODO: best way to return empty generator
class IOMemory(Store):
    """\
    An integer-key-optimized-context-aware-in-memory store.
    Uses nested dictionaries to store triples and context. Each triple
    is stored in six such indices as follows cspo[c][s][p][o] = 1
    and cpos[c][p][o][s] = 1 and cosp[c][o][s][p] = 1 as well as
    spo[s][p][o] = [c] and pos[p][o][s] = [c] and pos[o][s][p] = [c]
    Context information is used to track the 'source' of the triple
    data for merging, unmerging, remerging purposes. context aware
    store stores consume more memory size than non context stores.
    NOTE(review): legacy Python 2 code (tuple parameters, has_key,
    iteritems, xrange, `except E, e`); kept byte-identical apart from
    comments.
    """
    context_aware = True
    formula_aware = True
    def __init__(self, configuration=None, identifier=None):
        super(IOMemory, self).__init__()
        # indexed by [context][subject][predicate][object] = 1
        self.cspo = self.createIndex()
        # indexed by [context][predicate][object][subject] = 1
        self.cpos = self.createIndex()
        # indexed by [context][object][subject][predicate] = 1
        self.cosp = self.createIndex()
        # indexed by [subject][predicate][object] = [context]
        self.spo = self.createIndex()
        # indexed by [predicate][object][subject] = [context]
        self.pos = self.createIndex()
        # indexed by [object][subject][predicate] = [context]
        self.osp = self.createIndex()
        # indexes integer keys to identifiers
        self.forward = self.createForward()
        # reverse index of forward
        self.reverse = self.createReverse()
        self.identifier = identifier or BNode()
        self.__namespace = self.createPrefixMap()
        self.__prefix = self.createPrefixMap()
    def open(self, configuration, create=False):
        # A purely in-memory store has nothing to open on disk.
        if not create:
            # An IOMemory Store never exists.
            return NO_STORE
        else:
            return VALID_STORE
    def bind(self, prefix, namespace):
        # Register the mapping in both directions.
        self.__prefix[namespace] = prefix
        self.__namespace[prefix] = namespace
    def namespace(self, prefix):
        # Namespace bound to `prefix`, or None.
        return self.__namespace.get(prefix, None)
    def prefix(self, namespace):
        # Prefix bound to `namespace`, or None.
        return self.__prefix.get(namespace, None)
    def namespaces(self):
        # Generator of all (prefix, namespace) bindings.
        for prefix, namespace in self.__namespace.iteritems():
            yield prefix, namespace
    def defaultContext(self):
        # NOTE(review): `default_context` is never assigned in this class;
        # presumably set externally — verify before relying on this method.
        return self.default_context
    def addContext(self, context):
        """ Add context w/o adding statement. Dan you can remove this if you want """
        # NOTE(review): `self.forward` is a plain dict, which has no
        # `insert` method — this loop would raise AttributeError if ever
        # reached; the method looks dead/unused.
        if not self.reverse.has_key(context):
            ci=randid()
            while not self.forward.insert(ci, context):
                ci=randid()
            self.reverse[context] = ci
    def intToIdentifier(self, (si, pi, oi)):
        """ Resolve an integer triple into identifers. """
        return (self.forward[si], self.forward[pi], self.forward[oi])
    def identifierToInt(self, (s, p, o)):
        """ Resolve an identifier triple into integers. """
        return (self.reverse[s], self.reverse[p], self.reverse[o])
    def uniqueSubjects(self, context=None):
        # Distinct subjects, optionally restricted to one context.
        if context is None:
            index = self.spo
        else:
            index = self.cspo[context]
        for si in index.keys():
            yield self.forward[si]
    def uniquePredicates(self, context=None):
        # Distinct predicates, optionally restricted to one context.
        if context is None:
            index = self.pos
        else:
            index = self.cpos[context]
        for pi in index.keys():
            yield self.forward[pi]
    def uniqueObjects(self, context=None):
        # Distinct objects, optionally restricted to one context.
        if context is None:
            index = self.osp
        else:
            index = self.cosp[context]
        for oi in index.keys():
            yield self.forward[oi]
    # Factory hooks for the internal mappings; presumably overridable by
    # subclasses that want different map implementations.
    def createForward(self):
        return {}
    def createReverse(self):
        return {}
    def createIndex(self):
        return {}
    def createPrefixMap(self):
        return {}
    def add(self, triple, context, quoted=False):
        """\
        Add a triple to the store.
        Assigns fresh random integer keys to any identifier not seen
        before, then records the triple in all six nested indices (only
        the context-aware three when `quoted`).
        """
        Store.add(self, triple, context, quoted)
        for triple, cg in self.triples(triple, context):
            #triple is already in the store.
            return
        subject, predicate, object = triple
        f = self.forward
        r = self.reverse
        # assign keys for new identifiers
        if not r.has_key(subject):
            si=randid()
            while f.has_key(si):
                si=randid()
            f[si] = subject
            r[subject] = si
        else:
            si = r[subject]
        if not r.has_key(predicate):
            pi=randid()
            while f.has_key(pi):
                pi=randid()
            f[pi] = predicate
            r[predicate] = pi
        else:
            pi = r[predicate]
        if not r.has_key(object):
            oi=randid()
            while f.has_key(oi):
                oi=randid()
            f[oi] = object
            r[object] = oi
        else:
            oi = r[object]
        if not r.has_key(context):
            ci=randid()
            while f.has_key(ci):
                ci=randid()
            f[ci] = context
            r[context] = ci
        else:
            ci = r[context]
        # add dictionary entries for cspo[c][s][p][o] = 1,
        # cpos[c][p][o][s] = 1, and cosp[c][o][s][p] = 1, creating the
        # nested {} where they do not yet exits.
        self._setNestedIndex(self.cspo, ci, si, pi, oi)
        self._setNestedIndex(self.cpos, ci, pi, oi, si)
        self._setNestedIndex(self.cosp, ci, oi, si, pi)
        if not quoted:
            self._setNestedIndex(self.spo, si, pi, oi, ci)
            self._setNestedIndex(self.pos, pi, oi, si, ci)
            self._setNestedIndex(self.osp, oi, si, pi, ci)
    def _setNestedIndex(self, index, *keys):
        # Walk/create the nested dicts for all but the last key, then mark
        # the leaf.
        for key in keys[:-1]:
            if not index.has_key(key):
                index[key] = self.createIndex()
            index = index[key]
        index[keys[-1]] = 1
    def _removeNestedIndex(self, index, *keys):
        """ Remove context from the list of contexts in a nested index.
        Afterwards, recursively remove nested indexes when they became empty.
        """
        parents = []
        for key in keys[:-1]:
            parents.append(index)
            index = index[key]
        del index[keys[-1]]
        n = len(parents)
        # Prune now-empty dicts bottom-up.
        for i in xrange(n):
            index = parents[n-1-i]
            key = keys[n-1-i]
            if len(index[key]) == 0:
                del index[key]
    def remove(self, triple, context=None):
        # Remove matching triples; with no context, every context holding
        # the triple is purged. Passing the store itself means "any".
        Store.remove(self, triple, context)
        if context is not None:
            if context == self:
                context = None
        f = self.forward
        r = self.reverse
        if context is None:
            for triple, cg in self.triples(triple):
                subject, predicate, object = triple
                si, pi, oi = self.identifierToInt((subject, predicate, object))
                contexts = list(self.contexts(triple))
                for context in contexts:
                    ci = r[context]
                    del self.cspo[ci][si][pi][oi]
                    del self.cpos[ci][pi][oi][si]
                    del self.cosp[ci][oi][si][pi]
                    self._removeNestedIndex(self.spo, si, pi, oi, ci)
                    self._removeNestedIndex(self.pos, pi, oi, si, ci)
                    self._removeNestedIndex(self.osp, oi, si, pi, ci)
                    # grr!! hafta ref-count these before you can collect them dumbass!
                    #del f[si], f[pi], f[oi]
                    #del r[subject], r[predicate], r[object]
        else:
            subject, predicate, object = triple
            ci = r.get(context, None)
            if ci:
                for triple, cg in self.triples(triple, context):
                    si, pi, oi = self.identifierToInt(triple)
                    del self.cspo[ci][si][pi][oi]
                    del self.cpos[ci][pi][oi][si]
                    del self.cosp[ci][oi][si][pi]
                    try:
                        self._removeNestedIndex(self.spo, si, pi, oi, ci)
                        self._removeNestedIndex(self.pos, pi, oi, si, ci)
                        self._removeNestedIndex(self.osp, oi, si, pi, ci)
                    except KeyError:
                        # the context may be a quoted one in which
                        # there will not be a triple in spo, pos or
                        # osp. So ignore any KeyErrors
                        pass
                # TODO delete references to resources in self.forward/self.reverse
                # that are not in use anymore...
            if subject is None and predicate is None and object is None:
                # remove context
                try:
                    ci = self.reverse[context]
                    del self.cspo[ci], self.cpos[ci], self.cosp[ci]
                except KeyError:
                    # TODO: no exception when removing non-existant context?
                    pass
    def triples(self, triple, context=None):
        """A generator over all the triples matching """
        # Resolve identifiers to integer keys up front, then pick the index
        # whose leading keys are bound; absent identifiers mean no matches.
        if context is not None:
            if context == self:
                context = None
        subject, predicate, object = triple
        ci = si = pi = oi = Any
        if context is None:
            spo = self.spo
            pos = self.pos
            osp = self.osp
        else:
            try:
                ci = self.reverse[context] # TODO: Really ignore keyerror here
                spo = self.cspo[ci]
                pos = self.cpos[ci]
                osp = self.cosp[ci]
            except KeyError:
                return
        try:
            if subject is not Any:
                si = self.reverse[subject] # throws keyerror if subject doesn't exist ;(
            if predicate is not Any:
                pi = self.reverse[predicate]
            if object is not Any:
                oi = self.reverse[object]
        except KeyError, e:
            return #raise StopIteration
        if si != Any: # subject is given
            if spo.has_key(si):
                subjectDictionary = spo[si]
                if pi != Any: # subject+predicate is given
                    if subjectDictionary.has_key(pi):
                        if oi!= Any: # subject+predicate+object is given
                            if subjectDictionary[pi].has_key(oi):
                                ss, pp, oo = self.intToIdentifier((si, pi, oi))
                                yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
                            else: # given object not found
                                pass
                        else: # subject+predicate is given, object unbound
                            for o in subjectDictionary[pi].keys():
                                ss, pp, oo = self.intToIdentifier((si, pi, o))
                                yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
                    else: # given predicate not found
                        pass
                else: # subject given, predicate unbound
                    for p in subjectDictionary.keys():
                        if oi != Any: # object is given
                            if subjectDictionary[p].has_key(oi):
                                ss, pp, oo = self.intToIdentifier((si, p, oi))
                                yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
                            else: # given object not found
                                pass
                        else: # object unbound
                            for o in subjectDictionary[p].keys():
                                ss, pp, oo = self.intToIdentifier((si, p, o))
                                yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
            else: # given subject not found
                pass
        elif pi != Any: # predicate is given, subject unbound
            if pos.has_key(pi):
                predicateDictionary = pos[pi]
                if oi != Any: # predicate+object is given, subject unbound
                    if predicateDictionary.has_key(oi):
                        for s in predicateDictionary[oi].keys():
                            ss, pp, oo = self.intToIdentifier((s, pi, oi))
                            yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
                    else: # given object not found
                        pass
                else: # predicate is given, object+subject unbound
                    for o in predicateDictionary.keys():
                        for s in predicateDictionary[o].keys():
                            ss, pp, oo = self.intToIdentifier((s, pi, o))
                            yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
        elif oi != Any: # object is given, subject+predicate unbound
            if osp.has_key(oi):
                objectDictionary = osp[oi]
                for s in objectDictionary.keys():
                    for p in objectDictionary[s].keys():
                        ss, pp, oo = self.intToIdentifier((s, p, oi))
                        yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
        else: # subject+predicate+object unbound
            for s in spo.keys():
                subjectDictionary = spo[s]
                for p in subjectDictionary.keys():
                    for o in subjectDictionary[p].keys():
                        ss, pp, oo = self.intToIdentifier((s, p, o))
                        yield (ss, pp, oo), (c for c in self.contexts((ss, pp, oo)))
    def __len__(self, context=None):
        # Full scan of the matching triples; see TODO below.
        if context is not None:
            if context == self:
                context = None
        # TODO: for eff. implementation
        count = 0
        for triple, cg in self.triples((Any, Any, Any), context):
            count += 1
        return count
    def contexts(self, triple=None):
        # Contexts containing `triple`, or all contexts when no triple is
        # given.
        if triple:
            si, pi, oi = self.identifierToInt(triple)
            for ci in self.spo[si][pi][oi]:
                yield self.forward[ci]
        else:
            for ci in self.cspo.keys():
                yield self.forward[ci]
import random
def randid(randint=random.randint, choice=random.choice, signs=(-1,1)):
    """Return a random non-zero integer key in [-2000000000, 2000000000].

    The RNG callables are bound as defaults at definition time so that the
    module-level ``random`` name can be deleted right below.
    """
    sign = choice(signs)
    magnitude = randint(1, 2000000000)
    return sign * magnitude
del random
| 36.738434 | 92 | 0.50758 |
5dade8eeb2af37457bcc7cd82e980bf254e746b4 | 1,492 | py | Python | homeassistant/components/websocket_api/permissions.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/websocket_api/permissions.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/websocket_api/permissions.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Permission constants for the websocket API.
Separate file to avoid circular imports.
"""
from __future__ import annotations
from typing import Final
from homeassistant.components.frontend import EVENT_PANELS_UPDATED
from homeassistant.components.lovelace.const import EVENT_LOVELACE_UPDATED
from homeassistant.components.persistent_notification import (
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED,
)
from homeassistant.components.shopping_list import EVENT as EVENT_SHOPPING_LIST_UPDATED
from homeassistant.const import (
EVENT_COMPONENT_LOADED,
EVENT_CORE_CONFIG_UPDATE,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_STATE_CHANGED,
EVENT_THEMES_UPDATED,
)
from homeassistant.helpers.area_registry import EVENT_AREA_REGISTRY_UPDATED
from homeassistant.helpers.device_registry import EVENT_DEVICE_REGISTRY_UPDATED
from homeassistant.helpers.entity_registry import EVENT_ENTITY_REGISTRY_UPDATED
# These are events that do not contain any sensitive data
# Except for state_changed, which is handled accordingly.
SUBSCRIBE_ALLOWLIST: Final[set[str]] = {
EVENT_AREA_REGISTRY_UPDATED,
EVENT_COMPONENT_LOADED,
EVENT_CORE_CONFIG_UPDATE,
EVENT_DEVICE_REGISTRY_UPDATED,
EVENT_ENTITY_REGISTRY_UPDATED,
EVENT_LOVELACE_UPDATED,
EVENT_PANELS_UPDATED,
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_SHOPPING_LIST_UPDATED,
EVENT_STATE_CHANGED,
EVENT_THEMES_UPDATED,
}
| 33.909091 | 87 | 0.835791 |
e102fc5cccbcf6941fee158c3a2b9046dc0dd6d6 | 997 | py | Python | lib_rovpp/tests/yaml_system_tests/subprefix_graph_13/test_subprefix_prefix_01.py | jfuruness/lib_rovpp | 67032c2dc296fa1804a8305d8cb671339b8e45e0 | [
"BSD-3-Clause"
] | 1 | 2021-09-27T14:20:12.000Z | 2021-09-27T14:20:12.000Z | lib_rovpp/tests/yaml_system_tests/subprefix_graph_13/test_subprefix_prefix_01.py | jfuruness/lib_rovpp | 67032c2dc296fa1804a8305d8cb671339b8e45e0 | [
"BSD-3-Clause"
] | null | null | null | lib_rovpp/tests/yaml_system_tests/subprefix_graph_13/test_subprefix_prefix_01.py | jfuruness/lib_rovpp | 67032c2dc296fa1804a8305d8cb671339b8e45e0 | [
"BSD-3-Clause"
] | 1 | 2021-10-01T16:30:33.000Z | 2021-10-01T16:30:33.000Z | from pathlib import Path
from lib_bgp_simulator import BaseGraphSystemTester
from lib_bgp_simulator import BGPSimpleAS
from lib_bgp_simulator import ROVSimpleAS
from lib_bgp_simulator import Graph013
from ..unstable import Unstable
from ....as_classes import ROVPPV1SimpleAS
from ....as_classes import ROVPPV2SimpleAS
from ....as_classes import ROVPPV2aSimpleAS
from ....engine_input import ROVPPSubprefixHijack
class BaseSubPrefix01Tester(Unstable, BaseGraphSystemTester):
    """Shared scenario: subprefix hijack on Graph013 with AS 2 adopting.

    Subclasses only pick the adopting policy class (AdoptASCls).
    """
    # Topology used for the system test.
    GraphInfoCls = Graph013
    # Policy run by non-adopting ASes.
    BaseASCls = BGPSimpleAS
    # Attack scenario injected into the engine.
    EngineInputCls = ROVPPSubprefixHijack
    # Directory holding the YAML ground-truth files for this test.
    base_dir = Path(__file__).parent
    # ASNs that deploy the policy under test.
    adopting_asns = (2, )
# One concrete test per adopting policy; each inherits the full scenario
# from BaseSubPrefix01Tester and only swaps the policy class.
class Test021SubprefixPrefix01(BaseSubPrefix01Tester):
    AdoptASCls = ROVSimpleAS
class Test022SubprefixPrefix01(BaseSubPrefix01Tester):
    AdoptASCls = ROVPPV1SimpleAS
class Test023SubprefixPrefix01(BaseSubPrefix01Tester):
    AdoptASCls = ROVPPV2SimpleAS
class Test024SubprefixPrefix01(BaseSubPrefix01Tester):
    AdoptASCls = ROVPPV2aSimpleAS
| 26.236842 | 61 | 0.822467 |
fe93aed7cc37061c0b4d50fabc8933c6640df7ea | 1,487 | py | Python | texar/tf/modules/encoders/encoder_base.py | jiajunhua/asyml-texar | 22d7b8eea5bd43eef68b615ba87b2e8220bafdf8 | [
"Apache-2.0"
] | 1 | 2020-09-18T04:36:43.000Z | 2020-09-18T04:36:43.000Z | texar/tf/modules/encoders/encoder_base.py | jiajunhua/asyml-texar | 22d7b8eea5bd43eef68b615ba87b2e8220bafdf8 | [
"Apache-2.0"
] | 6 | 2020-09-26T01:31:48.000Z | 2021-08-25T16:13:51.000Z | texar/tf/modules/encoders/encoder_base.py | jiajunhua/asyml-texar | 22d7b8eea5bd43eef68b615ba87b2e8220bafdf8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from texar.tf.module_base import ModuleBase
__all__ = [
"EncoderBase"
]
class EncoderBase(ModuleBase):
    """Abstract base class that all encoder modules derive from."""

    def __init__(self, hparams=None):
        super(EncoderBase, self).__init__(hparams)

    @staticmethod
    def default_hparams():
        """Return a dictionary of hyperparameters with default values.

        Only the module name is defined at this level; subclasses extend it.
        """
        return {"name": "encoder"}

    def _build(self, inputs, *args, **kwargs):
        """Encode the inputs; concrete encoders must override this.

        Args:
            inputs: Inputs to the encoder.
            *args: Other arguments.
            **kwargs: Keyword arguments.

        Returns:
            Encoding results.
        """
        raise NotImplementedError
| 26.553571 | 74 | 0.67922 |
cdd24175e2b1406e8d9e4109017da21ac2da2b34 | 157,610 | py | Python | scipy/special/add_newdocs.py | luispedro/scipy | 0fcbbfdbee1297a93f9fd2335770efe62ff8b5ac | [
"BSD-3-Clause"
] | null | null | null | scipy/special/add_newdocs.py | luispedro/scipy | 0fcbbfdbee1297a93f9fd2335770efe62ff8b5ac | [
"BSD-3-Clause"
] | null | null | null | scipy/special/add_newdocs.py | luispedro/scipy | 0fcbbfdbee1297a93f9fd2335770efe62ff8b5ac | [
"BSD-3-Clause"
] | null | null | null | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
# Registry mapping "place.name" keys to the docstrings added below.
docdict = {}


def get(name):
    """Return the docstring registered under ``name``, or None if absent."""
    return docdict.get(name)


def add_newdoc(place, name, doc):
    """Register ``doc`` in the docstring dictionary under ``place.name``."""
    key = place + '.' + name
    docdict[key] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
http://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
""")
add_newdoc("scipy.special", "airy",
    r"""
    airy(z)
    Airy functions and their derivatives.
    Parameters
    ----------
    z : array_like
        Real or complex argument.
    Returns
    -------
    Ai, Aip, Bi, Bip : ndarrays
        Airy functions Ai and Bi, and their derivatives Aip and Bip.
    Notes
    -----
    The Airy functions Ai and Bi are two independent solutions of
    .. math:: y''(x) = x y(x).
    For real `z` in [-10, 10], the computation is carried out by calling
    the Cephes [1]_ `airy` routine, which uses power series summation
    for small `z` and rational minimax approximations for large `z`.
    Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
    employed. They are computed using power series for :math:`|z| < 1` and
    the following relations to modified Bessel functions for larger `z`
    (where :math:`t \equiv 2 z^{3/2}/3`):
    .. math::
        Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
        Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
        Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
        Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
    See also
    --------
    airye : exponentially scaled Airy functions.
    References
    ----------
    .. [1] Cephes Mathematical Functions Library,
           http://www.netlib.org/cephes/index.html
    .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
           of a Complex Argument and Nonnegative Order",
           http://netlib.org/amos/
    """)
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrik",
    """
    bdtrik(y, n, p)
    Inverse function to `bdtr` with respect to `k`.
    Finds the number of successes `k` such that the sum of the terms 0 through
    `k` of the Binomial probability density for `n` events with probability
    `p` is equal to the given cumulative probability `y`.
    Parameters
    ----------
    y : array_like
        Cumulative probability (probability of `k` or fewer successes in `n`
        events).
    n : array_like
        Number of events (float).
    p : array_like
        Success probability (float).
    Returns
    -------
    k : ndarray
        The number of successes `k` such that `bdtr(k, n, p) = y`.
    See also
    --------
    bdtr
    Notes
    -----
    Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
    cumulative incomplete beta distribution.
    Computation of `k` involves a search for a value that produces the desired
    value of `y`. The search relies on the monotonicity of `y` with `k`.
    Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
    References
    ----------
    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
           Handbook of Mathematical Functions with Formulas,
           Graphs, and Mathematical Tables. New York: Dover, 1972.
    .. [2] Barry Brown, James Lovato, and Kathy Russell,
           CDFLIB: Library of Fortran Routines for Cumulative Distribution
           Functions, Inverses, and Other Parameters.
    """)
add_newdoc("scipy.special", "bdtrin",
    """
    bdtrin(k, y, p)
    Inverse function to `bdtr` with respect to `n`.
    Finds the number of events `n` such that the sum of the terms 0 through
    `k` of the Binomial probability density for events with probability `p` is
    equal to the given cumulative probability `y`.
    Parameters
    ----------
    k : array_like
        Number of successes (float).
    y : array_like
        Cumulative probability (probability of `k` or fewer successes in `n`
        events).
    p : array_like
        Success probability (float).
    Returns
    -------
    n : ndarray
        The number of events `n` such that `bdtr(k, n, p) = y`.
    See also
    --------
    bdtr
    Notes
    -----
    Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
    cumulative incomplete beta distribution.
    Computation of `n` involves a search for a value that produces the desired
    value of `y`. The search relies on the monotonicity of `y` with `n`.
    Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
    References
    ----------
    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
           Handbook of Mathematical Functions with Formulas,
           Graphs, and Mathematical Tables. New York: Dover, 1972.
    .. [2] Barry Brown, James Lovato, and Kathy Russell,
           CDFLIB: Library of Fortran Routines for Cumulative Distribution
           Functions, Inverses, and Other Parameters.
    """)
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
""")
add_newdoc("scipy.special", "btdtria",
    r"""
    btdtria(p, b, x)
    Inverse of `btdtr` with respect to `a`.
    This is the inverse of the beta cumulative distribution function, `btdtr`,
    considered as a function of `a`, returning the value of `a` for which
    `btdtr(a, b, x) = p`, or
    .. math::
        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
    Parameters
    ----------
    p : array_like
        Cumulative probability, in [0, 1].
    b : array_like
        Shape parameter (`b` > 0).
    x : array_like
        The quantile, in [0, 1].
    Returns
    -------
    a : ndarray
        The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
    See Also
    --------
    btdtr : Cumulative density function of the beta distribution.
    btdtri : Inverse with respect to `x`.
    btdtrib : Inverse with respect to `b`.
    Notes
    -----
    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
    The cumulative distribution function `p` is computed using a routine by
    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
    that produces the desired value of `p`. The search relies on the
    monotonicity of `p` with `a`.
    References
    ----------
    .. [1] Barry Brown, James Lovato, and Kathy Russell,
           CDFLIB: Library of Fortran Routines for Cumulative Distribution
           Functions, Inverses, and Other Parameters.
    .. [2] DiDinato, A. R. and Morris, A. H.,
           Algorithm 708: Significant Digit Computation of the Incomplete Beta
           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
    """)
add_newdoc("scipy.special", "btdtrib",
    r"""
    btdtrib(a, p, x)
    Inverse of `btdtr` with respect to `b`.
    This is the inverse of the beta cumulative distribution function, `btdtr`,
    considered as a function of `b`, returning the value of `b` for which
    `btdtr(a, b, x) = p`, or
    .. math::
        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
    Parameters
    ----------
    a : array_like
        Shape parameter (`a` > 0).
    p : array_like
        Cumulative probability, in [0, 1].
    x : array_like
        The quantile, in [0, 1].
    Returns
    -------
    b : ndarray
        The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
    See Also
    --------
    btdtr : Cumulative density function of the beta distribution.
    btdtri : Inverse with respect to `x`.
    btdtria : Inverse with respect to `a`.
    Notes
    -----
    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
    The cumulative distribution function `p` is computed using a routine by
    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
    that produces the desired value of `p`. The search relies on the
    monotonicity of `p` with `b`.
    References
    ----------
    .. [1] Barry Brown, James Lovato, and Kathy Russell,
           CDFLIB: Library of Fortran Routines for Cumulative Distribution
           Functions, Inverses, and Other Parameters.
    .. [2] DiDinato, A. R. and Morris, A. H.,
           Algorithm 708: Significant Digit Computation of the Incomplete Beta
           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
    """)
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \\sqrt(1-m)
is used.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipj",
    """
    ellipj(u, m)
    Jacobian elliptic functions
    Calculates the Jacobian elliptic functions of parameter `m` between
    0 and 1, and real argument `u`.
    Parameters
    ----------
    m : array_like
        Parameter.
    u : array_like
        Argument.
    Returns
    -------
    sn, cn, dn, ph : ndarrays
        The returned functions::
            sn(u|m), cn(u|m), dn(u|m)
        The value `ph` is such that if `u = ellipkinc(ph, m)`,
        then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
    Notes
    -----
    Wrapper for the Cephes [1]_ routine `ellpj`.
    These functions are periodic, with quarter-period on the real axis
    equal to the complete elliptic integral `ellipk(m)`.
    Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`, then
    `sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
    the amplitude of `u`.
    Computation is by means of the arithmetic-geometric mean algorithm,
    except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
    close to 1, the approximation applies only for `phi < pi/2`.
    See also
    --------
    ellipk : Complete elliptic integral of the first kind.
    ellipkinc : Incomplete elliptic integral of the first kind.
    References
    ----------
    .. [1] Cephes Mathematical Functions Library,
           http://www.netlib.org/cephes/index.html
    """)
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt(p)
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
x : float
Points at which to evaluate the polynomial
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
""")
add_newdoc("scipy.special", "eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebyshev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. The Laguerre polynomials are the special case where
:math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
""")
add_newdoc("scipy.special", "eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer `n` and non-negative
`x`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative density function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function.
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where
`gammaincc` is the regularized upper incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc`
is the regularized lower incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) == y``.
""")
add_newdoc("scipy.special", "_gammaln",
"""
Internal function, use ``gammaln`` instead.
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative density function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z \exp(\frac{\imath\pi}{2}))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, x)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`f''(z) + vf'(z) = f(z)`.
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x: array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptitic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptitic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptitic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptitic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
See also
--------
jv : Bessel function of real order and complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
See also
--------
jv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
See also
--------
jv
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
z : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc("scipy.special", "_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(x)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_0^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.wofz(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$wofz(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
z : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array-like
Values in the complex plain at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining ``loggamma`` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas ``loggamma`` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make ``loggama`` useful for working in complex logspace. However,
``loggamma`` necessarily returns complex outputs for real inputs,
so if you want to work only with real numbers use `gammaln`. On
the real line the two functions are related by ``exp(loggamma(x))
= gammasgn(x)*exp(gammaln(x))``, though in practice rounding
errors will introduce small spurious imaginary components in
``exp(loggamma(x))``.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
| 24.816564 | 194 | 0.596548 |
d7747391026b69a1399a5dea21954ed10139c60e | 240 | py | Python | args_kwargs.py | jnwanya/python-basic-lesson | d9afd21c6d2d8914a54210432d01936c630b5ee7 | [
"MIT"
] | null | null | null | args_kwargs.py | jnwanya/python-basic-lesson | d9afd21c6d2d8914a54210432d01936c630b5ee7 | [
"MIT"
] | null | null | null | args_kwargs.py | jnwanya/python-basic-lesson | d9afd21c6d2d8914a54210432d01936c630b5ee7 | [
"MIT"
def my_method(arg1, arg2, arg3):
    """Return the sum of the three positional arguments."""
    partial_total = arg1 + arg2
    return partial_total + arg3
def long_arg_method(*values):
    """Return the arithmetic total of every positional argument (0 if none)."""
    return sum(values)
def kwargs_method(*args, **kwargs):
    """Print the captured positional tuple, then the captured keyword dict."""
    for captured in (args, kwargs):
        print(captured)


kwargs_method(1, 2, 45, name='John', location='UK')
| 16 | 51 | 0.658333 |
0eb910fabfec73f706194e2331f6e71b7f7d512e | 25,639 | py | Python | aicsimageio/formats.py | griffinfujioka/aicsimageio | c49a613dc54381d11237240ba36f0ef54603a7d6 | [
"BSD-3-Clause"
] | null | null | null | aicsimageio/formats.py | griffinfujioka/aicsimageio | c49a613dc54381d11237240ba36f0ef54603a7d6 | [
"BSD-3-Clause"
] | null | null | null | aicsimageio/formats.py | griffinfujioka/aicsimageio | c49a613dc54381d11237240ba36f0ef54603a7d6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Dict, List
###############################################################################
# The order of the readers in this impl dict is important.
#
# Example:
# if TiffReader was placed before OmeTiffReader,
# we would never hit the OmeTiffReader
# Additionally while so many formats can be read by base-imageio
# Our custom reader may be more well-suited for interactions
# Example:
# DefaultReader supports LSM, and all the similar "Tiff-Like"
# formats, but TiffReader does as well and it has better
# dask chunking + metadata parsing than DefaultReader for those formats
# BASE-IMAGEIO FORMATS (with tifffile + non-existant removals)
#
# Pulled using:
# from imageio import formats
# routes = {}
# for f in formats:
# for ext in f.extensions:
# routes[ext[1:]] = ["aicsimageio.readers.default_reader.DefaultReader"]
FORMAT_IMPLEMENTATIONS: Dict[str, List[str]] = {
"1sc": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"2fl": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"3fr": ["aicsimageio.readers.default_reader.DefaultReader"],
"acff": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"acqp": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"afi": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"afm": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"aim": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"al3d": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ali": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"am": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"amiramesh": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ano": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"apl": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"arf": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"array-like": ["aicsimageio.readers.array_like_reader.ArrayLikeReader"],
"arw": ["aicsimageio.readers.default_reader.DefaultReader"],
"avi": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"bay": ["aicsimageio.readers.default_reader.DefaultReader"],
"bif": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"bin": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"bip": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"bmp": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"bmq": ["aicsimageio.readers.default_reader.DefaultReader"],
"bsdf": ["aicsimageio.readers.default_reader.DefaultReader"],
"bufr": ["aicsimageio.readers.default_reader.DefaultReader"],
"bw": ["aicsimageio.readers.default_reader.DefaultReader"],
"c01": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"cap": ["aicsimageio.readers.default_reader.DefaultReader"],
"cat": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"cfg": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ch5": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"cif": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"cine": ["aicsimageio.readers.default_reader.DefaultReader"],
"cr2": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"crw": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"cs1": ["aicsimageio.readers.default_reader.DefaultReader"],
"csv": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ct": ["aicsimageio.readers.default_reader.DefaultReader"],
"ct.img": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"cur": ["aicsimageio.readers.default_reader.DefaultReader"],
"cut": ["aicsimageio.readers.default_reader.DefaultReader"],
"cxd": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"czi": [
"aicsimageio.readers.czi_reader.CziReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"dat": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"db": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"dc2": ["aicsimageio.readers.default_reader.DefaultReader"],
"dcm": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"dcr": ["aicsimageio.readers.default_reader.DefaultReader"],
"dcx": ["aicsimageio.readers.default_reader.DefaultReader"],
"dds": ["aicsimageio.readers.default_reader.DefaultReader"],
"df3": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"dicom": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"dm2": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"dm3": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"dng": ["aicsimageio.readers.default_reader.DefaultReader"],
"drf": ["aicsimageio.readers.default_reader.DefaultReader"],
"dsc": ["aicsimageio.readers.default_reader.DefaultReader"],
"dti": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"dv": [
"aicsimageio.readers.dv_reader.DVReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"ecw": ["aicsimageio.readers.default_reader.DefaultReader"],
"emf": ["aicsimageio.readers.default_reader.DefaultReader"],
"eps": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"epsi": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"erf": ["aicsimageio.readers.default_reader.DefaultReader"],
"exp": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"exr": ["aicsimageio.readers.default_reader.DefaultReader"],
"fake": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"fdf": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"fff": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"ffr": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"fid": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"fit": ["aicsimageio.readers.default_reader.DefaultReader"],
"fits": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"flc": ["aicsimageio.readers.default_reader.DefaultReader"],
"flex": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"fli": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"fpx": ["aicsimageio.readers.default_reader.DefaultReader"],
"frm": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ftc": ["aicsimageio.readers.default_reader.DefaultReader"],
"fts": ["aicsimageio.readers.default_reader.DefaultReader"],
"ftu": ["aicsimageio.readers.default_reader.DefaultReader"],
"fz": ["aicsimageio.readers.default_reader.DefaultReader"],
"g3": ["aicsimageio.readers.default_reader.DefaultReader"],
"gbr": ["aicsimageio.readers.default_reader.DefaultReader"],
"gdcm": ["aicsimageio.readers.default_reader.DefaultReader"],
"gel": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"gif": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"gipl": ["aicsimageio.readers.default_reader.DefaultReader"],
"grey": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"grib": ["aicsimageio.readers.default_reader.DefaultReader"],
"h5": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"hdf": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"hdf5": ["aicsimageio.readers.default_reader.DefaultReader"],
"hdp": ["aicsimageio.readers.default_reader.DefaultReader"],
"hdr": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"hed": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"his": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"htd": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"htm": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"html": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"hx": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"i2i": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ia": ["aicsimageio.readers.default_reader.DefaultReader"],
"icns": ["aicsimageio.readers.default_reader.DefaultReader"],
"ico": ["aicsimageio.readers.default_reader.DefaultReader"],
"ics": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ids": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"iff": ["aicsimageio.readers.default_reader.DefaultReader"],
"iim": ["aicsimageio.readers.default_reader.DefaultReader"],
"iiq": ["aicsimageio.readers.default_reader.DefaultReader"],
"im": ["aicsimageio.readers.default_reader.DefaultReader"],
"im3": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"img": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"imggz": ["aicsimageio.readers.default_reader.DefaultReader"],
"ims": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"inf": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"inr": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ipl": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"ipm": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ipw": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"j2c": ["aicsimageio.readers.default_reader.DefaultReader"],
"j2k": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"jfif": ["aicsimageio.readers.default_reader.DefaultReader"],
"jif": ["aicsimageio.readers.default_reader.DefaultReader"],
"jng": ["aicsimageio.readers.default_reader.DefaultReader"],
"jp2": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"jpc": ["aicsimageio.readers.default_reader.DefaultReader"],
"jpe": ["aicsimageio.readers.default_reader.DefaultReader"],
"jpeg": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"jpf": ["aicsimageio.readers.default_reader.DefaultReader"],
"jpg": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"jpk": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"jpx": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"jxr": ["aicsimageio.readers.default_reader.DefaultReader"],
"k25": ["aicsimageio.readers.default_reader.DefaultReader"],
"kc2": ["aicsimageio.readers.default_reader.DefaultReader"],
"kdc": ["aicsimageio.readers.default_reader.DefaultReader"],
"klb": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"koa": ["aicsimageio.readers.default_reader.DefaultReader"],
"l2d": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"labels": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"lbm": ["aicsimageio.readers.default_reader.DefaultReader"],
"lei": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"lfp": ["aicsimageio.readers.default_reader.DefaultReader"],
"lfr": ["aicsimageio.readers.default_reader.DefaultReader"],
"lif": [
"aicsimageio.readers.lif_reader.LifReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"liff": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"lim": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"lms": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"lsm": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"mdb": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"mdc": ["aicsimageio.readers.default_reader.DefaultReader"],
"mef": ["aicsimageio.readers.default_reader.DefaultReader"],
"mgh": ["aicsimageio.readers.default_reader.DefaultReader"],
"mha": ["aicsimageio.readers.default_reader.DefaultReader"],
"mhd": ["aicsimageio.readers.default_reader.DefaultReader"],
"mic": ["aicsimageio.readers.default_reader.DefaultReader"],
"mkv": ["aicsimageio.readers.default_reader.DefaultReader"],
"mnc": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"mnc2": ["aicsimageio.readers.default_reader.DefaultReader"],
"mng": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"mod": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"mos": ["aicsimageio.readers.default_reader.DefaultReader"],
"mov": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"mp4": ["aicsimageio.readers.default_reader.DefaultReader"],
"mpeg": ["aicsimageio.readers.default_reader.DefaultReader"],
"mpg": ["aicsimageio.readers.default_reader.DefaultReader"],
"mpo": ["aicsimageio.readers.default_reader.DefaultReader"],
"mrc": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"mri": ["aicsimageio.readers.default_reader.DefaultReader"],
"mrw": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"msp": ["aicsimageio.readers.default_reader.DefaultReader"],
"msr": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"mtb": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"mvd2": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"naf": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"nd": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"nd2": [
"aicsimageio.readers.nd2_reader.ND2Reader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"ndpi": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ndpis": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"nef": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"nhdr": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"nia": ["aicsimageio.readers.default_reader.DefaultReader"],
"nii": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"nii.gz": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"niigz": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"npz": ["aicsimageio.readers.default_reader.DefaultReader"],
"nrrd": [
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"nrw": ["aicsimageio.readers.default_reader.DefaultReader"],
"obf": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"oib": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"oif": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"oir": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ome": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ome.tif": [
"aicsimageio.readers.ome_tiff_reader.OmeTiffReader",
"aicsimageio.readers.tiff_reader.TiffReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"ome.tiff": [
"aicsimageio.readers.ome_tiff_reader.OmeTiffReader",
"aicsimageio.readers.tiff_reader.TiffReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"orf": ["aicsimageio.readers.default_reader.DefaultReader"],
"par": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"pbm": ["aicsimageio.readers.default_reader.DefaultReader"],
"pcd": ["aicsimageio.readers.default_reader.DefaultReader"],
"pcoraw": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"pct": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"pcx": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"pef": ["aicsimageio.readers.default_reader.DefaultReader"],
"pfm": ["aicsimageio.readers.default_reader.DefaultReader"],
"pgm": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"pic": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"pict": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"png": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"pnl": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ppm": ["aicsimageio.readers.default_reader.DefaultReader"],
"pr3": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"ps": ["aicsimageio.readers.default_reader.DefaultReader"],
"psd": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"ptx": ["aicsimageio.readers.default_reader.DefaultReader"],
"pxn": ["aicsimageio.readers.default_reader.DefaultReader"],
"pxr": ["aicsimageio.readers.default_reader.DefaultReader"],
"qptiff": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"qtk": ["aicsimageio.readers.default_reader.DefaultReader"],
"r3d": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"raf": ["aicsimageio.readers.default_reader.DefaultReader"],
"ras": ["aicsimageio.readers.default_reader.DefaultReader"],
"raw": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"rcpnl": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"rdc": ["aicsimageio.readers.default_reader.DefaultReader"],
"rec": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"rgb": ["aicsimageio.readers.default_reader.DefaultReader"],
"rgba": ["aicsimageio.readers.default_reader.DefaultReader"],
"rw2": ["aicsimageio.readers.default_reader.DefaultReader"],
"rwl": ["aicsimageio.readers.default_reader.DefaultReader"],
"rwz": ["aicsimageio.readers.default_reader.DefaultReader"],
"scan": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"scn": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"sdt": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"seq": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"sif": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"sld": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"sm2": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"sm3": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"spc": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"spe": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"spi": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"sr2": ["aicsimageio.readers.default_reader.DefaultReader"],
"srf": ["aicsimageio.readers.default_reader.DefaultReader"],
"srw": ["aicsimageio.readers.default_reader.DefaultReader"],
"st": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"sti": ["aicsimageio.readers.default_reader.DefaultReader"],
"stk": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"stp": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"svs": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"swf": ["aicsimageio.readers.default_reader.DefaultReader"],
"sxm": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"targa": ["aicsimageio.readers.default_reader.DefaultReader"],
"tfr": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"tga": [
"aicsimageio.readers.default_reader.DefaultReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
],
"thm": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"tif": [
"aicsimageio.readers.ome_tiff_reader.OmeTiffReader",
"aicsimageio.readers.tiff_reader.TiffReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"tiff": [
"aicsimageio.readers.ome_tiff_reader.OmeTiffReader",
"aicsimageio.readers.tiff_reader.TiffReader",
"aicsimageio.readers.bioformats_reader.BioformatsReader",
"aicsimageio.readers.default_reader.DefaultReader",
],
"tim": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"tnb": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"top": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"txt": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"v": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"vff": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"vms": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"vsi": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"vtk": ["aicsimageio.readers.default_reader.DefaultReader"],
"vws": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"wap": ["aicsimageio.readers.default_reader.DefaultReader"],
"wat": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"wav": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"wbm": ["aicsimageio.readers.default_reader.DefaultReader"],
"wbmp": ["aicsimageio.readers.default_reader.DefaultReader"],
"wdp": ["aicsimageio.readers.default_reader.DefaultReader"],
"webp": ["aicsimageio.readers.default_reader.DefaultReader"],
"wlz": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"wmf": ["aicsimageio.readers.default_reader.DefaultReader"],
"wmv": ["aicsimageio.readers.default_reader.DefaultReader"],
"wpi": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"xbm": ["aicsimageio.readers.default_reader.DefaultReader"],
"xdce": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"xml": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"xpm": ["aicsimageio.readers.default_reader.DefaultReader"],
"xqd": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"xqf": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"xv": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"xys": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"zfp": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"zfr": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"zip": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"zpo": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
"zvi": ["aicsimageio.readers.bioformats_reader.BioformatsReader"],
}
# Maps each reader's fully-qualified class path to the pip requirement
# string that provides it. NOTE(review): presumably consulted to suggest
# an install command when a reader's optional dependency is missing —
# confirm against callers elsewhere in the package.
READER_TO_INSTALL: Dict[str, str] = {
    "aicsimageio.readers.bioformats_reader.BioformatsReader": "bioformats_jar",
    "aicsimageio.readers.default_reader.DefaultReader": "aicsimageio[base-imageio]",
    "aicsimageio.readers.lif_reader.LifReader": "readlif>=0.6.4",
    "aicsimageio.readers.czi_reader.CziReader": "aicspylibczi>=3.0.5",
    "aicsimageio.readers.dv_reader.DVReader": "aicsimageio[dv]",
    "aicsimageio.readers.nd2_reader.ND2Reader": "aicsimageio[nd2]",
}
| 52.755144 | 84 | 0.727954 |
6a7e5c7b4dec5291193355dc96194901de773799 | 3,834 | py | Python | generator.py | kholohan/chexnet | e8cb9bf2365326210d64b09ccfd503a858485941 | [
"MIT"
] | 16 | 2018-12-23T22:19:47.000Z | 2020-08-13T16:30:33.000Z | generator.py | kholohan/chexnet | e8cb9bf2365326210d64b09ccfd503a858485941 | [
"MIT"
] | 21 | 2018-10-18T16:29:49.000Z | 2021-06-16T12:15:58.000Z | generator.py | kholohan/chexnet | e8cb9bf2365326210d64b09ccfd503a858485941 | [
"MIT"
] | 12 | 2018-12-23T22:19:53.000Z | 2020-12-21T12:06:09.000Z | import numpy as np
import os
import pandas as pd
from keras.utils import Sequence
from PIL import Image
from skimage.transform import resize
class AugmentedImageSequence(Sequence):
    """Keras ``Sequence`` yielding batches of (optionally augmented) images.

    Reads image file names and per-class labels from a dataset CSV, loads and
    resizes each image, optionally applies an ``imgaug`` augmentation
    pipeline, and normalizes every batch with ImageNet statistics.
    """

    def __init__(self, dataset_csv_file: str, class_names: list, source_image_dir: str,
                 batch_size=16, target_size=(224, 224), augmenter=None,
                 verbose=0, steps=None, shuffle_on_epoch_end=True, random_state=1):
        """
        Arguments:
            dataset_csv_file {str} -- Path of dataset CSV; must contain an
                "Image Index" column (file names) plus one column per entry
                of `class_names`
            class_names {list} -- List of label column names in the CSV
            source_image_dir {str} -- Directory containing the source images

        Keyword Arguments:
            batch_size {int} -- images per batch (default: {16})
            target_size {tuple} -- (rows, cols) to resize each image to
                (default: {(224, 224)})
            augmenter {imgaug augmenter} -- augmentation pipeline applied per
                batch; skipped when None (default: {None})
            verbose {int} -- verbosity flag (currently unused) (default: {0})
            steps {int} -- batches per epoch; computed from the dataset size
                when None (default: {None})
            shuffle_on_epoch_end {bool} -- reshuffle the dataset after each
                epoch (default: {True})
            random_state {int} -- shuffle seed; incremented each epoch when
                shuffling is enabled (default: {1})
        """
        self.dataset_dataframe = pd.read_csv(dataset_csv_file)
        self.source_image_dir = source_image_dir
        self.batch_size = batch_size
        self.target_size = target_size
        self.augmenter = augmenter
        self.verbose = verbose
        self.shuffle = shuffle_on_epoch_end
        self.random_state = random_state
        self.class_names = class_names
        self.prepare_dataset()
        if steps is None:
            self.steps = int(np.ceil(len(self.x_path) / float(self.batch_size)))
        else:
            # NOTE(review): a non-numeric string here raises ValueError
            self.steps = int(steps)

    def __bool__(self):
        # Keep the generator truthy regardless of __len__.
        return True

    def __len__(self):
        # Number of batches per epoch.
        return self.steps

    def __getitem__(self, idx):
        """Return batch `idx` as an (images, labels) pair of arrays."""
        batch_x_path = self.x_path[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_x = np.asarray([self.load_image(x_path) for x_path in batch_x_path])
        batch_x = self.transform_batch_images(batch_x)
        batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
        return batch_x, batch_y

    def load_image(self, image_file):
        """Load one image as an RGB float array in [0, 1], resized to target_size."""
        image_path = os.path.join(self.source_image_dir, image_file)
        image = Image.open(image_path)
        image_array = np.asarray(image.convert("RGB"))
        image_array = image_array / 255.
        image_array = resize(image_array, self.target_size)
        return image_array

    def transform_batch_images(self, batch_x):
        """
        Augment (if an augmenter is configured) and normalize a batch with
        ImageNet channel statistics.

        Arguments:
            batch_x -- batch of images, shape (N, H, W, 3), values in [0, 1]

        Returns:
            the augmented and normalized batch
        """
        if self.augmenter is not None:
            batch_x = self.augmenter.augment_images(batch_x)
        imagenet_mean = np.array([0.485, 0.456, 0.406])
        imagenet_stddev = np.array([0.229, 0.224, 0.225])
        batch_x = (batch_x - imagenet_mean) / imagenet_stddev
        return batch_x

    def get_y_true(self):
        """
        Return the ground-truth labels for all batches this generator yields.

        Only valid with shuffling disabled: otherwise the row order of self.y
        no longer matches what was served.
        """
        if self.shuffle:
            raise ValueError("""
            You're trying to run get_y_true() when generator option 'shuffle_on_epoch_end is True.
            """)
        return self.y[:self.steps*self.batch_size, :]

    def prepare_dataset(self):
        # Shuffle rows deterministically, then split into file paths and labels.
        df = self.dataset_dataframe.sample(frac=1., random_state=self.random_state)
        self.x_path, self.y = df["Image Index"].values, df[self.class_names].values

    def on_epoch_end(self):
        # Advance the seed so each epoch sees a different shuffle order.
        if self.shuffle:
            self.random_state += 1
            self.prepare_dataset()
| 36.514286 | 98 | 0.608764 |
1445f10be6b7c76c871b537c688c55d7df206c69 | 10,009 | py | Python | scripts/particles.py | Remosy/v2e | efc81cbcc113ca55d1631603323150be5ef8eb30 | [
"MIT"
] | null | null | null | scripts/particles.py | Remosy/v2e | efc81cbcc113ca55d1631603323150be5ef8eb30 | [
"MIT"
] | null | null | null | scripts/particles.py | Remosy/v2e | efc81cbcc113ca55d1631603323150be5ef8eb30 | [
"MIT"
] | null | null | null | # generates many linearly moving particles
# use it like this:
# v2e --leak_rate=0 --shot=0 --cutoff_hz=300 --sigma_thr=.05 --pos_thr=.2 --neg_thr=.2 --dvs_exposure duration .01 --output_folder "g:\Qsync\particles" --overwrite --dvs_aedat2=particles --output_width=346 --output_height=260 --batch=64 --disable_slomo --synthetic_input=scripts.particles --no_preview
# NOTE: There are nonintuitive effects of low contrast dot moving repeatedly over the same circle:
# The dot initially makes events and then appears to disappear. The cause is that the mean level of dot
# is encoded by the baseLogFrame which is initially at zero but increases to code the average of dot and background.
# Then the low contrast of dot causes only a single ON event on first cycle
import argparse
import atexit
import numpy as np
import cv2
import os
from tqdm import tqdm
from v2ecore.base_synthetic_input import base_synthetic_input
from v2ecore.v2e_utils import *
import sys
from typing import Tuple, Optional
logger = logging.getLogger(__name__)
class particles(base_synthetic_input):  # the class name should be the same as the filename, like in Java
    """ Generates moving dots on linear trajectories.

    Each dot spawns on a random edge of the frame with a random inward
    velocity and drifts in a straight line; once it leaves the array it is
    replaced by a fresh dot, so `num_particles` are alive at any moment.
    """
    # Defaults for the command-line options parsed in __init__().
    CONTRAST = 1.2
    TOTAL_TIME = 1
    NUM_PARTICLES = 40
    RADIUS = .25
    DT = 100e-6
    SPEED_MIN = 10
    SPEED_MAX = 1000

    def __init__(self, width: int = 346, height: int = 260, avi_path: Optional[str] = None, preview=False,
                 arg_list=None) -> None:
        """ Constructs moving-dot class to make frames for v2e

        :param width: width of frames in pixels
        :param height: height in pixels
        :param avi_path: folder to write video to, or None if not needed
        :param preview: set true to show the pix array as cv frame
        :param arg_list: optional list of '--option value' strings (see parser below)
        """
        super().__init__(width, height, avi_path, preview, arg_list)
        # NOTE(review): ArgumentParser's first positional parameter is `prog`,
        # so passing arg_list here only affects the usage string; the actual
        # parsing happens in parse_args(arg_list) below.
        parser = argparse.ArgumentParser(arg_list)
        parser.add_argument('--num_particles', type=int, default=particles.NUM_PARTICLES)
        parser.add_argument('--contrast', type=float, default=particles.CONTRAST)
        parser.add_argument('--radius', type=float, default=particles.RADIUS)
        parser.add_argument('--total_time', type=float, default=particles.TOTAL_TIME)
        parser.add_argument('--speed_min', type=float, default=particles.SPEED_MIN)
        parser.add_argument('--speed_max', type=float, default=particles.SPEED_MAX)
        parser.add_argument('--dt', type=float, default=particles.DT)
        args = parser.parse_args(arg_list)
        self.avi_path = avi_path  # to write AVI
        # compare this with pos_thres and neg_thres and sigma_thr,
        # e.g. use 1.2 for dot to be 20% brighter than background
        self.contrast: float = args.contrast
        self.dt = args.dt  # frame interval sec
        self.radius: float = args.radius  # gaussian sigma of dot in pixels
        # moving-particle speed distribution (uniform), pix/s
        self.speed_pps_min = args.speed_min
        self.speed_pps_max = args.speed_max
        self.num_particles = args.num_particles  # alive at any one time
        self.particle_count = 0  # total spawned over the whole run
        self.t_total = args.total_time
        self.particles = []
        for i in range(self.num_particles):
            p = self.particle(width=width, height=height, time=0, radius=self.radius,
                              speed_min=self.speed_pps_min, speed_max=self.speed_pps_max)
            self.particles.append(p)
            self.particle_count += 1
        # computed values below here
        self.times = np.arange(0, self.t_total, self.dt)
        self.w = width
        self.h = height
        self.frame_number = 0
        self.out = None
        self.log = sys.stdout
        self.cv2name = 'v2e'
        self.codec = 'HFYU'
        self.preview = preview
        self.pix_arr: np.ndarray = self.bg * np.ones((self.h, self.w), dtype=np.uint8)
        logger.info(f'speed(pixels/s): {self.speed_pps_min} to {self.speed_pps_max}\n'
                    f'radius(pixels): {self.radius}\n'
                    f'contrast(factor): {self.contrast}\n'
                    f'log_contrast(base_e): {np.log(self.contrast)}\n'
                    f'duration(s): {self.t_total}\n'
                    f'dt(s): {self.dt}\n'
                    f'codec: {self.codec}\n')
        if self.preview:
            cv2.namedWindow(self.cv2name, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(self.cv2name, self.w, self.h)
        atexit.register(self.cleanup)

    def cleanup(self):
        """Log run statistics; registered with atexit in __init__."""
        # self.time is maintained by the base_synthetic_input superclass --
        # TODO confirm it holds the last generated frame time.
        logger.info(f'particles() generated {self.particle_count} particles in {self.time}s')

    class particle():
        """A single dot: a spawn position on a frame edge plus a linear velocity."""

        def __init__(self, width: int, height: int, time: float, radius: float, speed_min, speed_max):
            self.width = width
            self.height = height
            # generate particle on some edge, moving into the array with random velocity
            edge = np.random.randint(0, 4)
            if edge == 0 or edge == 1:  # top/bottom row
                pos_x = np.random.randint(0, width)
                pos_y = 0 if edge == 0 else height
            else:  # left/right column
                pos_y = np.random.randint(0, height)
                pos_x = 0 if edge == 3 else width
            # heading chosen so the velocity points into the frame
            angle_rad = 0
            if edge == 1:
                angle_rad = np.random.uniform(0, -np.pi)
            elif edge == 0:
                angle_rad = np.random.uniform(0, np.pi)
            elif edge == 3:
                angle_rad = np.random.uniform(-np.pi / 2, np.pi / 2)
            elif edge == 2:
                angle_rad = np.random.uniform(np.pi / 2, 3 * np.pi / 2)
            self.position = np.array([pos_x, pos_y])
            self.speed = np.random.uniform(speed_min, speed_max)
            self.velocity = np.array([self.speed * np.cos(angle_rad), self.speed * np.sin(angle_rad)])
            # per-particle contrast is deliberately held right at the nominal
            # event threshold; the --contrast option is not used here -- TODO confirm
            self.contrast = np.random.uniform(1.19, 1.21)
            self.time = time
            self.radius = radius

        def update(self, time: float):
            """Advance the position linearly to the given absolute time."""
            dt = time - self.time
            self.position = self.position + dt * self.velocity
            self.time = time

        def is_out_of_bounds(self):
            """:returns: True when the particle center has left the frame."""
            return self.position[0] < 0 or self.position[0] > self.width \
                or self.position[1] < 0 or self.position[1] > self.height

        def draw(self, pix_arr):
            """Render this particle into pix_arr as a bright disk."""
            bg = base_synthetic_input.BACKGROUND
            fg = int(bg * self.contrast)  # foreground dot brightness
            fill_dot(pix_arr, self.position[0], self.position[1], fg, bg, self.radius)

    def total_frames(self):
        """:returns: total number of frames"""
        return len(self.times)

    def next_frame(self) -> Tuple[Optional[np.ndarray], float]:
        """ Returns the next frame and its time, or None when finished

        :returns: (frame, time)
            If there are no more frames frame is None.
            time is in seconds.
        """
        if self.frame_number >= len(self.times):
            if self.avi_path is not None:
                self.out.release()
            cv2.destroyAllWindows()
            logger.info(f'finished after {self.frame_number} frames having made {self.particle_count} particles')
            return None, self.times[-1]
        time = self.times[self.frame_number]
        self.pix_arr.fill(self.bg)
        # BUGFIX: the original removed elements from self.particles while
        # iterating it, which silently skipped the particle following each
        # removal. Build the next generation in a separate list instead.
        survivors = []
        for p in self.particles:
            if p.is_out_of_bounds():
                # replace the escaped particle with a freshly spawned one,
                # drawn this frame at its spawn position
                newp = particles.particle(self.w, self.h, time, self.radius,
                                          self.speed_pps_min, self.speed_pps_max)
                self.particle_count += 1
                newp.draw(self.pix_arr)
                survivors.append(newp)
            else:
                p.update(time)
                p.draw(self.pix_arr)
                survivors.append(p)
        self.particles = survivors
        if self.preview and self.frame_number % 10 == 0:
            cv2.imshow(self.cv2name, self.pix_arr)
        if self.avi_path is not None:
            self.out.write(cv2.cvtColor(self.pix_arr, cv2.COLOR_GRAY2BGR))
        if self.preview and self.frame_number % 50 == 0:
            k = cv2.waitKey(1)
            if k == ord('x'):
                logger.warning('aborted output after {} frames'.format(self.frame_number))
                cv2.destroyAllWindows()
                return None, time
        self.frame_number += 1
        return (self.pix_arr, time)
@njit
def fill_dot(pix_arr: np.ndarray, x: float, y: float, fg: int, bg: int, radius: float):
    """ Generates intensity values for the 'dot'

    Draws a clipped-Gaussian disk (peak value fg, falling off to bg) centered
    at the possibly-fractional pixel location (x, y), writing into pix_arr.
    :param pix_arr: the 2d pixel array to fill values to
    :param x: center of dot x in pixels
    :param y: center of dot y in pixels
    :param fg: the foreground intensity (peak value) of center of dot
    :param bg: the background value outside of dot that we approach at edge of dot
    :param radius: the sigma of Gaussian, i.e. radius of dot
    """
    x0, y0 = round(x), round(y)
    d=int(radius * 5)  # half-width (pixels) of the square region scanned around the dot
    for iy in range(-d, +d):
        for ix in range(-d, +d):
            thisx, thisy = int(x0 + ix), int(y0 + iy)
            # bounds check, remember that cv2 uses y for first axis
            if thisx<0 or thisx>=pix_arr.shape[1] or thisy<0 or thisy>=pix_arr.shape[0]:
                continue
            ddx, ddy = thisx - x, thisy - y # distances of this pixel to float dot location
            dist2 = ddx * ddx + ddy * ddy # square distance
            # The 10x gain plus the clamp below deliberately flattens the
            # Gaussian peak into a disk of roughly `radius` extent.
            v = 10 * np.exp(-dist2 / (radius * radius)) # gaussian normalized intensity value
            if v > 1: # make a disk, not a gaussian blob
                v = 1
            elif v < .01:
                v = 0
            v = bg + (fg - bg) * v # intensity value from 0-1 intensity
            pix_arr[thisy][thisx] = v
if __name__ == "__main__":
    # Smoke test: generate all frames with default options, showing progress.
    m = particles()
    (fr, time) = m.next_frame()
    with tqdm(total=m.total_frames(), desc='moving-dot', unit='fr') as pbar:  # instantiate progress bar
        while fr is not None:
            (fr, time) = m.next_frame()
            pbar.update(1)
| 42.591489 | 301 | 0.618743 |
061f5a0abf2d8c6f8dbc6f25b814caa8b38c5613 | 15,422 | py | Python | pw_build_mcuxpresso/py/pw_build_mcuxpresso/components.py | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 86 | 2021-03-09T23:49:40.000Z | 2022-03-30T08:14:51.000Z | pw_build_mcuxpresso/py/pw_build_mcuxpresso/components.py | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 4 | 2021-07-27T20:32:03.000Z | 2022-03-08T10:39:07.000Z | pw_build_mcuxpresso/py/pw_build_mcuxpresso/components.py | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 22 | 2021-03-11T15:15:47.000Z | 2022-02-09T06:16:36.000Z | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Finds components for a given manifest."""
from typing import Any, List, Optional, Tuple
import pathlib
import sys
import xml.etree.ElementTree
def _gn_str_out(name: str, val: Any):
    """Print a GN string assignment of the form ``name = "val"``."""
    line = '{} = "{}"'.format(name, val)
    print(line)
def _gn_list_str_out(name: str, val: List[Any]):
    """Print a GN list of strings, escaping embedded '"' and '$' characters."""
    quoted = []
    for item in val:
        text = str(item).replace('"', r'\"').replace('$', r'\$')
        quoted.append('"%s"' % text)
    print('%s = [%s]' % (name, ','.join(quoted)))
def _gn_list_path_out(name: str,
                      val: List[pathlib.Path],
                      path_prefix: Optional[str] = None):
    """Print a GN list of paths, each prefixed with ``path_prefix``/ if given."""
    if path_prefix is None:
        as_strings = [str(p) for p in val]
    else:
        as_strings = ['{}/{}'.format(path_prefix, p) for p in val]
    _gn_list_str_out(name, as_strings)
def get_component(
    root: xml.etree.ElementTree.Element, component_id: str
) -> Tuple[Optional[xml.etree.ElementTree.Element], Optional[pathlib.Path]]:
    """Find a <component> element by id together with its package base path.

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        (element, base_path) for the component, or (None, None) when no
        component with that id exists. base_path is None when the component
        carries no package_base_path attribute.
    """
    element = root.find(f'./components/component[@id="{component_id}"]')
    if element is None:
        return (None, None)
    base = element.get('package_base_path')
    if base is None:
        return (element, None)
    return (element, pathlib.Path(base))
def parse_defines(root: xml.etree.ElementTree.Element,
                  component_id: str) -> List[str]:
    """Return the pre-processor definitions of a component.

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of "NAME" or "NAME=VALUE" strings for the component.
    """
    xpath = f'./components/component[@id="{component_id}"]/defines/define'
    return [_parse_define(element) for element in root.findall(xpath)]
def _parse_define(define: xml.etree.ElementTree.Element) -> str:
    """Format a single <define> element as "NAME" or "NAME=VALUE"."""
    name = define.attrib['name']
    value = define.get('value')
    return name if value is None else f'{name}={value}'
def parse_include_paths(root: xml.etree.ElementTree.Element,
                        component_id: str) -> List[pathlib.Path]:
    """Return the include directories of a component.

    Covers both C and assembler include paths, in that order.

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of include directories for the component.
    """
    (component, base_path) = get_component(root, component_id)
    if component is None:
        return []
    result: List[pathlib.Path] = []
    for include_type in ('c_include', 'asm_include'):
        xpath = f'./include_paths/include_path[@type="{include_type}"]'
        for element in component.findall(xpath):
            result.append(_parse_include_path(element, base_path))
    return result
def _parse_include_path(include_path: xml.etree.ElementTree.Element,
                        base_path: Optional[pathlib.Path]) -> pathlib.Path:
    """Return the relative_path of an <include_path>, under base_path if given."""
    relative = pathlib.Path(include_path.attrib['relative_path'])
    return relative if base_path is None else base_path / relative
def parse_headers(root: xml.etree.ElementTree.Element,
                  component_id: str) -> List[pathlib.Path]:
    """Return the header files (<source type="c_include">) of a component.

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of header file paths for the component.
    """
    return _parse_sources(root, component_id, 'c_include')
def parse_sources(root: xml.etree.ElementTree.Element,
                  component_id: str) -> List[pathlib.Path]:
    """Return the compilable source files of a component.

    Collects <source> entries of every source-code type, in a fixed order.

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of source file paths for the component.
    """
    collected: List[pathlib.Path] = []
    for source_type in ('src', 'src_c', 'src_cpp', 'asm_include'):
        collected += _parse_sources(root, component_id, source_type)
    return collected
def parse_libs(root: xml.etree.ElementTree.Element,
               component_id: str) -> List[pathlib.Path]:
    """Return the pre-compiled libraries (<source type="lib">) of a component.

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of pre-compiled library paths for the component.
    """
    return _parse_sources(root, component_id, 'lib')
def _parse_sources(root: xml.etree.ElementTree.Element, component_id: str,
                   source_type: str) -> List[pathlib.Path]:
    """Collect <source type="{source_type}"> file paths for a component.

    Each file path is the source's relative_path joined with the file mask,
    both under the component's package base path when one is present.

    Args:
        root: root of element tree.
        component_id: id of component to return.
        source_type: type attribute of <source> elements to search for.

    Returns:
        list of matching file paths for the component.
    """
    (component, base_path) = get_component(root, component_id)
    if component is None:
        return []
    found: List[pathlib.Path] = []
    for source in component.findall(f'./source[@type="{source_type}"]'):
        directory = pathlib.Path(source.attrib['relative_path'])
        if base_path is not None:
            directory = base_path / directory
        for files in source.findall('./files'):
            found.append(directory / files.attrib['mask'])
    return found
def parse_dependencies(root: xml.etree.ElementTree.Element,
                       component_id: str) -> List[str]:
    """Return the required dependencies of a component.

    Optional (<any_of>) dependencies are ignored here; they must be included
    explicitly by the caller.

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of component ids the component depends on.
    """
    xpath = f'./components/component[@id="{component_id}"]/dependencies/*'
    required: List[str] = []
    for element in root.findall(xpath):
        required += _parse_dependency(element)
    return required
def _parse_dependency(dependency: xml.etree.ElementTree.Element) -> List[str]:
    """Flatten one dependency element into a list of required component ids.

    <component_dependency> yields its value; <all> recurses into all
    children; <any_of> (optional dependencies) and unknown tags yield
    nothing.

    Args:
        dependency: XML Element of dependency.

    Returns:
        list of component id dependencies.
    """
    tag = dependency.tag
    if tag == 'component_dependency':
        return [dependency.attrib['value']]
    if tag == 'all':
        required: List[str] = []
        for child in dependency:
            required.extend(_parse_dependency(child))
        return required
    # <any_of> is explicitly ignored; unknown tags contribute nothing either.
    return []
def check_dependencies(root: xml.etree.ElementTree.Element,
                       component_id: str,
                       include: List[str],
                       exclude: Optional[List[str]] = None) -> bool:
    """Verify that a component's dependency expressions are all satisfied.

    A dependency is satisfied by components listed in `include` or
    (explicitly) in `exclude`.

    Args:
        root: root of element tree.
        component_id: id of component to check.
        include: list of component ids included in the project.
        exclude: list of component ids explicitly excluded from the project.

    Returns:
        True if all dependencies are satisfied, False otherwise.
    """
    xpath = f'./components/component[@id="{component_id}"]/dependencies/*'
    return all(
        _check_dependency(element, include, exclude=exclude)
        for element in root.findall(xpath))
def _check_dependency(dependency: xml.etree.ElementTree.Element,
                      include: List[str],
                      exclude: Optional[List[str]] = None) -> bool:
    """Check whether one dependency element is satisfied.

    A <component_dependency> is satisfied by membership in `include` or
    `exclude`; <all> requires every child; <any_of> requires at least one
    child and reports the failing subtree on stderr; unknown tags pass.

    Args:
        dependency: XML Element of dependency.
        include: list of component ids included in the project.
        exclude: list of component ids explicitly excluded from the project.

    Returns:
        True if the dependency is satisfied, False if not.
    """
    tag = dependency.tag
    if tag == 'component_dependency':
        wanted = dependency.attrib['value']
        if wanted in include:
            return True
        return exclude is not None and wanted in exclude
    if tag == 'all':
        return all(
            _check_dependency(child, include, exclude=exclude)
            for child in dependency)
    if tag == 'any_of':
        if any(_check_dependency(child, include, exclude=exclude)
               for child in dependency):
            return True
        tree = xml.etree.ElementTree.tostring(dependency).decode('utf-8')
        print(f'Unsatisfied dependency from: {tree}', file=sys.stderr)
        return False
    # Unknown dependency tag type.
    return True
def create_project(
    root: xml.etree.ElementTree.Element,
    include: List[str],
    exclude: Optional[List[str]] = None
) -> Tuple[List[str], List[str], List[pathlib.Path], List[pathlib.Path],
           List[pathlib.Path], List[pathlib.Path]]:
    """Create a project from a list of specified components.

    Expands `include` with the transitive required dependencies of every
    component, skipping anything listed in `exclude`, then gathers defines,
    include paths, headers, sources and libraries for the resulting set.

    Args:
        root: root of element tree.
        include: list of component ids included in the project.
        exclude: list of component ids excluded from the project.

    Returns:
        (component_ids, defines, include_paths, headers, sources, libs) for
        the project.
    """
    # BUGFIX: the original aliased `pending_list = include` and popped from
    # it, emptying the caller's list as a side effect. Work on a copy.
    project_list: List[str] = []
    pending_list = list(include)
    while pending_list:
        component_id = pending_list.pop(0)
        if component_id in project_list:
            continue
        if exclude is not None and component_id in exclude:
            continue
        project_list.append(component_id)
        pending_list.extend(parse_dependencies(root, component_id))
    defines: List[str] = []
    include_paths: List[pathlib.Path] = []
    headers: List[pathlib.Path] = []
    sources: List[pathlib.Path] = []
    libs: List[pathlib.Path] = []
    for component_id in project_list:
        defines += parse_defines(root, component_id)
        include_paths += parse_include_paths(root, component_id)
        headers += parse_headers(root, component_id)
        sources += parse_sources(root, component_id)
        libs += parse_libs(root, component_id)
    return (project_list, defines, include_paths, headers, sources, libs)
def project(manifest_path: pathlib.Path,
            include: Optional[List[str]] = None,
            exclude: Optional[List[str]] = None,
            path_prefix: Optional[str] = None):
    """Print the GN scope for a project built from the given components.

    Args:
        manifest_path: path to SDK manifest XML.
        include: list of component ids included in the project.
        exclude: list of component ids excluded from the project.
        path_prefix: string prefix to prepend to all paths.
    """
    assert include is not None, "Project must include at least one component."
    root = xml.etree.ElementTree.parse(manifest_path).getroot()
    (component_ids, defines, include_dirs, headers, sources, libs) = \
        create_project(root, include, exclude=exclude)
    # Bail out (printing nothing) when any component has unmet dependencies;
    # check_dependencies itself reports the failure on stderr.
    for component_id in component_ids:
        if not check_dependencies(
                root, component_id, component_ids, exclude=exclude):
            return
    _gn_list_str_out('defines', defines)
    _gn_list_path_out('include_dirs', include_dirs, path_prefix=path_prefix)
    _gn_list_path_out('public', headers, path_prefix=path_prefix)
    _gn_list_path_out('sources', sources, path_prefix=path_prefix)
    _gn_list_path_out('libs', libs, path_prefix=path_prefix)
5033761e12b3058c3de02f355c9fa7becda6c1f7 | 1,037 | py | Python | examples/response/alert_bulk_resolve.py | redcanaryco/cbapi-python | 18d332d5b0a7868c6929746b0b02c73545bbd0f9 | [
"MIT"
] | 2 | 2017-07-19T07:24:28.000Z | 2021-07-23T03:57:08.000Z | examples/response/alert_bulk_resolve.py | redcanaryco/cbapi-python | 18d332d5b0a7868c6929746b0b02c73545bbd0f9 | [
"MIT"
] | null | null | null | examples/response/alert_bulk_resolve.py | redcanaryco/cbapi-python | 18d332d5b0a7868c6929746b0b02c73545bbd0f9 | [
"MIT"
] | 3 | 2019-03-12T23:01:28.000Z | 2021-07-23T03:57:22.000Z | #!/usr/bin/env python
import sys
from cbapi.response.models import Alert
from cbapi.example_helpers import build_cli_parser, get_cb_response_object
import time
def main():
    """Resolve every unresolved Cb Response alert matching --query."""
    parser = build_cli_parser("Bulk resolve alerts")
    parser.add_argument("--query", action="store", default="",
                        help="The query string of alerts to resolve. All matching alerts will be resolved.")
    args = parser.parse_args()
    cb = get_cb_response_object(args)
    unresolved = cb.select(Alert).where("-status:Resolved").where(args.query)
    alert_count = len(unresolved)
    if alert_count == 0:
        print("Congratulations! You have no unresolved alerts!")
        return
    print("Resolving {0:d} alerts...".format(alert_count))
    unresolved.change_status("Resolved")
    print("Waiting for alert changes to take effect...")
    time.sleep(25)
    print("Complete. Resolved {0:d} alerts.".format(alert_count))
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code (None -> 0).
    sys.exit(main())
| 28.027027 | 108 | 0.677917 |
12efc19bb57c682000d09a6bf30d566dfc349b5e | 300 | py | Python | lambda/functions/virtualmail/common/exceptions.py | virtualmail/virtualmail | c960cda1131848cc34dfd7f153e1d586afce930a | [
"MIT"
] | null | null | null | lambda/functions/virtualmail/common/exceptions.py | virtualmail/virtualmail | c960cda1131848cc34dfd7f153e1d586afce930a | [
"MIT"
] | null | null | null | lambda/functions/virtualmail/common/exceptions.py | virtualmail/virtualmail | c960cda1131848cc34dfd7f153e1d586afce930a | [
"MIT"
] | null | null | null | class PassthroughException(Exception):
def __init__(self, e, when, text=None, stop_processing=False):
self.msg = 'Exception "{}" while {}'.format(e, when)
self.e = e
self.when = when
self.text = text
self.stop_processing = stop_processing
super().__init__(self.msg) | 33.333333 | 64 | 0.66 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.