from os import path
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3.s3crud import S3CRUD
from s3.s3filter import S3DateFilter, S3OptionsFilter, S3TextFilter
from s3.s3utils import s3_avatar_represent
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
return homepage()
# =============================================================================
class datalist():
""" Alternate URL for homepage """
def __call__(self):
return homepage()
# =============================================================================
class datalist_dl_post():
""" AJAX URL for CMS Posts (for Homepage) """
def __call__(self):
return homepage()
# =============================================================================
def homepage():
"""
Custom Homepage
- DataList of CMS Posts
"""
if not current.auth.is_logged_in():
return login()
T = current.T
s3db = current.s3db
request = current.request
response = current.response
s3 = response.s3
current.deployment_settings.ui.customize_cms_post()
list_layout = render_homepage_posts
filter_widgets = [S3TextFilter(["body"],
label="",
_class="filter-search",
_placeholder=T("Search").upper()),
S3OptionsFilter("series_id",
label=T("Filter by Type"),
represent="%(name)s",
cols=3),
S3OptionsFilter("location_id",
label=T("Filter by Location"),
represent="%(name)s",
widget="multiselect",
cols=3),
S3OptionsFilter("created_by$organisation_id",
label=T("Filter by Organization"),
represent="%(name)s",
widget="multiselect",
cols=3),
S3DateFilter("created_on",
label=T("Filter by Date")),
]
s3db.configure("cms_post",
filter_formstyle = filter_formstyle,
filter_submit = (T("Filter Results"), "btn btn-primary"),
filter_widgets = filter_widgets,
list_layout = list_layout,
)
s3.dl_pagelength = 6 # 5 forces an AJAX call
if "datalist_dl_post" in request.args:
ajax = True
else:
ajax = False
def prep(r):
if ajax:
r.representation = "dl"
return True
s3.prep = prep
request.args = ["datalist"]
output = current.rest_controller("cms", "post",
list_ajaxurl = URL(f="index", args="datalist_dl_post"))
if ajax:
response.view = "plain.html"
else:
form = output["form"]
# Remove duplicate Submit button
form[0][-1] = ""
if form.errors:
s3.jquery_ready.append('''$("#myModal").modal("show")''')
# Set Title & View after REST Controller, in order to override
output["title"] = response.title = current.deployment_settings.get_system_name()
view = path.join(request.folder, "private", "templates",
"CSN", "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
# Latest 5 Disasters
resource = s3db.resource("event_event")
list_fields = ["name",
"zero_hour",
"closed",
]
orderby = resource.get_config("list_orderby",
~resource.table.created_on)
datalist, numrows, ids = resource.datalist(fields=list_fields,
start=None,
limit=5,
listid="event_datalist",
orderby=orderby,
layout=render_homepage_events)
if numrows == 0:
# Empty table or just no match?
table = resource.table
if "deleted" in table:
available_records = current.db(table.deleted != True)
else:
available_records = current.db(table._id > 0)
if available_records.select(table._id,
limitby=(0, 1)).first():
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_no_match"),
_class="empty")
else:
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_list_empty"),
_class="empty")
data = msg
else:
# Render the list
dl = datalist.html()
data = dl
output["disasters"] = data
return output
# -----------------------------------------------------------------------------
def login():
"""
Custom Login page
"""
response = current.response
request = current.request
view = path.join(request.folder, "private", "templates",
"CSN", "views", "login.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = current.T("Login")
request.args = ["login"]
auth = current.auth
auth.settings.formstyle = "bootstrap"
login = auth()
return dict(
form = login
)
# -----------------------------------------------------------------------------
def filter_formstyle(row_id, label, widget, comment):
"""
Custom Formstyle for FilterForm
@param row_id: HTML id for the row
@param label: the label
@param widget: the form widget
@param comment: the comment
"""
if label:
return DIV(TR(label),
TR(widget))
else:
return widget
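# Illustrative behaviour of the formstyle above (a sketch assuming gluon's
# LABEL/INPUT helpers; this call is not part of the original template):
#
#     filter_formstyle("filter-row", LABEL("Type"), INPUT(_name="series"), None)
#     # -> DIV(TR(LABEL("Type")), TR(INPUT(_name="series")))
#
# Without a label, the widget is returned unwrapped.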
# -----------------------------------------------------------------------------
def render_homepage_posts(listid, resource, rfields, record, **attr):
"""
Custom dataList item renderer for CMS Posts on the Homepage
@param listid: the HTML ID for this list
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
@param attr: additional HTML attributes for the item
"""
pkey = "cms_post.id"
# Construct the item ID
listid = "datalist"
if pkey in record:
record_id = record[pkey]
item_id = "%s-%s" % (listid, record_id)
else:
# template
item_id = "%s-[id]" % listid
item_class = "thumbnail"
db = current.db
raw = record._row
series = record["cms_post.series_id"]
date = record["cms_post.created_on"]
body = record["cms_post.body"]
location = record["cms_post.location_id"]
location_id = raw["cms_post.location_id"]
location_url = URL(c="gis", f="location", args=[location_id])
# Attachment(s)?
document = raw["doc_document.file"]
if document:
doc_url = URL(c="default", f="download",
args=[document]
)
doc_link = A(I(_class="icon icon-paper-clip fright"),
_href=doc_url)
else:
doc_link = ""
if series not in ("News", "Twitter", "Ushahidi", "YouTube"):
# We expect an Author
author = record["cms_post.created_by"]
author_id = raw["cms_post.created_by"]
organisation = record["auth_user.organisation_id"]
organisation_id = raw["auth_user.organisation_id"]
org_url = URL(c="org", f="organisation", args=[organisation_id])
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
avatar = s3_avatar_represent(author_id,
_class="media-object",
_style="width:50px;padding:5px;padding-top:0px;")
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
avatar = A(avatar,
_href=person_url,
_class="pull-left",
)
card_person = DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
doc_link,
_class="card-person",
)
else:
# No Author
card_person = DIV(doc_link,
_class="card-person",
)
avatar = None
if series == "News":
icon = URL(c="static", f="img",
args=["markers", "gis_marker.image.News.png"])
elif series == "Twitter":
icon = URL(c="static", f="img", args=["social", "twitter.png"])
elif series == "Ushahidi":
icon = URL(c="static", f="img",
args=["markers", "gis_marker.image.Ushahidi.png"])
elif series == "YouTube":
#icon = URL(c="static", f="img", args=["social", "YouTube.png"])
avatar = DIV(IFRAME(_width=320,
_height=180,
_src=raw["cms_post.comments"],
_frameborder=0),
_class="pull-left"
)
if not avatar:
avatar = DIV(IMG(_src=icon,
_class="media-object",
_style="width:50px;padding:5px;padding-top:0px;",
),
_class="pull-left")
# Edit Bar
permit = current.auth.s3_has_permission
table = db.cms_post
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post", args=[record_id, "update"]),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_href=URL(c="cms", f="post",
args=[record_id, "delete"]),
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
if series == "Alert":
item_class = "%s disaster" % item_class
# Overall layout
item = DIV(DIV(I(SPAN(" %s" % current.T(series),
_class="card-title",
),
_class="icon icon-%s" % series.lower(),
),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
card_person,
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
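# Example of the item ID scheme used above (illustrative): a post with
# record id 42 renders as <div id="datalist-42" class="thumbnail">, while
# the layout template keeps the "datalist-[id]" placeholder so the
# client-side dataList code can substitute real record ids.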
# -----------------------------------------------------------------------------
def render_homepage_events(listid, resource, rfields, record, **attr):
"""
        Custom dataList item renderer for Events on the Homepage
@param listid: the HTML ID for this list
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
@param attr: additional HTML attributes for the item
"""
pkey = "event_event.id"
# Construct the item ID
listid = "event_datalist"
if pkey in record:
item_id = "%s-%s" % (listid, record[pkey])
else:
# template
item_id = "%s-[id]" % listid
item_class = "thumbnail"
raw = record._row
record_id = raw["event_event.id"]
name = record["event_event.name"]
date = record["event_event.zero_hour"]
closed = raw["event_event.closed"]
if closed:
edit_bar = DIV()
else:
item_class = "%s disaster" % item_class
# @ToDo: Check Permissions
edit_bar = DIV(A(I(" ",
_class="icon icon-edit",
),
_href=URL(c="event", f="event", args=[record_id]),
),
A(I(" ",
_class="icon icon-remove-sign",
),
_href=URL(c="event", f="event",
args=[record_id, "delete"]),
),
_class="edit-bar fright",
)
# Render the item
item = DIV(edit_bar,
H5(name),
SPAN(date,
_class="date-title",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
class secondary():
""" Custom Navigation """
def __call__(self):
view = path.join(current.request.folder, "private", "templates",
"CSN", "views", "secondary.html")
try:
# Pass view as file not str to work in compiled mode
current.response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
return dict()
# END =========================================================================
|
{
"content_hash": "2dd03ebc7423813b1133ac80be40cd14",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 121,
"avg_line_length": 34.04157549234136,
"alnum_prop": 0.4302243363116282,
"repo_name": "flavour/tldrmp",
"id": "7a95d726a58adb4058a96ab9b2fafb34919db3ba",
"size": "15582",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "private/templates/CSN/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1155062"
},
{
"name": "JavaScript",
"bytes": "16011189"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "25852675"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2003150"
}
],
"symlink_target": ""
}
|
"""Logging
"""
import sys
import os
import logging
from pip._vendor import colorama, pkg_resources
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
def should_color(consumer, environ, std=(sys.stdout, sys.stderr)):
real_consumer = (
consumer if not isinstance(consumer, colorama.AnsiToWin32)
else consumer.wrapped
)
# If consumer isn't stdout or stderr we shouldn't colorize it
if real_consumer not in std:
return False
# If consumer is a tty we should color it
if hasattr(real_consumer, "isatty") and real_consumer.isatty():
return True
    # If we have an ANSI term we should color it
if environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
def should_warn(current_version, removal_version):
    # We use two significant digits for versions, so strip everything but
    # the first two components.
current_version = ".".join(current_version.split(".")[:2])
removal_version = ".".join(removal_version.split(".")[:2])
# Our warning threshold is one minor version before removal, so we
# decrement the minor version by one
major, minor = removal_version.split(".")
minor = str(int(minor) - 1)
warn_version = ".".join([major, minor])
# Test if our current_version should be a warn
return (pkg_resources.parse_version(current_version)
< pkg_resources.parse_version(warn_version))
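# Worked example (illustrative, not part of the original module):
# should_warn("1.5.1", "1.7") truncates both versions to two components
# ("1.5" and "1.7"), decrements the removal minor to get warn_version
# "1.6", and returns True because 1.5 < 1.6 -- so deprecated() would WARN.
# With current_version "1.6" the comparison 1.6 < 1.6 fails and
# deprecated() escalates to ERROR instead.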
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
VERBOSE_DEBUG = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO + logging.WARN) / 2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
COLORS = {
WARN: _color_wrap(colorama.Fore.YELLOW),
ERROR: _color_wrap(colorama.Fore.RED),
FATAL: _color_wrap(colorama.Fore.RED),
}
def __init__(self):
self.consumers = []
self.indent = 0
self.explicit_levels = False
self.in_progress = None
self.in_progress_hanging = False
def add_consumers(self, *consumers):
if sys.platform.startswith("win"):
for level, consumer in consumers:
if hasattr(consumer, "write"):
self.consumers.append(
(level, colorama.AnsiToWin32(consumer)),
)
else:
self.consumers.append((level, consumer))
else:
self.consumers.extend(consumers)
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def deprecated(self, removal_version, msg, *args, **kwargs):
"""
        Logs a deprecation message, which is log level WARN if the
        ``removal_version`` is more than one minor release away, and log
        level ERROR otherwise.
removal_version should be the version that the deprecated feature is
expected to be removed in, so something that will not exist in
version 1.7, but will in 1.6 would have a removal_version of 1.7.
"""
from pip import __version__
if should_warn(__version__, removal_version):
self.warn(msg, *args, **kwargs)
else:
self.error(msg, *args, **kwargs)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
# render
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' ' * self.indent + rendered
if self.explicit_levels:
# FIXME: should this be a name, not a level number?
rendered = '%02i %s' % (level, rendered)
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if hasattr(consumer, 'write'):
write_content = rendered + '\n'
if should_color(consumer, os.environ):
# We are printing to stdout or stderr and it supports
# colors so render our text colored
colorizer = self.COLORS.get(level, lambda x: x)
write_content = colorizer(write_content)
consumer.write(write_content)
if hasattr(consumer, 'flush'):
consumer.flush()
else:
consumer(rendered)
def _show_progress(self):
"""Should we display download progress?"""
return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self._show_progress():
sys.stdout.write(' ' * self.indent + msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
self.last_message = None
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self._show_progress():
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
# These erase any messages shown with show_progress
# (besides .'s)
logger.show_progress('')
logger.show_progress('')
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self, message=None):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
if message is None:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.last_message:
padding = ' ' * max(
0,
len(self.last_message) - len(message)
)
else:
padding = ''
sys.stdout.write(
'\r%s%s%s%s' %
(' ' * self.indent, self.in_progress, message, padding)
)
sys.stdout.flush()
self.last_message = message
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger()
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
            if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
def move_stdout_to_stderr(self):
to_remove = []
to_add = []
for consumer_level, consumer in self.consumers:
if consumer == sys.stdout:
to_remove.append((consumer_level, consumer))
to_add.append((consumer_level, sys.stderr))
for item in to_remove:
self.consumers.remove(item)
self.consumers.extend(to_add)
logger = Logger()
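# Minimal usage sketch (an assumed wiring, not code from pip itself):
# attach stdout as a consumer at NOTIFY level, then log at two levels.
if __name__ == "__main__":
    logger.add_consumers((Logger.NOTIFY, sys.stdout))
    logger.notify("visible: NOTIFY matches the consumer level")
    logger.debug("suppressed: DEBUG is below the consumer level")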
|
{
"content_hash": "751c7ff06c7c01d5853990e215fc5815",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 33.69257950530035,
"alnum_prop": 0.545883586785527,
"repo_name": "EnviroCentre/jython-upgrade",
"id": "5a5fd9cdce17426bad33de67bac61579d970bc57",
"size": "9535",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "jython/lib/site-packages/pip/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "112532"
},
{
"name": "NSIS",
"bytes": "1982"
},
{
"name": "PowerShell",
"bytes": "216"
},
{
"name": "Python",
"bytes": "15437225"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import geo_target_constant_service
from .base import GeoTargetConstantServiceTransport, DEFAULT_CLIENT_INFO
class GeoTargetConstantServiceGrpcTransport(GeoTargetConstantServiceTransport):
"""gRPC backend transport for GeoTargetConstantService.
Service to fetch geo target constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = (
grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = (
grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def suggest_geo_target_constants(
self,
) -> Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
]:
r"""Return a callable for the suggest geo target constants method over gRPC.
Returns GeoTargetConstant suggestions by location name or by
resource name.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__
`GeoTargetConstantSuggestionError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.SuggestGeoTargetConstantsRequest],
~.SuggestGeoTargetConstantsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "suggest_geo_target_constants" not in self._stubs:
self._stubs[
"suggest_geo_target_constants"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.GeoTargetConstantService/SuggestGeoTargetConstants",
request_serializer=geo_target_constant_service.SuggestGeoTargetConstantsRequest.serialize,
response_deserializer=geo_target_constant_service.SuggestGeoTargetConstantsResponse.deserialize,
)
return self._stubs["suggest_geo_target_constants"]
def close(self):
self.grpc_channel.close()
__all__ = ("GeoTargetConstantServiceGrpcTransport",)
|
{
"content_hash": "48994daefc42c52952568a08cba58aee",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 112,
"avg_line_length": 44.416666666666664,
"alnum_prop": 0.6032747740064813,
"repo_name": "googleads/google-ads-python",
"id": "09cc1f2b7ddf4a4f407b6a48bc7329370c39c975",
"size": "12326",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v10/services/services/geo_target_constant_service/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
extensions = [
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tsuru'
copyright = u'2015, Globo.com'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.11'
# The full version, including alpha/beta/rc tags.
release = '0.11.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'tsuru'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['theme']
import os
if not os.environ.get('READTHEDOCS', None):
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tsurudoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tsuru.tex', u'tsuru Documentation',
u'timeredbull', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tsuru', u'tsuru Documentation',
[u'timeredbull'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tsuru', u'tsuru Documentation',
u'timeredbull', 'tsuru', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
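# Typical build invocation for this configuration (an illustration, not part
# of the project files; assumes this conf.py lives in a ``docs`` directory):
#
#     sphinx-build -b html docs docs/_build/html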
|
{
"content_hash": "b7615f67d0d6344dc016b493516280ec",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 80,
"avg_line_length": 30.723214285714285,
"alnum_prop": 0.7008137169427492,
"repo_name": "RichardKnop/tsuru",
"id": "0286a6082072a28ce25b1a3deb78790bdf324447",
"size": "7901",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Go",
"bytes": "2262657"
},
{
"name": "HTML",
"bytes": "315"
},
{
"name": "Makefile",
"bytes": "3751"
},
{
"name": "Shell",
"bytes": "7915"
}
],
"symlink_target": ""
}
|
from trtools.tools.attrdict import attrdict
|
{
"content_hash": "5ccd422bd854b93c50719e75e96feb83",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 43,
"avg_line_length": 44,
"alnum_prop": 0.8636363636363636,
"repo_name": "dalejung/trtools",
"id": "8bea7cf7b684f96fe638800ec2679a787cd23ca8",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trtools/tools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "336302"
}
],
"symlink_target": ""
}
|
import numpy as np
from pyspark.mllib.common import callMLlibFunc
from pyspark.rdd import RDD
class KernelDensity(object):
"""
Estimate probability density at required points given an RDD of samples
from the population.
>>> kd = KernelDensity()
>>> sample = sc.parallelize([0.0, 1.0])
>>> kd.setSample(sample)
>>> kd.estimate([0.0, 1.0])
array([ 0.12938758, 0.12938758])
"""
def __init__(self):
self._bandwidth = 1.0
self._sample = None
def setBandwidth(self, bandwidth):
"""Set bandwidth of each sample. Defaults to 1.0"""
self._bandwidth = bandwidth
def setSample(self, sample):
"""Set sample points from the population. Should be a RDD"""
if not isinstance(sample, RDD):
raise TypeError("samples should be a RDD, received %s" % type(sample))
self._sample = sample
def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities)
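# Usage sketch with an explicit bandwidth (assumes an active SparkContext
# named ``sc``, as in the class doctest above):
#
#     kd = KernelDensity()
#     kd.setBandwidth(3.0)
#     kd.setSample(sc.parallelize([0.0, 1.0, 2.0]))
#     kd.estimate([1.0])  # -> numpy array with a single density value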
|
{
"content_hash": "ecd5a1dc97405ffdb8476f0548d15af4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 82,
"avg_line_length": 31.2972972972973,
"alnum_prop": 0.6278065630397237,
"repo_name": "wzhfy/spark",
"id": "56444c152f0ba0cb1a7df2b6b65976789654a6fc",
"size": "1943",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/pyspark/mllib/stat/KernelDensity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "50609"
},
{
"name": "Batchfile",
"bytes": "25763"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "24294"
},
{
"name": "Dockerfile",
"bytes": "9556"
},
{
"name": "HTML",
"bytes": "40561"
},
{
"name": "HiveQL",
"bytes": "1890746"
},
{
"name": "Java",
"bytes": "4213400"
},
{
"name": "JavaScript",
"bytes": "218161"
},
{
"name": "Jupyter Notebook",
"bytes": "31865"
},
{
"name": "Makefile",
"bytes": "1591"
},
{
"name": "PLSQL",
"bytes": "7715"
},
{
"name": "PLpgSQL",
"bytes": "389551"
},
{
"name": "PowerShell",
"bytes": "3879"
},
{
"name": "Python",
"bytes": "3330124"
},
{
"name": "R",
"bytes": "1238296"
},
{
"name": "Roff",
"bytes": "36740"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "34437552"
},
{
"name": "Shell",
"bytes": "219852"
},
{
"name": "TSQL",
"bytes": "483581"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
}
|
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('problem', '__first__'),
('account', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=192, unique=True, verbose_name='标题')),
('description', models.TextField(blank=True, verbose_name='内容')),
('create_time', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now_add=True)),
('register_start_time', models.DateTimeField(blank=True, verbose_name='开始注册时间')),
('register_end_time', models.DateTimeField(blank=True, verbose_name='结束注册时间')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Contest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=192, verbose_name='标题')),
('description', models.TextField(blank=True, verbose_name='描述')),
('allowed_lang', models.CharField(default='c,cc14,cc17,cpp,java,pas,py2,pypy,pypy3,python,text', max_length=192, verbose_name='允许语言')),
('contest_type', models.IntegerField(choices=[(0, '常规比赛'), (1, '作业')], default=0)),
('start_time', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True, verbose_name='开始时间')),
('end_time', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True, verbose_name='结束时间')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('standings_update_time', models.DateTimeField(blank=True, null=True, verbose_name='榜单更新时间')),
('freeze', models.BooleanField(default=False, verbose_name='是否封榜')),
('freeze_time', models.DateTimeField(blank=True, null=True, verbose_name='封榜时间')),
('scoring_method', models.CharField(choices=[('acm', 'ACM 赛制'), ('oi', 'OI 赛制'), ('cf', 'School of Data Analysis (SDA) 赛制 (Codeforces...)'), ('tcmtime', 'TCM/TIME 赛制 (GCJ...)')], default='acm', max_length=10, verbose_name='计分规则')),
('run_tests_during_contest', models.CharField(choices=[('all', '所有测试点'), ('pretest', '只测试 Pretests'), ('sample', '只测试样例'), ('none', '不作测试')], default='all', max_length=10, verbose_name='比赛过程中对代码进行评测')),
('allow_code_share', models.IntegerField(choices=[(0, '不允许'), (1, '代码在赛后对 AC 用户公开(默认)'), (2, '代码在赛后完全公开'), (3, '代码在比赛过程中对 AC 用户公开')], default=1, verbose_name='允许代码共享')),
('last_counts', models.BooleanField(default=False, verbose_name='认为最后一次提交有效(默认使用成绩最好的)')),
('penalty_counts', models.PositiveIntegerField(default=1200, verbose_name='错误提交罚时(秒)')),
('standings_without_problem', models.BooleanField(default=False, verbose_name='排行榜上不显示具体题目的通过情况')),
('case_public', models.PositiveIntegerField(choices=[(0, '不允许'), (1, '评测报告有偿公开'), (2, '评测报告总是开放')], default=0)),
('system_tested', models.BooleanField(default=False, verbose_name='系统测试准备就绪')),
('access_level', models.PositiveIntegerField(choices=[(0, '仅比赛管理员可见'), (10, '仅受邀用户可见,赛后题目不公开'), (20, '仅受邀用户可见,赛后题目直接公开'), (30, '公开,需要比赛前注册'), (40, '公开')], default=0, verbose_name='访问控制')),
('common_status_access_level', models.IntegerField(choices=[(-10, '不可见'), (0, '默认'), (10, '总是可见')], default=0, verbose_name='所有提交和榜单的访问控制')),
('ip_sensitive', models.BooleanField(default=False, verbose_name='首次登录绑定 IP')),
('analysis_blog_id', models.IntegerField(default=0, verbose_name='题解博客 ID')),
('pdf_statement', models.FileField(blank=True, null=True, upload_to='contest_statements/%Y%m%d/', verbose_name='PDF 题面')),
('authors', models.ManyToManyField(related_name='written_contests', to=settings.AUTH_USER_MODEL)),
('managers', models.ManyToManyField(related_name='managing_contests', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-pk'],
},
),
migrations.CreateModel(
name='ContestProblemPlag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fingerprint', models.CharField(max_length=100)),
('status', models.IntegerField(choices=[(-1, 'Pending'), (0, 'Ready'), (1, 'Failed')])),
('identifier', models.CharField(blank=True, max_length=20)),
('language', models.CharField(default='c/c++', max_length=20)),
('keep_match', models.PositiveIntegerField(default=20)),
('create_time', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contest.Contest')),
],
),
migrations.CreateModel(
name='ContestProblem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(max_length=12)),
('weight', models.IntegerField(default=100)),
('ac_user_count', models.PositiveIntegerField(default=0)),
('total_user_count', models.PositiveIntegerField(default=0)),
('ac_count', models.PositiveIntegerField(default=0)),
('total_count', models.PositiveIntegerField(default=0)),
('first_yes_time', models.DurationField(blank=True, null=True)),
('first_yes_by', models.PositiveIntegerField(blank=True, null=True)),
('max_score', models.FloatField(default=0)),
('avg_score', models.FloatField(default=0)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contest.Contest')),
('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='problem.Problem')),
],
options={
'ordering': ['identifier'],
'unique_together': {('problem', 'contest')},
},
),
migrations.CreateModel(
name='ContestParticipant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('star', models.BooleanField(default=False)),
('comment', models.TextField(blank=True)),
('hidden_comment', models.TextField(blank=True)),
('score', models.IntegerField(default=0)),
('penalty', models.BigIntegerField(default=0)),
('detail_raw', models.TextField(blank=True)),
('is_disabled', models.BooleanField(default=False)),
('ip_address', models.GenericIPAddressField(blank=True, null=True)),
('join_time', models.DateTimeField(blank=True, null=True)),
('is_confirmed', models.BooleanField(default=False)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contest.Contest')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-is_confirmed', '-score', 'penalty', 'star'),
'unique_together': {('user', 'contest')},
},
),
migrations.CreateModel(
name='ContestClarification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True)),
('time', models.DateTimeField(auto_now=True)),
('important', models.BooleanField(default=False)),
('answer', models.TextField(blank=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contest.Contest')),
],
options={
'ordering': ['-time'],
},
),
migrations.AddField(
model_name='contest',
name='participants',
field=models.ManyToManyField(related_name='contests', through='contest.ContestParticipant', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='contest',
name='problems',
field=models.ManyToManyField(through='contest.ContestProblem', to='problem.Problem'),
),
migrations.AddField(
model_name='contest',
name='volunteers',
field=models.ManyToManyField(related_name='volunteering_contests', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='ActivityParticipant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('real_name', models.CharField(max_length=30, verbose_name='真实姓名')),
('student_id', models.CharField(max_length=30, verbose_name='学号')),
('email', models.CharField(max_length=192, validators=[django.core.validators.EmailValidator()], verbose_name='电子邮箱')),
('phone', models.CharField(blank=True, max_length=30, verbose_name='电话')),
('major', models.CharField(blank=True, choices=[('art', '艺术'), ('accounting', '会计'), ('business', '商业'), ('business_admin', '工商管理'), ('chemistry', '化学'), ('communication', '通信'), ('ce', '计算机工程'), ('cs', '计算机科学'), ('economics', '经济'), ('education', '教育'), ('ee', '电子工程'), ('finance', '金融'), ('geology', '地理'), ('interaction', '人机交互'), ('it', '信息技术'), ('life', '生命科学'), ('mechanics', '机械'), ('linguistics', '语言学'), ('literature', '文学'), ('math', '数学'), ('se', '软件工程'), ('philosophy', '哲学'), ('physics', '物理'), ('politics', '政治学'), ('psycho', '心理学'), ('social', '社会学'), ('translation', '翻译'), ('others', '其他')], max_length=30, verbose_name='专业')),
('gender', models.CharField(blank=True, choices=[('m', '男'), ('f', '女'), ('d', '拒绝回答')], max_length=5, verbose_name='性别')),
('graduate_year', models.IntegerField(blank=True, null=True, verbose_name='毕业年份')),
('is_deleted', models.BooleanField(default=False, verbose_name='已删除')),
('is_confirmed', models.BooleanField(default=False, verbose_name='已确认')),
('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contest.Activity')),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.School', verbose_name='学校')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'activity')},
},
),
migrations.AddField(
model_name='activity',
name='participants',
field=models.ManyToManyField(related_name='activities', through='contest.ActivityParticipant', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='ContestUserRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField(default=1500)),
('solved', models.IntegerField()),
('rank', models.IntegerField()),
('modified', models.DateTimeField()),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contest.Contest')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-modified'],
'unique_together': {('contest', 'user')},
},
),
migrations.CreateModel(
name='ContestInvitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('star', models.BooleanField(default=False)),
('code', models.CharField(max_length=24)),
('comment', models.TextField(blank=True)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contest.Contest')),
],
options={
'ordering': ['-pk'],
'unique_together': {('contest', 'code')},
},
),
]
|
{
"content_hash": "ad49505b030ce2629e9612dbe2ad58c9",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 660,
"avg_line_length": 64.93333333333334,
"alnum_prop": 0.5739953065415078,
"repo_name": "ultmaster/eoj3",
"id": "9d77c53b8d30caa65d651472db072b6fa2ec5e1a",
"size": "14515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contest/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "34134"
},
{
"name": "CSS",
"bytes": "16519"
},
{
"name": "HTML",
"bytes": "257689"
},
{
"name": "JavaScript",
"bytes": "60776"
},
{
"name": "Python",
"bytes": "767732"
},
{
"name": "TeX",
"bytes": "21976"
}
],
"symlink_target": ""
}
|
import base64
import os
import random
import re
import subprocess
import tempfile
import time
import pymongo
CHAR_RNN_DIR = '/home/music/char-rnn'
ABCM2PS_PATH = '/home/music/abcm2ps/abcm2ps'
ABC2MIDI_PATH = '/home/music/abcmidi/abc2midi'
PNG_OUTPUT_PATH = '/var/nn/png'
DATA_LENGTH = 8192
MONGO_URL = 'mongodb://localhost:27017/neural-noise'
client = pymongo.MongoClient(MONGO_URL)
db = client.get_default_database()
RE_REPEAT = re.compile(r'\|:(.*?):\|')
RE_TITLE = re.compile('T:(.*)')
RE_COMPOSER = re.compile('C:(.*)')
def get_generated_data(temperature, cp_path):
cmdline = ('th sample.lua cv/%(file)s -primetext "X:1" -length %(length)s '
'-temperature %(temp)s -gpuid -1 -verbose 0 -seed %(seed)s' % {
'file': cp_path,
'length': DATA_LENGTH,
'temp': temperature,
'seed': int(float(time.time()) * 1000)
})
pipe = subprocess.Popen(
cmdline, executable='/bin/bash', shell=True, cwd=CHAR_RNN_DIR,
stdout=subprocess.PIPE)
data, _ = pipe.communicate()
return data
section_count = 0
def strip_and_count_sections(song):
def process_section(md):
global section_count
if md.group(1):
section_count += 1
return md.group(1)
return RE_REPEAT.sub(process_section, song)
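# Worked example (illustrative): for the fragment '|:abc:| d |:efg:|',
# RE_REPEAT matches both repeat sections, process_section keeps the inner
# text, and the call returns 'abc d efg' with section_count incremented
# twice.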
def get_generated_songs(temperature, cp_path):
global section_count
data = get_generated_data(temperature, cp_path)
songs = data.split('X:1\n')
for song in songs:
if not song:
continue
section_count = 0
song = strip_and_count_sections(song)
if section_count < 2:
continue
song = 'X:1\n' + song
yield song
def insert_song(song, checkpoint, fields=None):
if not fields:
fields = {}
with tempfile.NamedTemporaryFile() as f:
f.write(song)
f.flush()
with tempfile.NamedTemporaryFile() as m:
args = [ABC2MIDI_PATH, f.name, '-silent', '-o', m.name]
subprocess.call(args)
m.seek(0)
midi = m.read()
data = {
'random': random.random(),
'created_at': int(time.time()),
'checkpoint': checkpoint,
'abc': song,
'midi': base64.b64encode(midi),
}
data.update(fields)
db.songs.insert(data)
# Convert from abc to SVG, then to PNG
png_path = os.path.join(PNG_OUTPUT_PATH, str(data['_id']) + '.png')
with tempfile.NamedTemporaryFile() as s:
args = [ABCM2PS_PATH, '-q', '-O', s.name, f.name]
subprocess.call(args)
args = ['convert', '-density', '100', '-trim', s.name, png_path]
subprocess.call(args)
def insert_generated_songs(temperature, cp_path):
count = 0
for song in get_generated_songs(temperature, cp_path):
count += 1
insert_song(song, cp_path, fields={
'temperature': temperature
})
return count
def fill_minimum(temperature, cp_path, min_):
num = 0
while num < min_:
num += insert_generated_songs(temperature, cp_path)
return num
def fill_all_temps(cp_path, min_):
db.checkpoints.update({'name': {'$eq': cp_path}}, {'name': cp_path}, True)
for i in range(50, 105, 5):
temperature = str(i/100.0)
num = fill_minimum(temperature, cp_path, min_)
print '%s songs inserted, temperature=%s' % (num, temperature)
# Utility method that is not called from the main process of producing songs.
def fill_from_disk(dir_path, checkpoint):
count = 0
for dirpath, dirnames, filenames in os.walk(dir_path):
for filename in filenames:
if filename.endswith('.abc'):
full_path = os.path.join(dirpath, filename)
with open(full_path) as f:
song = strip_and_count_sections(f.read())
fields = {
'file': filename,
}
                md = RE_TITLE.search(song)
                if md:
                    fields['title'] = md.group(1)
                md = RE_COMPOSER.search(song)
                if md:
                    fields['composer'] = md.group(1)
print 'Inserting song "%s":%s' % (fields.get('title'), fields['file'])
insert_song(song, checkpoint, fields=fields)
count += 1
print '%s songs inserted, checkpoint=%s' % (count, checkpoint)
# Another utility method, for when songs have outlived their welcome
def purge_checkpoint(checkpoint):
count = 0
for song in db.songs.find({'checkpoint': {'$eq': checkpoint}}):
db.songs.remove(song)
count += 1
db.checkpoints.remove({'name': checkpoint})
print '%s songs removed, checkpoint %s purged' % (count, checkpoint)
if __name__ == '__main__':
import sys
    min_ = 100
    if len(sys.argv) > 1:
        cp_path = sys.argv[1]
    else:
        print 'usage: produce.py <path> [num per temp]'
        sys.exit(1)
    if len(sys.argv) > 2:
        min_ = int(sys.argv[2])
    full_cp_path = os.path.join(CHAR_RNN_DIR, 'cv', cp_path)
    if not os.path.isfile(full_cp_path):
        print 'Path %s for checkpoint file could not be found' % full_cp_path
        print 'usage: produce.py <path> [num per temp]'
        sys.exit(1)
    fill_all_temps(cp_path, min_)
|
{
"content_hash": "bd0e12cdd27973762e0bd62821f9a321",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 28.88235294117647,
"alnum_prop": 0.6177189409368635,
"repo_name": "audiodude/neural-noise",
"id": "202ce42296a70710bd3455b22801ea46750a26cc",
"size": "4910",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "producer/produce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "923909"
},
{
"name": "HTML",
"bytes": "12376"
},
{
"name": "JavaScript",
"bytes": "170956"
},
{
"name": "Python",
"bytes": "17548"
},
{
"name": "Shell",
"bytes": "334"
}
],
"symlink_target": ""
}
|
"""This code example gets all content.
This feature is only available to DFP video publishers.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
content_service = client.GetService('ContentService', version='v201502')
# Create statement object to select all content.
statement = dfp.FilterStatement()
# Get content by statement.
while True:
response = content_service.getContentByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for content_item in response['results']:
print ('Content with id \'%s\', name \'%s\', and status \'%s\' was '
'found.' % (content_item['id'], content_item['name'],
content_item['status']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
{
"content_hash": "49ca55fe2bfd2495f83a7ea18580da0a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 31.3953488372093,
"alnum_prop": 0.6874074074074074,
"repo_name": "wubr2000/googleads-python-lib",
"id": "c22cf0bbd34433c0d122100737b9aad2690b95c1",
"size": "1968",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201502/content_service/get_all_content.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0009_auto_20161026_0651'),
]
operations = [
migrations.AddField(
model_name='cheatcode',
name='admin_only',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "926e93eb06af6f9e567335c9a9190b78",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 21.27777777777778,
"alnum_prop": 0.5953002610966057,
"repo_name": "ej2/pixelpuncher",
"id": "9f07b18daeed66a1cbfcc998a8d8e9a3947957da",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixelpuncher/game/migrations/0010_cheatcode_admin_only.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "155880"
},
{
"name": "HTML",
"bytes": "108414"
},
{
"name": "JavaScript",
"bytes": "29178"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "282954"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
}
|
from stargazer import StarGazer
__all__ = ['StarGazer']  # __all__ entries must be strings, not the objects
|
{
"content_hash": "b5a75f8a79f13d18cc2c08fec574e57d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 31,
"avg_line_length": 19,
"alnum_prop": 0.7017543859649122,
"repo_name": "personalrobotics/stargazer",
"id": "07a760af7af3f60f23f58b79759fe9b4adde2db8",
"size": "57",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/stargazer/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "230"
},
{
"name": "Python",
"bytes": "19678"
}
],
"symlink_target": ""
}
|
import json
from django.core.urlresolvers import reverse
from rest_framework import serializers
import mkt
from mkt.access import acl
from mkt.api.fields import ReverseChoiceField
from mkt.files.models import FileUpload
from mkt.webapps.models import Preview, Webapp
class AppStatusSerializer(serializers.ModelSerializer):
status = ReverseChoiceField(choices_dict=mkt.STATUS_CHOICES_API,
required=False)
disabled_by_user = serializers.BooleanField(required=False)
allowed_statuses = {
# You can push to the pending queue.
mkt.STATUS_NULL: [mkt.STATUS_PENDING],
# Approved apps can be public or unlisted.
mkt.STATUS_APPROVED: [mkt.STATUS_PUBLIC, mkt.STATUS_UNLISTED],
# Public apps can choose to become private (APPROVED) or unlisted.
mkt.STATUS_PUBLIC: [mkt.STATUS_APPROVED, mkt.STATUS_UNLISTED],
# Unlisted apps can become private or public.
mkt.STATUS_UNLISTED: [mkt.STATUS_APPROVED, mkt.STATUS_PUBLIC],
}
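    # For example, a PUBLIC app may become private (APPROVED) or UNLISTED, but
    # it can never move straight back to PENDING; validate_status() below
    # enforces this table for non-admin users.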
class Meta:
model = Webapp
fields = ('status', 'disabled_by_user')
def validate_status(self, status):
        # Admins can change any status, so skip validation for them.
        # It's dangerous, but with great powers comes great responsibility.
if ('request' in self.context and self.context['request'].user and
acl.action_allowed(self.context['request'], 'Admin', '%')):
return status
# An incomplete app's status can not be changed.
if not self.instance.is_fully_complete():
raise serializers.ValidationError(
self.instance.completion_error_msgs())
        # Only some specific changes are possible depending on the app's
        # current status.
if (self.instance.status not in self.allowed_statuses or
status not in self.allowed_statuses[self.instance.status]):
raise serializers.ValidationError(
'App status can not be changed to the one you specified.')
return status
class FileUploadSerializer(serializers.ModelSerializer):
id = serializers.CharField(source='pk', read_only=True)
processed = serializers.BooleanField(read_only=True)
class Meta:
model = FileUpload
fields = ('id', 'processed', 'valid', 'validation')
def to_representation(self, obj):
data = super(FileUploadSerializer, self).to_representation(obj)
if obj.validation:
data['validation'] = json.loads(obj.validation)
return data
class PreviewSerializer(serializers.ModelSerializer):
filetype = serializers.CharField()
id = serializers.IntegerField(source='pk')
image_url = serializers.CharField(read_only=True)
resource_uri = serializers.SerializerMethodField()
thumbnail_url = serializers.CharField(read_only=True)
class Meta:
model = Preview
fields = ['filetype', 'image_url', 'id', 'resource_uri',
'thumbnail_url']
def get_resource_uri(self, obj):
if obj:
return reverse('app-preview-detail', kwargs={'pk': obj})
class SimplePreviewSerializer(PreviewSerializer):
class Meta(PreviewSerializer.Meta):
fields = ['filetype', 'id', 'image_url', 'thumbnail_url']
class FeedPreviewESSerializer(PreviewSerializer):
"""
Preview serializer for feed where we want to know the image orientation to
scale feed app tiles appropriately.
"""
id = serializers.IntegerField()
thumbnail_size = serializers.ReadOnlyField()
class Meta(PreviewSerializer.Meta):
fields = ['id', 'thumbnail_size', 'thumbnail_url']
|
{
"content_hash": "c34a15510bfeb348513df76326d97c0e",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 35.96078431372549,
"alnum_prop": 0.6706652126499455,
"repo_name": "ingenioustechie/zamboni",
"id": "8d339cae0098bdc5441308b7259204de967dc0a8",
"size": "3668",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mkt/submit/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "354315"
},
{
"name": "HTML",
"bytes": "2379391"
},
{
"name": "JavaScript",
"bytes": "529996"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4672122"
},
{
"name": "Shell",
"bytes": "11147"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
"""This module contains all functionality on the level of a single audio file.
"""
import errno
import os
import re
import shutil
import traceback
from typing import Any, Dict, List, Literal, Optional
import phrydy
from tmep import Functions, Template
from tmep.format import asciify, delchars, deldupchars, replchars
from .job import Job
from .meta import Meta, compare_dicts
DestinationType = Literal["source", "target"]
class AudioFile:
"""
:param path: The path string of the audio file.
:param job: The current `job` object.
:param string file_type: Either “source” or “target”.
:param string prefix: The path prefix of the audio file, for example the
base folder of your music collection. Used to shorten the path strings
in the progress messaging.
"""
__path: str
type: DestinationType
job: Job
__prefix: Optional[str]
def __init__(
self,
path: str,
job: Job,
file_type: DestinationType = "source",
prefix: Optional[str] = None,
):
self.__path = path
self.type = file_type
self.job = job
self.__prefix = prefix
self.shorten_symbol = "[…]"
@property
def shell_friendly(self):
if not self.job:
return True
else:
return self.job.template_settings.shell_friendly
@property
def meta(self) -> Optional[Meta]:
if self.exists:
try:
return Meta(self.abspath, self.shell_friendly)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
print("".join(tb.stack.format()))
@property
def abspath(self) -> str:
"""The absolute path of the audio file."""
return os.path.abspath(self.__path)
@property
def prefix(self):
if self.__prefix and len(self.__prefix) > 1:
if self.__prefix[-1] != os.path.sep:
return self.__prefix + os.path.sep
else:
return self.__prefix
@property
def exists(self):
return os.path.exists(self.abspath)
@property
def extension(self) -> str:
"""The file extension of the audio file."""
return self.abspath.split(".")[-1].lower()
@property
def short(self) -> str:
if self.prefix:
short = self.abspath.replace(self.prefix, "")
else:
short = os.path.basename(self.abspath)
return self.shorten_symbol + short
@property
def filename(self) -> str:
"""The file name of the audio file."""
return os.path.basename(self.abspath)
@property
def dir_and_file(self) -> str:
"""The parent directory name and the file name."""
path_segments = self.abspath.split(os.path.sep)
return os.path.sep.join(path_segments[-2:])
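# For instance (paths are hypothetical), with prefix="/home/user/Music/" the
# file "/home/user/Music/a/b.flac" is reported by the `short` property as
# "[…]a/b.flac".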
class MBTrackListing:
def __init__(self):
self.counter = 0
def format_audiofile(self, album: str, title: str, length: int) -> str:
self.counter += 1
m, s = divmod(length, 60)
mmss = "{:d}:{:02d}".format(int(m), int(s))
output = "{:d}. {:s}: {:s} ({:s})".format(self.counter, album, title, mmss)
output = output.replace("Op.", "op.")
return output.replace("- ", "")
mb_track_listing = MBTrackListing()
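# A short illustration of the listing format produced above (values made up):
#
#   mb_track_listing.format_audiofile("Credo", "Fratres", 225)
#   # -> '1. Credo: Fratres (3:45)'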
def find_target_path(target: str, extensions: List[str]) -> Optional[str]:
"""Get the path of a existing audio file target. Search for audio files
with different extensions.
"""
target = os.path.splitext(target)[0]
for extension in extensions:
audio_file = target + "." + extension
if os.path.exists(audio_file):
return audio_file
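# For example (hypothetical paths): if "/music/song.flac" exists on disk,
#
#   find_target_path("/music/song.mp3", ["flac", "ogg"])
#   # -> "/music/song.flac"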
def detect_best_format(source: Meta, target: Meta, job: Job) -> DestinationType:
"""
:param source: The metadata object of the source file.
:param target: The metadata object of the target file.
:param job: The `job` object.
:return: Either the string `source` or the string `target`
"""
    def get_highest(dictionary: Dict[Any, DestinationType]) -> DestinationType:
        # Keys are quality scores (bitrate or format rank); values are
        # "source" / "target". After iterating over the sorted items, `out`
        # holds the value that belongs to the highest key.
        out: DestinationType = "target"
        for _, value in sorted(dictionary.items()):
            out = value
        return out
if source.format == target.format:
bitrates: Dict[int, Literal["source", "target"]] = {}
bitrates[source.bitrate] = "source"
bitrates[target.bitrate] = "target"
best = get_highest(bitrates)
job.msg.best_format(best, "bitrate", source, target)
return best
else:
# All types:
#
# 'aac'
# 'aiff'
        # 'alac': Apple Lossless Audio Codec (lossless)
# 'ape'
# 'asf'
# 'dsf'
# 'flac'
# 'mp3'
# 'mpc'
# 'ogg'
# 'opus'
        # 'wv': WavPack (lossless)
ranking = {
"flac": 10,
"alac": 9,
"aac": 8,
"mp3": 5,
"ogg": 2,
"wma": 1,
}
types = {}
types[ranking[source.type] if source.type in ranking else 0] = "source"
types[ranking[target.type] if target.type in ranking else 0] = "target"
best = get_highest(types)
job.msg.best_format(best, "type", source, target)
return best
def process_target_path(meta: Meta, format_string: str, shell_friendly: bool = True):
"""
    :param dict meta: The attributes of a meta object
        (:class:`audiorename.meta.Meta`), converted to a dictionary.
:param string format_string:
:param boolean shell_friendly:
"""
template = Template(format_string)
functions = Functions(meta)
target = template.substitute(meta, functions.functions())
if isinstance(target, str):
if shell_friendly:
target = asciify(target)
target = delchars(target, "().,!\"'’")
target = replchars(target, "-", " ")
            # asciify generates new characters which must be sanitized, e.g.:
            # ¿ -> ?
target = delchars(target, ':*?"<>|\\~&{}')
target = deldupchars(target)
return re.sub(r"\.$", "", target)
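# A sketch of the substitution above (the format string and tag values are
# hypothetical; tmep fills "$field" placeholders from the meta dictionary):
#
#   process_target_path(
#       {"artist": "Hélène Grimaud", "album": "Credo", "title": "Pärt: Credo"},
#       "$artist/$album/$title",
#   )
#   # With shell_friendly=True the result is asciified and stripped of
#   # forbidden characters, e.g. "Helene Grimaud/Credo/Part Credo".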
class Action:
"""
:param job: The `job` object.
:type job: audiorename.job.Job
"""
job: Job
def __init__(self, job: Job):
self.job = job
self.dry_run = job.rename.dry_run
def count(self, counter_name: str):
self.job.stats.counter.count(counter_name)
def cleanup(self, audio_file: AudioFile):
if self.job.rename.cleaning_action == "backup":
self.backup(audio_file)
elif self.job.rename.cleaning_action == "delete":
self.delete(audio_file)
def backup(self, audio_file: AudioFile):
backup_file = AudioFile(
os.path.join(
self.job.rename.backup_folder, os.path.basename(audio_file.abspath)
),
job=self.job,
file_type="target",
)
self.job.msg.action_two_path("Backup", audio_file, backup_file)
self.count("backup")
if not self.dry_run:
self.create_dir(backup_file)
shutil.move(audio_file.abspath, backup_file.abspath)
def copy(self, source: AudioFile, target: AudioFile):
self.job.msg.action_two_path("Copy", source, target)
self.count("copy")
if not self.dry_run:
self.create_dir(target)
shutil.copy2(source.abspath, target.abspath)
def create_dir(self, audio_file: AudioFile):
path = os.path.dirname(audio_file.abspath)
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def delete(self, audio_file: AudioFile):
self.job.msg.action_one_path("Delete", audio_file)
self.count("delete")
if not self.dry_run:
os.remove(audio_file.abspath)
def move(self, source: AudioFile, target: AudioFile):
self.job.msg.action_two_path("Move", source, target)
self.count("move")
if not self.dry_run:
self.create_dir(target)
shutil.move(source.abspath, target.abspath)
def metadata(
self, audio_file: AudioFile, enrich: bool = False, remap: bool = False
) -> None:
if not audio_file.meta:
raise Exception("The given audio file has no meta property.")
meta = audio_file.meta
pre = meta.export_dict(sanitize=False)
def single_action(
meta: Meta,
method_name: Literal["enrich_metadata", "remap_classical"],
message: str,
):
pre = meta.export_dict(sanitize=False)
method = getattr(meta, method_name)
method()
post = meta.export_dict(sanitize=False)
diff = compare_dicts(pre, post)
if diff:
self.count(method_name)
self.job.msg.output(message)
for change in diff:
self.job.msg.diff(change[0], change[1], change[2])
if enrich:
single_action(meta, "enrich_metadata", "Enrich metadata")
if remap:
single_action(meta, "remap_classical", "Remap classical")
post = meta.export_dict(sanitize=False)
diff = compare_dicts(pre, post)
if not self.dry_run and diff:
meta.save()
def do_job_on_audiofile(source_path: str, job: Job):
def count(key: str):
job.stats.counter.count(key)
skip = False
action = Action(job)
source = AudioFile(source_path, job=job, prefix=os.getcwd(), file_type="source")
if not job.cli_output.mb_track_listing:
job.msg.next_file(source)
if not source.meta:
skip = True
##
# Skips
##
if skip:
job.msg.status("Broken file", status="error")
count("broken_file")
return
##
# Output only
##
if not source.meta:
raise Exception("source.meta must not be empty.")
if job.cli_output.mb_track_listing:
print(
mb_track_listing.format_audiofile(
source.meta.album, source.meta.title, source.meta.length
)
)
return
if job.cli_output.debug:
phrydy.print_debug(
source.abspath,
Meta,
Meta.fields,
job.cli_output.color,
)
return
if job.filters.field_skip and (
not hasattr(source.meta, job.filters.field_skip)
or not getattr(source.meta, job.filters.field_skip)
):
job.msg.status("No field", status="error")
count("no_field")
return
##
# Metadata actions
##
if job.metadata_actions.remap_classical or job.metadata_actions.enrich_metadata:
action.metadata(
source,
job.metadata_actions.enrich_metadata,
job.metadata_actions.remap_classical,
)
if (
source.meta.genre is not None
and getattr(source.meta, "genre", "").lower() in job.filters.genre_classical
):
if not job.metadata_actions.remap_classical:
action.metadata(source, job.metadata_actions.enrich_metadata, True)
##
# Rename action
##
if job.rename.move_action != "no_rename":
if (
source.meta.genre is not None
and getattr(source.meta, "genre", "").lower() in job.filters.genre_classical
):
format_string = job.path_templates.classical
elif source.meta.ar_combined_soundtrack:
if job.args.no_soundtrack and source.meta.comp:
format_string = job.path_templates.compilation
else:
format_string = job.path_templates.soundtrack
elif source.meta.comp:
format_string = job.path_templates.compilation
else:
format_string = job.path_templates.default
meta_dict = source.meta.export_dict()
desired_target_path = process_target_path(
meta_dict, format_string, job.template_settings.shell_friendly
)
# Remove the leading path separator to prevent the audio files from
# ending up in a folder other than the target folder.
desired_target_path = re.sub(r"^" + os.path.sep + r"+", "", desired_target_path)
desired_target_path = os.path.join(
job.selection.target, desired_target_path + "." + source.extension
)
desired_target = AudioFile(
desired_target_path,
job=job,
prefix=job.selection.target,
file_type="target",
)
# Do nothing
if source.abspath == desired_target.abspath:
job.msg.status("Renamed", status="ok")
count("renamed")
return
        # Search for an existing target.
        target = None
        target_path = find_target_path(desired_target.abspath, job.filters.extension)
if target_path:
target = AudioFile(
target_path, job=job, prefix=job.selection.target, file_type="target"
)
        # Both files exist
if target:
if not target.meta:
raise Exception("target.meta must not be empty.")
best = detect_best_format(source.meta, target.meta, job)
if job.rename.cleaning_action:
# delete source
if not job.rename.best_format or (
job.rename.best_format and best == "target"
):
action.cleanup(source)
# delete target
if job.rename.best_format and best == "source":
action.cleanup(target)
# Unset target object to trigger copy or move actions.
target = None
if target:
job.msg.status("Exists", status="error")
# copy
elif job.rename.move_action == "copy":
action.copy(source, desired_target)
# move
elif job.rename.move_action == "move":
action.move(source, desired_target)
|
{
"content_hash": "ca7d63d6e2d3627dc11e9f6ae3a3e6e9",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 88,
"avg_line_length": 29.62133891213389,
"alnum_prop": 0.5689667349389081,
"repo_name": "Josef-Friedrich/audiorename",
"id": "5e7bcdadd8e48a1de0b77576616d2c4e6885430a",
"size": "14172",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "audiorename/audiofile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "258342"
}
],
"symlink_target": ""
}
|
"""
====================
Biadjacency matrices
====================
"""
# Copyright (C) 2013-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import itertools
from networkx.convert import _prep_create_using
from networkx.convert_matrix import _generate_weighted_edges
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['biadjacency_matrix','from_biadjacency_matrix']
def biadjacency_matrix(G, row_order, column_order=None,
dtype=None, weight='weight', format='csr'):
r"""Return the biadjacency matrix of the bipartite graph G.
Let `G = (U, V, E)` be a bipartite graph with node sets
`U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency
matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1`
if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is
not `None` and matches the name of an edge attribute, its value is
used instead of 1.
Parameters
----------
G : graph
A NetworkX graph
row_order : list of nodes
The rows of the matrix are ordered according to the list of nodes.
column_order : list, optional
The columns of the matrix are ordered according to the list of nodes.
If column_order is None, then the ordering of columns is arbitrary.
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None, optional (default='weight')
The edge data key used to provide each value in the matrix.
If None, then each edge has weight 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [2]_ for details.
Returns
-------
M : SciPy sparse matrix
Biadjacency matrix representation of the bipartite graph G.
Notes
-----
No attempt is made to check that the input graph is bipartite.
For directed bipartite graphs only successors are considered as neighbors.
To obtain an adjacency matrix with ones (or weight values) for both
predecessors and successors you have to generate two biadjacency matrices
where the rows of one of them are the columns of the other, and then add
one to the transpose of the other.
See Also
--------
adjacency_matrix
from_biadjacency_matrix
References
----------
.. [1] http://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
.. [2] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
nlen = len(row_order)
if nlen == 0:
raise nx.NetworkXError("row_order is empty list")
if len(row_order) != len(set(row_order)):
msg = "Ambiguous ordering: `row_order` contained duplicates."
raise nx.NetworkXError(msg)
if column_order is None:
column_order = list(set(G) - set(row_order))
mlen = len(column_order)
if len(column_order) != len(set(column_order)):
msg = "Ambiguous ordering: `column_order` contained duplicates."
raise nx.NetworkXError(msg)
row_index = dict(zip(row_order, itertools.count()))
col_index = dict(zip(column_order, itertools.count()))
    if G.number_of_edges() == 0:
        row, col, data = [], [], []
    else:
        row, col, data = zip(*((row_index[u], col_index[v], d.get(weight, 1))
                               for u, v, d in G.edges(row_order, data=True)
                               if u in row_index and v in col_index))
    M = sparse.coo_matrix((data, (row, col)),
                          shape=(nlen, mlen), dtype=dtype)
    try:
        return M.asformat(format)
    except AttributeError:
        raise nx.NetworkXError("Unknown sparse matrix format: %s" % format)
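# A small worked example (a sketch; assumes SciPy is installed). The path
# u0 - v0 - u1 yields one biadjacency column for v0:
#
#   import networkx as nx
#   G = nx.Graph()
#   G.add_nodes_from(["u0", "u1"], bipartite=0)
#   G.add_nodes_from(["v0"], bipartite=1)
#   G.add_edges_from([("u0", "v0"), ("u1", "v0")])
#   biadjacency_matrix(G, row_order=["u0", "u1"]).toarray()
#   # -> array([[1],
#   #           [1]])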
def from_biadjacency_matrix(A, create_using=None, edge_attribute='weight'):
r"""Creates a new bipartite graph from a biadjacency matrix given as a
SciPy sparse matrix.
Parameters
----------
A: scipy sparse matrix
A biadjacency matrix representation of a graph
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
The nodes are labeled with the attribute `bipartite` set to an integer
0 or 1 representing membership in part 0 or part 1 of the bipartite graph.
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph` and the entries of `A` are of type ``int``,
then this function returns a multigraph (of the same type as
`create_using`) with parallel edges. In this case, `edge_attribute` will be
ignored.
See Also
--------
biadjacency_matrix
from_numpy_matrix
References
----------
    .. [1] http://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
"""
G = _prep_create_using(create_using)
n, m = A.shape
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n), bipartite=0)
G.add_nodes_from(range(n,n+m), bipartite=1)
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = ((u, n+v, d) for (u, v, d) in _generate_weighted_edges(A))
# If the entries in the adjacency matrix are integers and the graph is a
# multigraph, then create parallel edges, each with weight 1, for each
# entry in the adjacency matrix. Otherwise, create one edge for each
# positive entry in the adjacency matrix and set the weight of that edge to
# be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph():
chain = itertools.chain.from_iterable
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
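# Round-trip sketch for the function above: nodes 0..n-1 carry bipartite=0 and
# nodes n..n+m-1 carry bipartite=1.
#
#   from scipy import sparse
#   A = sparse.coo_matrix([[1, 0], [0, 1]])
#   H = from_biadjacency_matrix(A)
#   sorted(H.edges(data=True))
#   # -> [(0, 2, {'weight': 1}), (1, 3, {'weight': 1})]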
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import scipy
    except ImportError:
raise SkipTest("SciPy not available")
|
{
"content_hash": "a36823ee9754ca77addaec0c02634b4f",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 94,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.6401699029126213,
"repo_name": "SanketDG/networkx",
"id": "496ed21810cd85aefe3eb2536697e4732e607808",
"size": "6616",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "networkx/algorithms/bipartite/matrix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3167640"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import imp
import androguard.session as session_module
from androguard.gui.DataModel import *
from androguard.gui.apiwindow import APIWindow
from androguard.gui.binwindow import binWidget
from androguard.gui.fileloading import FileLoadingThread
from androguard.gui.helpers import class2func
from androguard.gui.methodswindow import MethodsWindow
from androguard.gui.resourceswindow import ResourcesWindow
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.stringswindow import StringsWindow
from androguard.gui.treewindow import TreeWindow
import os
import logging
log = logging.getLogger("androguard.gui")
class TabsWindow(QtWidgets.QTabWidget):
def __init__(self, bin_windows, parent=None):
super(TabsWindow, self).__init__(parent)
self.bin_windows = bin_windows
self.setTabsClosable(True)
self.tabCloseRequested.connect(self.tabCloseRequestedHandler)
self.currentChanged.connect(self.currentTabChanged)
self.closeAllTabs = QtWidgets.QAction(
"Close all tabs",
self,
triggered=self.actioncloseAllTabs)
self.closeOtherTabs = QtWidgets.QAction(
"Close other tabs",
self,
triggered=self.actioncloseOtherTabs)
self.closeLeftTabs = QtWidgets.QAction(
"Close left tabs",
self,
triggered=self.actioncloseLeftTabs)
self.closeRightTabs = QtWidgets.QAction(
"Close right tabs",
self,
triggered=self.actioncloseRightTabs)
def actioncloseAllTabs(self):
self.clear()
def actioncloseOtherTabs(self):
for i in range(self.currentIndex() - 1, -1, -1):
self.removeTab(i)
        for i in range(self.count() - 1, self.currentIndex(), -1):
self.removeTab(i)
def actioncloseLeftTabs(self):
for i in range(self.currentIndex() - 1, -1, -1):
self.removeTab(i)
def actioncloseRightTabs(self):
        for i in range(self.count() - 1, self.currentIndex(), -1):
self.removeTab(i)
def tabCloseRequestedHandler(self, index):
self.removeTab(index)
def currentTabChanged(self, index):
log.debug("curentTabChanged -> %d (%s)" % (index, self.tabToolTip(index)))
if index == -1:
return
current_title = self.tabToolTip(index)
for title in self.bin_windows:
if title != current_title:
log.debug("Disable %s" % title)
self.bin_windows[title].disable()
        if current_title in self.bin_windows:
            log.debug("Enable %s" % current_title)
            self.bin_windows[current_title].enable()
def contextMenuEvent(self, event):
menu = QtWidgets.QMenu(self)
menu.addAction(self.closeAllTabs)
menu.addAction(self.closeOtherTabs)
menu.addAction(self.closeLeftTabs)
menu.addAction(self.closeRightTabs)
menu.exec_(event.globalPos())
class MainWindow(QtWidgets.QMainWindow):
"""Main window:
self.central: QTabWidget in center area
self.dock: QDockWidget in left area
self.tree: TreeWindow(QTreeWidget) in self.dock
"""
def __init__(self, parent=None, session=session_module.Session(), input_file=None, input_plugin=None):
super(MainWindow, self).__init__(parent)
self.session = session
self.bin_windows = {}
self.setupFileMenu()
self.setupViewMenu()
self.setupPluginsMenu()
self.setupHelpMenu()
self.setupCentral()
self.setupEmptyTree()
self.setupDock()
self.setupSession()
self.setWindowTitle("Androguard GUI")
self.showStatus("Androguard GUI")
self.installEventFilter(self)
self.input_plugin = input_plugin
if input_file:
self._openFile(input_file)
root = os.path.dirname(os.path.realpath(__file__))
self.setWindowIcon(QtGui.QIcon(os.path.join(root, "androguard.ico")))
def eventFilter(self, watched, event):
for bin_window in list(self.bin_windows.values()):
bin_window.eventFilter(watched, event)
return False
def showStatus(self, msg):
"""Helper function called by any window to display a message
in status bar.
"""
log.debug(msg)
self.statusBar().showMessage(msg)
def about(self):
"""User clicked About menu. Display a Message box."""
QtWidgets.QMessageBox.about(self, "About Androguard GUI",
"<p><b>Androguard GUI</b> is basically a GUI for Androguard :)." \
"<br>Have fun !</p>")
def setupSession(self):
log.debug("Setup Session")
self.fileLoadingThread = FileLoadingThread(self)
self.fileLoadingThread.file_loaded.connect(self.loadedFile)
def loadedFile(self, success):
if not success:
self.showStatus("Analysis of %s failed :(" %
str(self.fileLoadingThread.file_path))
return
self.updateDockWithTree()
self.cleanCentral()
self.showStatus("Analysis of %s done!" %
str(self.fileLoadingThread.file_path))
if self.input_plugin:
self._runPlugin(self.input_plugin)
def openFile(self):
self.session.reset()
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Open File", '.',
"Android Files (*.apk *.jar *.dex *.odex *.dey);;Androguard Session (*.ag)")
self._openFile(filepath)
def _openFile(self, filepath=None):
if filepath:
self.setupTree()
self.showStatus("Analyzing %s..." % str(filepath))
self.fileLoadingThread.load(filepath)
def addFile(self):
if not self.session.isOpen():
return
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Add File", '',
"Android Files (*.apk *.jar *.dex *.odex *.dey)")
if filepath:
self.showStatus("Analyzing %s..." % str(filepath))
self.fileLoadingThread.load(filepath)
def saveFile(self):
"""User clicked Save menu. Display a Dialog to ask whwre to save."""
filepath, _ = QtWidgets.QFileDialog.getSaveFileName(
self, "Save File", '', "Androguard Session (*.ag)")
if filepath:
self.showStatus("Saving %s..." % str(filepath))
self.saveSession(filepath)
def saveSession(self, filepath):
"""Save androguard session."""
try:
session_module.Save(self.session, filepath)
except RuntimeError as e:
log.error(str(e))
os.remove(filepath)
log.warning("Session not saved")
    def _runPlugin(self, filepath):
        log.debug("RUN plugin from %s" % filepath)
        module_name = os.path.splitext(os.path.basename(filepath))[0]
        # importlib has no find_module/load_module; use the imp module instead.
        f, filename, description = imp.find_module(
            module_name,
            [os.path.dirname(filepath)])
        log.debug("Plugin module: %s %s %s" % (f, filename, description))
        mod = imp.load_module(module_name, f, filename, description)
        mod.PluginEntry(self.session)
def openRunPluginWindow(self):
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Open File", '',
"Python Files (*.py);;")
if filepath:
self._runPlugin(filepath)
def closeEvent(self, event):
"""Clicked [x] to close main window"""
event.accept()
def setupEmptyTree(self):
"""Setup empty Tree at startup. """
if hasattr(self, "tree"):
del self.tree
self.tree = QtWidgets.QTreeWidget(self)
self.tree.header().close()
def setupDock(self):
"""Setup empty Dock at startup. """
self.dock = QtWidgets.QDockWidget("Classes", self)
self.dock.setWidget(self.tree)
self.dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock)
def setupTree(self):
log.debug("Setup Tree")
self.tree = TreeWindow(win=self, session=self.session)
self.tree.setWindowTitle("Tree model")
self.dock.setWidget(self.tree)
def setupCentral(self):
"""Setup empty window supporting tabs at startup. """
self.central = TabsWindow(self.bin_windows, self)
self.setCentralWidget(self.central)
def cleanCentral(self):
self.central.actioncloseAllTabs()
def setupFileMenu(self):
log.debug("Setup File Menu")
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction("&Open...", self.openFile, "Ctrl+O")
self.fileMenu.addAction("&Add...", self.addFile, "Ctrl+A")
self.fileMenu.addAction("&Save...", self.saveFile, "Ctrl+S")
self.fileMenu.addAction("E&xit", self.close, "Ctrl+Q")
def setupViewMenu(self):
log.debug("Setup View Menu")
self.viewMenu = self.menuBar().addMenu("&View")
self.viewMenu.addAction("&Strings...", self.openStringsWindow)
self.viewMenu.addAction("&Methods...", self.openMethodsWindow)
self.viewMenu.addAction("&API...", self.openAPIWindow)
self.viewMenu.addAction("&APK...", self.openApkWindow)
self.viewMenu.addAction("&Resources...", self.openResourcesWindow)
def setupPluginsMenu(self):
log.debug("Setup Plugins Menu")
self.pluginsMenu = self.menuBar().addMenu("&Plugins")
self.pluginsMenu.addAction("&Run...", self.openRunPluginWindow)
def setupHelpMenu(self):
log.debug("Setup Help Menu")
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction("&About", self.about)
self.helpMenu.addAction("About &Qt", QtWidgets.qApp.aboutQt)
def updateDockWithTree(self, empty=False):
"""Update the classes tree. Called when
- a new APK has been imported
        - a class has been renamed (displayed in the tree)
"""
self.setupTree()
self.tree.fill()
def openStringsWindow(self):
stringswin = StringsWindow(win=self, session=self.session)
self.central.addTab(stringswin, stringswin.title)
self.central.setTabToolTip(self.central.indexOf(stringswin),
stringswin.title)
self.central.setCurrentWidget(stringswin)
def openMethodsWindow(self):
methodswin = MethodsWindow(win=self, session=self.session)
self.central.addTab(methodswin, methodswin.title)
self.central.setTabToolTip(self.central.indexOf(methodswin),
methodswin.title)
self.central.setCurrentWidget(methodswin)
def openResourcesWindow(self):
resourceswin = ResourcesWindow(win=self, session=self.session)
self.central.addTab(resourceswin, resourceswin.title)
self.central.setTabToolTip(self.central.indexOf(resourceswin),
resourceswin.title)
self.central.setCurrentWidget(resourceswin)
def openAPIWindow(self):
apiwin = APIWindow(win=self, session=self.session)
self.central.addTab(apiwin, apiwin.title)
self.central.setTabToolTip(self.central.indexOf(apiwin),
apiwin.title)
self.central.setCurrentWidget(apiwin)
def openApkWindow(self):
log.debug("openApkWindow for %s" % self.session.analyzed_apk)
bin_window = binWidget(self, ApkModel(self.session.get_objects_apk(self.fileLoadingThread.file_path)[0]), "APK")
bin_window.activateWindow()
self.central.addTab(bin_window, bin_window.title)
self.central.setCurrentWidget(bin_window)
self.bin_windows[bin_window.title] = bin_window
def openBinWindow(self, current_class):
log.debug("openBinWindow for %s" % current_class)
bin_window = self.getMeOpenedWindowIfExists(current_class.current_title)
if not bin_window:
bin_window = binWidget(self, DexClassModel(current_class), current_class.get_name())
bin_window.activateWindow()
self.central.addTab(bin_window, current_class.current_title)
self.central.setTabToolTip(self.central.indexOf(bin_window),
current_class.current_title)
self.bin_windows[current_class.current_title] = bin_window
bin_window.enable()
self.central.setCurrentWidget(bin_window)
def openSourceWindow(self, current_class, method=None):
"""Main function to open a decompile source window
It checks if it already opened and open that tab,
otherwise, initialize a new window.
"""
log.debug("openSourceWindow for %s" % current_class)
sourcewin = self.getMeOpenedWindowIfExists(current_class.current_title + "(S)")
if not sourcewin:
current_filename = self.session.get_filename_by_class(current_class)
current_digest = self.session.get_digest_by_class(current_class)
sourcewin = SourceWindow(win=self,
current_class=current_class,
current_title=current_class.current_title + "(S)",
current_filename=current_filename,
current_digest=current_digest,
session=self.session)
sourcewin.reload_java_sources()
self.central.addTab(sourcewin, sourcewin.title)
self.central.setTabToolTip(self.central.indexOf(sourcewin),
sourcewin.title)
if method:
sourcewin.browse_to_method(method)
self.central.setCurrentWidget(sourcewin)
def getMeOpenedWindowIfExists(self, name):
for idx in range(self.central.count()):
if name == self.central.tabToolTip(idx):
log.debug("Tab %s already opened at: %d" %
(name, idx))
return self.central.widget(idx)
return None
def doesClassExist(self, path):
arg = class2func(path)
try:
getattr(self.d, arg)
except AttributeError:
return False
return True
|
{
"content_hash": "5348ce44df65cc1115135a8f43c0adee",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 120,
"avg_line_length": 36.58186397984887,
"alnum_prop": 0.6165392825173862,
"repo_name": "huangtao2003/androguard",
"id": "2df00959ca8b7b83109c63e72372bb6eca157942",
"size": "14523",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "androguard/gui/mainwindow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1130199"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os, sys
def replace_code(start, txt1, txt2, extn, search=None, force=False):
"""replace all txt1 by txt2 in files with extension (extn)"""
import webnotes.utils
import os, re
esc = webnotes.utils.make_esc('[]')
if not search: search = esc(txt1)
for wt in os.walk(start, followlinks=1):
for fn in wt[2]:
if fn.split('.')[-1]==extn:
fpath = os.path.join(wt[0], fn)
with open(fpath, 'r') as f:
content = f.read()
if re.search(search, content):
res = search_replace_with_prompt(fpath, txt1, txt2, force)
if res == 'skip':
return 'skip'
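# A usage sketch (identifier names are illustrative): rename a function across
# all .py files under the current directory, prompting for each occurrence;
# pass force=True to replace without prompting:
#
#   replace_code('.', 'get_defaults', 'get_new_defaults', 'py')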
def search_replace_with_prompt(fpath, txt1, txt2, force=False):
""" Search and replace all txt1 by txt2 in the file with confirmation"""
from termcolor import colored
with open(fpath, 'r') as f:
content = f.readlines()
tmp = []
for c in content:
if c.find(txt1) != -1:
print fpath
print colored(txt1, 'red').join(c[:-1].split(txt1))
a = ''
if force:
c = c.replace(txt1, txt2)
else:
while a.lower() not in ['y', 'n', 'skip']:
a = raw_input('Do you want to Change [y/n/skip]?')
if a.lower() == 'y':
c = c.replace(txt1, txt2)
elif a.lower() == 'skip':
return 'skip'
tmp.append(c)
with open(fpath, 'w') as f:
f.write(''.join(tmp))
print colored('Updated', 'green')
def pull(remote, branch, build=False):
os.system('cd lib && git pull %s %s' % (remote, branch))
os.system('cd app && git pull %s %s' % (remote, branch))
if build: rebuild()
def rebuild():
# build js / css
from webnotes import build
build.bundle(False)
def apply_latest_patches():
import webnotes.modules.patch_handler
webnotes.modules.patch_handler.run_all()
print '\n'.join(webnotes.modules.patch_handler.log_list)
def sync_all(force=0):
import webnotes.model.sync
webnotes.model.sync.sync_all(force)
def update_erpnext(remote='origin', branch='master'):
pull(remote, branch)
from webnotes.utils import execute_in_shell
execute_in_shell("lib/wnf.py --patch_sync_build", verbose=1)
def patch_sync_build():
patch_sync()
rebuild()
def patch_sync():
apply_latest_patches()
import webnotes.modules.patch_handler
for l in webnotes.modules.patch_handler.log_list:
if "failed: STOPPED" in l:
return
sync_all()
clear_cache()
def clear_cache():
import webnotes.sessions
webnotes.sessions.clear_cache()
def append_future_import():
"""appends from __future__ import unicode_literals to py files if necessary"""
import os
import conf
conf_path = os.path.abspath(conf.__file__)
if conf_path.endswith("pyc"):
conf_path = conf_path[:-1]
base_path = os.path.dirname(conf_path)
for path, folders, files in os.walk(base_path):
for f in files:
if f.endswith('.py'):
file_path = os.path.join(path, f)
with open(file_path, 'r') as pyfile:
content = pyfile.read()
future_import = 'from __future__ import unicode_literals'
if future_import in content: continue
content = content.split('\n')
idx = -1
for c in content:
idx += 1
if c and not c.startswith('#'):
break
content.insert(idx, future_import)
content = "\n".join(content)
with open(file_path, 'w') as pyfile:
pyfile.write(content)
def setup_options():
from optparse import OptionParser
parser = OptionParser()
# install
parser.add_option('--install', nargs=2, metavar = "NEW_DB_NAME SOURCE_PATH",
help="install db")
parser.add_option('--install_fresh', nargs=1, metavar = "NEW_DB_NAME",
help="install fresh db")
parser.add_option('--reinstall', default=False, action="store_true",
help="install fresh db in db_name specified in conf.py")
parser.add_option('--make_demo', default=False, action="store_true",
help="install in database 'demo'")
parser.add_option('--make_demo_fresh', default=False, action="store_true",
help="install in database 'demo'")
# update
parser.add_option("-u", "--update",
help="Pull, run latest patches and sync all",
default=False, action="store_true", metavar="ORIGIN BRANCH")
parser.add_option("--backup", help="Takes backup of database in backup folder",
default=False, action="store_true")
# build
parser.add_option("-b", "--build", default=False, action="store_true",
help="minify + concat js files")
parser.add_option("-w", "--watch", default=False, action="store_true",
help="watch and minify + concat js files, if necessary")
parser.add_option("--no_cms", default=False, action="store_true",
help="do not build wn-web.js and wn-css.js")
parser.add_option("--docs", default=False, action="store_true",
help="Build docs")
parser.add_option("-d", "--db",
dest="db_name",
help="Apply the patches on given db")
parser.add_option("--password",
help="Password for given db", nargs=1)
parser.add_option("--root_password",
help="Password for mysql root user", nargs=1)
parser.add_option("--clear_web", default=False, action="store_true",
help="clear web cache")
parser.add_option("--clear_cache", default=False, action="store_true",
help="clear cache")
parser.add_option("--clear_defaults", default=False, action="store_true",
help="clear cache of defaults")
parser.add_option("--domain", metavar="DOMAIN",
help="store domain in Website Settings", nargs=1)
# git
parser.add_option("--status", default=False, action="store_true",
help="git status")
parser.add_option("--git", nargs=1, default=False,
metavar = "git options",
help="run git with options in both repos")
parser.add_option("--pull", nargs=2, default=False,
metavar = "remote branch",
help="git pull (both repos)")
parser.add_option("-c", "--commit", nargs=1, default=False,
metavar = "commit both repos",
help="git commit -a -m [comment]")
parser.add_option("-p", "--push", default=False,
action="store_true",
metavar = "remote branch",
help="git push (both repos) [remote] [branch]")
parser.add_option("--checkout", nargs=1, default=False,
metavar = "branch",
help="git checkout [branch]")
parser.add_option("-l", "--latest",
action="store_true", dest="run_latest", default=False,
help="Apply the latest patches")
# patch
parser.add_option("--patch", nargs=1, dest="patch_list",
metavar='patch_module',
action="append",
help="Apply patch")
parser.add_option("-f", "--force",
action="store_true", dest="force", default=False,
help="Force Apply all patches specified using option -p or --patch")
parser.add_option('--reload_doc', nargs=3, metavar = "module doctype docname",
help="reload doc")
parser.add_option('--export_doc', nargs=2, metavar = "doctype docname",
help="export doc")
# diff
parser.add_option('--diff_ref_file', nargs=0, \
help="Get missing database records and mismatch properties, with file as reference")
parser.add_option('--diff_ref_db', nargs=0, \
help="Get missing .txt files and mismatch properties, with database as reference")
# scheduler
parser.add_option('--run_scheduler', default=False, action="store_true",
help="Trigger scheduler")
parser.add_option('--run_scheduler_event', nargs=1, metavar="[all|daily|weekly|monthly]",
help="Run scheduler event")
# misc
parser.add_option("--replace", nargs=3, default=False,
metavar = "search replace_by extension",
help="file search-replace")
parser.add_option("--sync_all", help="Synchronize all DocTypes using txt files",
nargs=0)
parser.add_option("--sync", help="Synchronize given DocType using txt file",
nargs=2, metavar="module doctype (use their folder names)")
parser.add_option("--patch_sync_build", action="store_true", default=False,
help="run latest patches, sync all and rebuild js css")
parser.add_option("--patch_sync", action="store_true", default=False,
help="run latest patches, sync all")
parser.add_option("--cleanup_data", help="Cleanup test data", default=False,
action="store_true")
parser.add_option("--append_future_import", default=False, action="store_true",
help="append from __future__ import unicode literals to py files")
parser.add_option("--build_message_files", default=False, action="store_true",
help="Build message files for translation")
parser.add_option('--export_messages', nargs=2, metavar="LANG FILENAME",
help="""Export all messages for a language to translation in a csv file.
Example, lib/wnf.py --export_messages hi hindi.csv""")
parser.add_option('--import_messages', nargs=2, metavar="LANG FILENAME",
help="""Import messages for a language and make language files.
Example, lib/wnf.py --import_messages hi hindi.csv""")
parser.add_option('--google_translate', nargs=3, metavar="LANG INFILE OUTFILE",
help="""Auto translate using Google Translate API""")
parser.add_option('--translate', nargs=1, metavar="LANG",
help="""Rebuild translation for the given langauge and
use Google Translate to tranlate untranslated messages. use "all" """)
parser.add_option("--reset_perms", default=False, action="store_true",
help="Reset permissions for all doctypes.")
parser.add_option("--make_conf", default=False, action="store_true",
help="Create new conf.py file")
# bean helpers
parser.add_option('--export_doclist', nargs=3, metavar="DOCTYPE NAME PATH",
help="""Export doclist as json to the given path, use '-' as name for Singles.""")
parser.add_option('--export_csv', nargs=2, metavar="DOCTYPE PATH",
help="""Dump DocType as csv.""")
parser.add_option('--import_doclist', nargs=1, metavar="PATH",
help="""Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported""")
return parser.parse_args()
def run():
sys.path.append('.')
sys.path.append('lib')
sys.path.append('app')
(options, args) = setup_options()
# build
if options.build:
from webnotes import build
if options.no_cms:
cms_make = False
else:
cms_make = True
build.bundle(False, cms_make)
return
elif options.watch:
from webnotes import build
build.watch(True)
return
# code replace
elif options.replace:
print options.replace
replace_code('.', options.replace[0], options.replace[1], options.replace[2], force=options.force)
return
# git
elif options.status:
os.chdir('lib')
os.system('git status')
os.chdir('../app')
os.system('git status')
return
elif options.git:
os.chdir('lib')
os.system('git %s' % options.git)
os.chdir('../app')
os.system('git %s' % options.git)
return
import webnotes
try:
import conf
except ImportError, e:
conf = webnotes._dict({})
from webnotes.db import Database
import webnotes.modules.patch_handler
webnotes.print_messages = True
# connect
if options.db_name is not None:
if options.password:
webnotes.connect(options.db_name, options.password)
else:
webnotes.connect(options.db_name)
elif not any([options.install, options.pull, options.install_fresh, options.reinstall, options.make_conf]):
webnotes.connect(conf.db_name)
if options.pull:
pull(options.pull[0], options.pull[1], build=True)
elif options.commit:
os.chdir('lib')
os.system('git commit -a -m "%s"' % (options.commit))
os.chdir('../app')
os.system('git commit -a -m "%s"' % (options.commit))
elif options.push:
if not args:
args = ["origin", conf.branch]
os.chdir('lib')
os.system('git push %s %s' % (args[0], args[1]))
os.chdir('../app')
os.system('git push %s %s' % (args[0], args[1]))
elif options.checkout:
os.chdir('lib')
os.system('git checkout %s' % options.checkout)
os.chdir('../app')
os.system('git checkout %s' % options.checkout)
# patch
elif options.patch_list:
# clear log
webnotes.modules.patch_handler.log_list = []
# run individual patches
for patch in options.patch_list:
webnotes.modules.patch_handler.run_single(\
patchmodule = patch, force = options.force)
print '\n'.join(webnotes.modules.patch_handler.log_list)
# reload
elif options.reload_doc:
webnotes.modules.patch_handler.reload_doc(\
{"module":options.reload_doc[0], "dt":options.reload_doc[1], "dn":options.reload_doc[2]})
print '\n'.join(webnotes.modules.patch_handler.log_list)
elif options.export_doc:
from webnotes.modules import export_doc
export_doc(options.export_doc[0], options.export_doc[1])
# run all pending
elif options.run_latest:
apply_latest_patches()
elif options.install:
from webnotes.install_lib.install import Installer
inst = Installer('root', options.root_password)
inst.import_from_db(options.install[0], source_path=options.install[1],
verbose = 1)
elif options.install_fresh:
from webnotes.install_lib.install import Installer
inst = Installer('root', options.root_password)
inst.import_from_db(options.install_fresh, verbose = 1)
elif options.reinstall:
from webnotes.install_lib.install import Installer
inst = Installer('root', options.root_password)
import conf
inst.import_from_db(conf.db_name, verbose = 1)
elif options.make_demo:
import utilities.demo.make_demo
utilities.demo.make_demo.make()
elif options.make_demo_fresh:
import utilities.demo.make_demo
utilities.demo.make_demo.make(reset=True)
elif options.diff_ref_file is not None:
import webnotes.modules.diff
webnotes.modules.diff.diff_ref_file()
elif options.diff_ref_db is not None:
import webnotes.modules.diff
webnotes.modules.diff.diff_ref_db()
elif options.run_scheduler:
import webnotes.utils.scheduler
print webnotes.utils.scheduler.execute()
elif options.run_scheduler_event is not None:
import webnotes.utils.scheduler
print webnotes.utils.scheduler.trigger('execute_' + options.run_scheduler_event)
elif options.sync_all is not None:
sync_all(options.force or 0)
elif options.sync is not None:
webnotes.reload_doc(options.sync[0], "doctype", options.sync[1])
elif options.update:
if not args:
args = ["origin", conf.branch]
update_erpnext(args[0], args[1])
elif options.patch_sync_build:
patch_sync_build()
elif options.patch_sync:
patch_sync()
elif options.cleanup_data:
from utilities import cleanup_data
cleanup_data.run()
elif options.domain:
webnotes.conn.set_value('Website Settings', None, 'subdomain', options.domain)
webnotes.conn.commit()
print "Domain set to", options.domain
elif options.clear_web:
# build wn-web.js and wn-web.css
from website.doctype.website_settings.make_web_include_files import make
make()
import webnotes.webutils
webnotes.webutils.clear_cache()
elif options.clear_cache:
clear_cache()
elif options.clear_defaults:
import webnotes.defaults
webnotes.defaults.clear_cache()
webnotes.clear_cache()
elif options.append_future_import:
append_future_import()
elif options.backup:
from webnotes.utils.backups import scheduled_backup
scheduled_backup(ignore_files = True)
# print messages
if webnotes.message_log:
print '\n'.join(webnotes.message_log)
elif options.build_message_files:
import webnotes.translate
webnotes.translate.build_message_files()
elif options.export_messages:
import webnotes.translate
webnotes.translate.export_messages(*options.export_messages)
elif options.import_messages:
import webnotes.translate
webnotes.translate.import_messages(*options.import_messages)
elif options.google_translate:
from webnotes.translate import google_translate
google_translate(*options.google_translate)
elif options.translate:
from webnotes.translate import translate
translate(options.translate)
elif options.docs:
from core.doctype.documentation_tool.documentation_tool import write_static
write_static()
elif options.export_doclist:
from core.page.data_import_tool.data_import_tool import export_json
export_json(*list(options.export_doclist))
elif options.export_csv:
from core.page.data_import_tool.data_import_tool import export_csv
export_csv(*options.export_csv)
elif options.import_doclist:
import json
if os.path.isdir(options.import_doclist):
docs = [os.path.join(options.import_doclist, f) \
for f in os.listdir(options.import_doclist)]
else:
docs = [options.import_doclist]
for f in docs:
if f.endswith(".json"):
with open(f, "r") as infile:
b = webnotes.bean(json.loads(infile.read())).insert_or_update()
print "Imported: " + b.doc.doctype + " / " + b.doc.name
webnotes.conn.commit()
if f.endswith(".csv"):
from core.page.data_import_tool.data_import_tool import import_file_by_path
import_file_by_path(f, ignore_links=True)
webnotes.conn.commit()
elif options.reset_perms:
for d in webnotes.conn.sql_list("""select name from `tabDocType`
where ifnull(istable, 0)=0 and ifnull(custom, 0)=0"""):
try:
webnotes.clear_cache(doctype=d)
webnotes.reset_perms(d)
except:
pass
elif options.make_conf:
if os.path.exists("conf.py"):
os.system("mv conf.py conf.py.bak")
with open("lib/conf/conf.py", "r") as confsrc:
confstr = confsrc.read()
db_name = raw_input("Database Name: ")
if not db_name:
print "Database Name Required"
return
db_password = raw_input("Database Password: ")
if not db_password:
print "Database Name Required"
return
with open("conf.py", "w") as conftar:
conftar.write(confstr % {"db_name": db_name, "db_password": db_password })
if __name__=='__main__':
run()
|
{
"content_hash": "ba5e0e6095f5f355c4aa6f83b849fc95",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 118,
"avg_line_length": 30.046153846153846,
"alnum_prop": 0.684189565909996,
"repo_name": "gangadhar-kadam/sapphite_lib",
"id": "48c4959491f81ac09644b2c56e51e5678119a077",
"size": "17687",
"binary": false,
"copies": "2",
"ref": "refs/heads/1310",
"path": "wnf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74826"
},
{
"name": "HTML",
"bytes": "36644"
},
{
"name": "JavaScript",
"bytes": "1134668"
},
{
"name": "Python",
"bytes": "563769"
}
],
"symlink_target": ""
}
|
"""
/properties/
/properties/:id/
/properties/groups/
/properties/groups/:id/
/properties/groups/:name/
"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from ..models import Property, PropertyGroup
from ..defaults import PROPERTY_TEXT_FIELD
class TestPropertiesCRUD(APITestCase):
fixtures = ['erp_test/tests/fixtures/properties_crud.json',]
def test_property_list(self):
url = reverse('api:property-list')
response = self.client.get(url, format='json')
data = [{'id': obj.id, 'name': obj.name, 'title': obj.title,
'required': obj.required, 'position': obj.position,
'type': obj.type, 'unit': obj.unit
} for obj in Property.objects.all()]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
def test_property_create(self):
url = reverse('api:property-list')
data = {'name': 'test', 'title': 'Test', 'required': False,
'position': 999, 'type': PROPERTY_TEXT_FIELD}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_property_update(self):
url = reverse('api:property-detail', args=[1])
data = {'name': 'new server'}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_property_delete(self):
url = reverse('api:property-detail', args=[1])
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class TestPropertyGroupCRUD(APITestCase):
fixtures = ['erp_test/tests/fixtures/property_groups.json',]
def test_property_group_list(self):
url = reverse('api:property-group-list')
data = [{'id': obj.id, 'name': obj.name}
for obj in PropertyGroup.objects.all()]
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
def test_property_group_detail_by_pk(self):
url = reverse('api:property-group-detail', args=[2])
data = {'id': 2, 'name': 'cpu',
'properties': [
{'id': 2, 'name': 'cpu.socket',
'title': 'CPU Socket', 'required': True,
'position': 2, 'type': 3, 'unit': ''},
]}
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
def test_property_group_detail_by_name(self):
url = reverse('api:property-group-detail-by_name', args=['cpu'])
data = {'id': 2, 'name': 'cpu',
'properties': [
{'id': 2, 'name': 'cpu.socket',
'title': 'CPU Socket', 'required': True,
'position': 2, 'type': 3, 'unit': ''},
]}
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
|
{
"content_hash": "36ca1c1b52bb899153c11144655d390b",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 41.48148148148148,
"alnum_prop": 0.6047619047619047,
"repo_name": "baffolobill/mb_test_1",
"id": "95a7cabb2d3644b3db2409047f10fec280c548fe",
"size": "3376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mbtest1/erp_test/tests/test_properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "228768"
},
{
"name": "HTML",
"bytes": "54315"
},
{
"name": "JavaScript",
"bytes": "3483943"
},
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "305003"
},
{
"name": "Shell",
"bytes": "187"
}
],
"symlink_target": ""
}
|
"""
This module provides views for the Puppy Shelter project.
"""
#pylint: disable=import-error,no-member
from flask import render_template, url_for, redirect, request, flash
from puppy_shelter import app
from puppy_shelter.models import Puppy, Shelter
from puppy_shelter.database import db_session, get_or_create
# View for Shelter List
@app.route('/')
def index():
"""Function to return a page listing all shelters."""
shelters = db_session.query(Shelter).all()
return render_template('shelter_list.html', shelters=shelters)
# View for Shelter Information
@app.route('/shelter/<int:shelter_id>/')
def shelter_info(shelter_id):
"""Function to return a page to view a shelter's information.
Args:
shelter_id: ID of the shelter to view.
"""
shelter = db_session.query(Shelter).filter_by(shelter_id=shelter_id).first()
if not shelter:
return render_template('404.html')
puppies = db_session.query(Puppy).filter_by(shelter_id=shelter_id).all()
return render_template('shelter_info.html', shelter=shelter, puppies=puppies)
# View for Adopted Puppies
@app.route('/adopted-puppies/')
def adopted_puppies():
"""Function to return a page to view a list of adopted puppies."""
puppies = db_session.query(Puppy)\
.filter(Puppy.shelter_id == None, Puppy.owner != '').all()
return render_template('adopted_puppies.html', puppies=puppies)
# View for Puppy Information
@app.route('/puppy/<int:puppy_id>/')
def puppy_info(puppy_id):
"""Function to return a page to view a puppy's information.
Args:
puppy_id: ID of the puppy to view.
"""
puppy = db_session.query(Puppy).filter_by(puppy_id=puppy_id).first()
if not puppy:
return render_template('404.html', headline_text='Puppy Not Found!')
shelter = db_session.query(Shelter).filter_by(shelter_id=puppy.shelter_id).first()
return render_template('puppy_info.html', puppy=puppy, shelter=shelter)
# View for Adding a New Puppy
@app.route('/shelter/<int:shelter_id>/new_puppy/', methods=['GET', 'POST'])
def new_puppy(shelter_id):
"""Function to return a page to add a new puppy.
Args:
shelter_id: ID of the shelter where the new puppy will live.
"""
shelter = db_session.query(Shelter).filter_by(shelter_id=shelter_id).first()
if not shelter:
        return render_template('404.html', headline_text='Shelter Not Found!')
if request.method == 'POST':
puppy = get_or_create(db_session, Puppy,
name=request.form['name'],
picture=request.form['picture'],
gender=request.form['gender'],
weight=request.form['weight'],
shelter_id=shelter_id)
shelter.current_occupancy = shelter.current_occupancy + 1
db_session.add(shelter)
db_session.commit()
flash("New puppy {} created!".format(puppy.name))
return redirect(url_for('shelter_info', shelter_id=shelter_id))
else:
return render_template('new_puppy.html', shelter=shelter)
# View for Editing a Puppy
@app.route('/puppy/<int:puppy_id>/edit/', methods=['GET', 'POST'])
def edit_puppy(puppy_id):
"""Function to return a page to edit a puppy.
Args:
puppy_id: ID of the puppy to edit.
"""
puppy = db_session.query(Puppy)\
.filter_by(puppy_id=puppy_id).first()
if not puppy:
return render_template('404.html', headline_text='Puppy Not Found!')
# Get all available shelters. This is used in the template for
# populating the dropdown list of available shelters that have
# capacity available.
shelters = db_session.query(Shelter)\
.filter(Shelter.current_occupancy < Shelter.maximum_capacity).all()
if request.method == 'POST':
puppy.name = request.form['name']
puppy.picture = request.form['picture']
puppy.gender = request.form['gender']
puppy.weight = request.form['weight']
# The same form is used, but depending on the adoption status,
# it will sometimes have shelter_id info, and sometimes not.
if 'original_shelter_id' in request.form:
original_shelter_id = request.form['original_shelter_id']
shelter_id = request.form['shelter_id']
# If form shelter id doesn't match puppy's original shelter id,
# update both the shelter's and original shelter's occupancy.
if shelter_id != original_shelter_id:
puppy.shelter_id = shelter_id
shelter = db_session.query(Shelter)\
.filter_by(shelter_id=shelter_id).first()
original_shelter = db_session.query(Shelter)\
.filter_by(shelter_id=original_shelter_id).first()
if not shelter or not original_shelter:
return render_template('404.html', headline_text='Shelter Not Found!')
shelter.current_occupancy = shelter.current_occupancy + 1
original_shelter.current_occupancy = original_shelter.current_occupancy - 1
db_session.add_all([shelter, original_shelter])
db_session.commit()
else:
puppy.owner = request.form['owner']
db_session.add(puppy)
db_session.commit()
flash("{} updated!".format(puppy.name))
return redirect(url_for('puppy_info', puppy_id=puppy_id))
else:
return render_template('edit_puppy.html', puppy=puppy,
shelter=puppy.shelter, shelters=shelters)
# View for Adopting a Puppy
@app.route('/puppy/<int:puppy_id>/adopt/', methods=['GET', 'POST'])
def adopt_puppy(puppy_id):
"""Function to return a page to adopt a puppy.
Args:
puppy_id: ID of the puppy to adopt.
"""
puppy = db_session.query(Puppy)\
.filter_by(puppy_id=puppy_id).first()
if not puppy:
return render_template('404.html', headline_text='Puppy Not Found!')
if request.method == 'POST':
# Remove shelter_id and shelter from Puppy object, and add owner.
# Update Shelter's occupancy. Commit both objects to DB session.
shelter = db_session.query(Shelter)\
.filter_by(shelter_id=puppy.shelter_id).first()
if not shelter:
return render_template('404.html', headline_text='Shelter Not Found!')
puppy.owner = request.form['name']
        puppy.shelter_id = None  # must be None to match the adopted-puppies filter
puppy.shelter = None
shelter.current_occupancy = shelter.current_occupancy - 1
db_session.add_all([puppy, shelter])
db_session.commit()
flash("{} has been adopted!".format(puppy.name))
return redirect(url_for('puppy_info', puppy_id=puppy_id))
else:
return render_template('adopt_puppy.html', puppy=puppy, shelter=puppy.shelter)
|
{
"content_hash": "887e727745b24eb3ac4a2639ae4e23e2",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 91,
"avg_line_length": 39.01129943502825,
"alnum_prop": 0.6333091962346126,
"repo_name": "hessler/udacity-courses",
"id": "7e0cd95f5de689f2a803acef3edb487de1d3371f",
"size": "6905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "full_stack_foundations/vagrant/puppy_shelter/puppy_shelter/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5676"
},
{
"name": "HTML",
"bytes": "48037"
},
{
"name": "Makefile",
"bytes": "1920"
},
{
"name": "Python",
"bytes": "120709"
},
{
"name": "Ruby",
"bytes": "814"
},
{
"name": "Shell",
"bytes": "769"
}
],
"symlink_target": ""
}
|
"""
Abstract: Given a file of gene or OTU IDs per group, this script will output
groups present in each gene or OTU IDs.
"""
import sys
import csv
import argparse
from collections import defaultdict
def handle_program_options():
parser = argparse.ArgumentParser(
description=("Given a file of gene or OTU IDs per group, this script "
"will output groups present in each gene or OTU IDs.")
)
parser.add_argument("in_fnh",
help="Input file containing identifiers as columns "
"and gene or OTU IDs as rows, one per line.")
parser.add_argument("out_fnh",
help="Output file handle name. First column contains "
"gene or OTU IDs and subsequenct columns contain "
"identifiers/groups they are present in.")
return parser.parse_args()
def main():
args = handle_program_options()
# Get input
with open(args.in_fnh, "rU") as tykyuo:
reader = csv.DictReader(tykyuo, delimiter="\t")
data = [line for line in reader]
# Collect groups from input data - column names are assumed as group names
groups = data[1].keys()
# Get master list of gene or OTU IDs
all_ids = set()
for d in data:
for grp in groups:
if d[grp] != "":
all_ids.add(d[grp])
# Compare master list to all groups
id_groups = defaultdict(list)
for d in data:
for i in sorted(all_ids):
for grp in groups:
if i == d[grp]:
id_groups[i].append(grp)
# Write to output
with open(args.out_fnh, "w") as uyiouy:
for k, v in id_groups.iteritems():
uyiouy.write("{}\t{}\n".format(k, "\t".join(sorted(v))))
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "9a435bd78766a2899c3b3237aa9a173e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 32.3448275862069,
"alnum_prop": 0.5692963752665245,
"repo_name": "akshayparopkari/kadambari",
"id": "a37f195ad30bebe1c1b588ca641f2e98366e33ae",
"size": "1899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/overlap_comparisions.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "192334"
},
{
"name": "R",
"bytes": "8515"
},
{
"name": "Shell",
"bytes": "13450"
}
],
"symlink_target": ""
}
|
"""Config file for training NCSN with technique 5 only."""
import ml_collections
def get_config():
config = ml_collections.ConfigDict()
# training
config.training = training = ml_collections.ConfigDict()
training.batch_size = 128
training.n_epochs = 500000
training.n_iters = 300001
training.snapshot_freq = 5000
training.snapshot_freq_for_preemption = 2500
training.snapshot_sampling = True
training.anneal_power = 2
training.loss = 'ncsnv2'
# shared configs for sample generation
step_size = 0.00002
n_steps_each = 100
ckpt_id = 300000
final_only = True
noise_removal = False
# sampling
config.sampling = sampling = ml_collections.ConfigDict()
sampling.method = 'ald'
sampling.step_size = step_size
sampling.n_steps_each = n_steps_each
sampling.ckpt_id = ckpt_id
sampling.final_only = final_only
sampling.noise_removal = noise_removal
# eval
config.eval = evaluate = ml_collections.ConfigDict()
evaluate.batch_size = 1024
evaluate.num_samples = 1000
evaluate.step_size = step_size
evaluate.n_steps_each = n_steps_each
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 62
evaluate.verbose = False
# data
config.data = data = ml_collections.ConfigDict()
data.dataset = 'CIFAR10'
data.centered = False
data.image_size = 32
data.random_flip = True
# model
config.model = model = ml_collections.ConfigDict()
model.name = 'ncsn'
model.scale_by_sigma = False
model.sigma_begin = 1
model.num_classes = 10
model.ema_rate = 0.999
model.sigma_dist = 'geometric'
model.sigma_end = 0.01
model.normalization = 'InstanceNorm++'
model.nonlinearity = 'elu'
model.nf = 128
model.interpolation = 'bilinear'
# optim
config.optim = optim = ml_collections.ConfigDict()
optim.weight_decay = 0
optim.optimizer = 'Adam'
optim.lr = 1e-3
optim.beta1 = 0.9
optim.amsgrad = False
optim.eps = 1e-8
optim.warmup = 0
optim.grad_clip = -1.
config.seed = 42
return config
|
{
"content_hash": "db11d93bef2f59b2058b8bdfa66edd43",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 58,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7017276422764228,
"repo_name": "google-research/google-research",
"id": "9aca2deb02ac6efba0cded43d58c985b8a364a70",
"size": "2576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ncsnv3/configs/ncsn/cifar10_5.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='comment_preference',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='userprofile',
name='like_preference',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "c9e2ca0ddf44a0618584755366f28dc8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 53,
"avg_line_length": 24.217391304347824,
"alnum_prop": 0.5870736086175943,
"repo_name": "andela/codango",
"id": "44d0bccb0429542616567bb0d1a3bb26650b7234",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "codango/userprofile/migrations/0002_auto_20160606_1505.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75253"
},
{
"name": "HTML",
"bytes": "84350"
},
{
"name": "JavaScript",
"bytes": "7162710"
},
{
"name": "Python",
"bytes": "248732"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
}
|
import hr_timesheet_invoice_create
import hr_timesheet_analytic_profit
import hr_timesheet_final_invoice_create
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "9edf0dc6131f66b6911892ced9c4ea8a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 65,
"avg_line_length": 30,
"alnum_prop": 0.8333333333333334,
"repo_name": "ntiufalara/openerp7",
"id": "b718be1acbb529bc570745e550ca768439366428",
"size": "1159",
"binary": false,
"copies": "425",
"ref": "refs/heads/master",
"path": "openerp/addons/hr_timesheet_invoice/wizard/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
}
|
"""
=============================================================
Compare the effect of different scalers on data with outliers
=============================================================
Feature 0 (median income in a block) and feature 5 (number of households) of
the `California housing dataset
<http://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html>`_ have very
different scales and contain some very large outliers. These two
characteristics lead to difficulties to visualize the data and, more
importantly, they can degrade the predictive performance of many machine
learning algorithms. Unscaled data can also slow down or even prevent the
convergence of many gradient-based estimators.
Indeed many estimators are designed with the assumption that each feature takes
values close to zero or more importantly that all features vary on comparable
scales. In particular, metric-based and gradient-based estimators often assume
approximately standardized data (centered features with unit variances). A
notable exception are decision tree-based estimators that are robust to
arbitrary scaling of the data.
This example uses different scalers, transformers, and normalizers to bring the
data within a pre-defined range.
Scalers are linear (or more precisely affine) transformers and differ from each
other in the way to estimate the parameters used to shift and scale each
feature.
``QuantileTransformer`` provides a non-linear transformation in which distances
between marginal outliers and inliers are shrunk.
Unlike the previous transformations, normalization refers to a per sample
transformation instead of a per feature transformation.
The following code is a bit verbose, feel free to jump directly to the analysis
of the results_.
"""
# Author: Raghav RV <rvraghav93@gmail.com>
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Thomas Unterthiner
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.datasets import fetch_california_housing
print(__doc__)
dataset = fetch_california_housing()
X_full, y_full = dataset.data, dataset.target
# Take only 2 features to make visualization easier
# Feature of 0 has a long tail distribution.
# Feature 5 has a few but very large outliers.
X = X_full[:, [0, 5]]
distributions = [
('Unscaled data', X),
('Data after standard scaling',
StandardScaler().fit_transform(X)),
('Data after min-max scaling',
MinMaxScaler().fit_transform(X)),
('Data after max-abs scaling',
MaxAbsScaler().fit_transform(X)),
('Data after robust scaling',
RobustScaler(quantile_range=(25, 75)).fit_transform(X)),
('Data after quantile transformation (uniform pdf)',
QuantileTransformer(output_distribution='uniform')
.fit_transform(X)),
('Data after quantile transformation (gaussian pdf)',
QuantileTransformer(output_distribution='normal')
.fit_transform(X)),
('Data after sample-wise L2 normalizing',
Normalizer().fit_transform(X))
]
# scale the output between 0 and 1 for the colorbar
y = minmax_scale(y_full)
def create_axes(title, figsize=(16, 6)):
fig = plt.figure(figsize=figsize)
fig.suptitle(title)
# define the axis for the first plot
left, width = 0.1, 0.22
bottom, height = 0.1, 0.7
bottom_h = height + 0.15
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter = plt.axes(rect_scatter)
ax_histx = plt.axes(rect_histx)
ax_histy = plt.axes(rect_histy)
# define the axis for the zoomed-in plot
left = width + left + 0.2
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter_zoom = plt.axes(rect_scatter)
ax_histx_zoom = plt.axes(rect_histx)
ax_histy_zoom = plt.axes(rect_histy)
# define the axis for the colorbar
left, width = width + left + 0.13, 0.01
rect_colorbar = [left, bottom, width, height]
ax_colorbar = plt.axes(rect_colorbar)
return ((ax_scatter, ax_histy, ax_histx),
(ax_scatter_zoom, ax_histy_zoom, ax_histx_zoom),
ax_colorbar)
def plot_distribution(axes, X, y, hist_nbins=50, title="",
x0_label="", x1_label=""):
ax, hist_X1, hist_X0 = axes
ax.set_title(title)
ax.set_xlabel(x0_label)
ax.set_ylabel(x1_label)
# The scatter plot
colors = cm.plasma_r(y)
ax.scatter(X[:, 0], X[:, 1], alpha=0.5, marker='o', s=5, lw=0, c=colors)
# Removing the top and the right spine for aesthetics
# make nice axis layout
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
# Histogram for axis X1 (feature 5)
hist_X1.set_ylim(ax.get_ylim())
hist_X1.hist(X[:, 1], bins=hist_nbins, orientation='horizontal',
color='grey', ec='grey')
hist_X1.axis('off')
# Histogram for axis X0 (feature 0)
hist_X0.set_xlim(ax.get_xlim())
hist_X0.hist(X[:, 0], bins=hist_nbins, orientation='vertical',
color='grey', ec='grey')
hist_X0.axis('off')
###############################################################################
# Two plots will be shown for each scaler/normalizer/transformer. The left
# figure will show a scatter plot of the full data set while the right figure
# will exclude the extreme values considering only 99 % of the data set,
# excluding marginal outliers. In addition, the marginal distributions for each
# feature will be shown on the side of the scatter plot.
def make_plot(item_idx):
title, X = distributions[item_idx]
ax_zoom_out, ax_zoom_in, ax_colorbar = create_axes(title)
axarr = (ax_zoom_out, ax_zoom_in)
plot_distribution(axarr[0], X, y, hist_nbins=200,
x0_label="Median Income",
x1_label="Number of households",
title="Full data")
# zoom-in
zoom_in_percentile_range = (0, 99)
cutoffs_X0 = np.percentile(X[:, 0], zoom_in_percentile_range)
cutoffs_X1 = np.percentile(X[:, 1], zoom_in_percentile_range)
non_outliers_mask = (
np.all(X > [cutoffs_X0[0], cutoffs_X1[0]], axis=1) &
np.all(X < [cutoffs_X0[1], cutoffs_X1[1]], axis=1))
plot_distribution(axarr[1], X[non_outliers_mask], y[non_outliers_mask],
hist_nbins=50,
x0_label="Median Income",
x1_label="Number of households",
title="Zoom-in")
norm = mpl.colors.Normalize(y_full.min(), y_full.max())
mpl.colorbar.ColorbarBase(ax_colorbar, cmap=cm.plasma_r,
norm=norm, orientation='vertical',
label='Color mapping for values of y')
########################################################################
# .. _results:
#
# Original data
# -------------
#
# Each transformation is plotted showing two transformed features, with the
# left plot showing the entire dataset, and the right zoomed-in to show the
# dataset without the marginal outliers. A large majority of the samples are
# compacted to a specific range, [0, 10] for the median income and [0, 6] for
# the number of households. Note that there are some marginal outliers (some
# blocks have more than 1200 households). Therefore, a specific pre-processing
# can be very beneficial depending on the application. In the following, we
# present some insights and behaviors of those pre-processing methods in the
# presence of marginal outliers.
make_plot(0)
#######################################################################
# StandardScaler
# --------------
#
# ``StandardScaler`` removes the mean and scales the data to unit variance.
# However, the outliers have an influence when computing the empirical mean and
# standard deviation which shrink the range of the feature values as shown in
# the left figure below. Note in particular that because the outliers on each
# feature have different magnitudes, the spread of the transformed data on
# each feature is very different: most of the data lie in the [-2, 4] range for
# the transformed median income feature while the same data is squeezed in the
# smaller [-0.2, 0.2] range for the transformed number of households.
#
# ``StandardScaler`` therefore cannot guarantee balanced feature scales in the
# presence of outliers.
make_plot(1)
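##############################################################################
# As a quick sanity check (an added sketch, assuming default parameters),
# standard scaling is equivalent to centering and dividing by the population
# standard deviation in plain NumPy:
X_check = (X - X.mean(axis=0)) / X.std(axis=0)
assert np.allclose(StandardScaler().fit_transform(X), X_check)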
##########################################################################
# MinMaxScaler
# ------------
#
# ``MinMaxScaler`` rescales the data set such that all feature values are in
# the range [0, 1] as shown in the right panel below. However, this scaling
# compresses all inliers into the narrow range [0, 0.005] for the transformed
# number of households.
#
# Like ``StandardScaler``, ``MinMaxScaler`` is very sensitive to the presence of
# outliers.
make_plot(2)
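##############################################################################
# Equivalently, as a plain NumPy sketch (assuming the default [0, 1] feature
# range):
X_check = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
assert np.allclose(MinMaxScaler().fit_transform(X), X_check)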
#############################################################################
# MaxAbsScaler
# ------------
#
# ``MaxAbsScaler`` differs from the previous scalers in that the absolute
# values are mapped to the range [0, 1]. On positive only data, this scaler
# behaves similarly to ``MinMaxScaler`` and therefore also suffers from the
# presence of large outliers.
make_plot(3)
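##############################################################################
# As a plain NumPy sketch, max-abs scaling simply divides each feature by its
# maximum absolute value:
X_check = X / np.abs(X).max(axis=0)
assert np.allclose(MaxAbsScaler().fit_transform(X), X_check)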
##############################################################################
# RobustScaler
# ------------
#
# Unlike the previous scalers, the centering and scaling statistics of this
# scaler are based on percentiles and are therefore not influenced by a small
# number of very large marginal outliers. Consequently, the resulting range of
# the transformed feature values is larger than for the previous scalers and,
# more importantly, approximately the same for both features: most of the
# transformed values lie in a [-2, 3] range as seen in the zoomed-in figure.
# Note that the outliers themselves are still present in the transformed data.
# If a separate outlier clipping is desirable, a non-linear transformation is
# required (see below).
make_plot(4)
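##############################################################################
# As a plain NumPy sketch (assuming the (25, 75) quantile range used above),
# robust scaling centers on the median and divides by the interquartile range:
X_check = ((X - np.median(X, axis=0)) /
           (np.percentile(X, 75, axis=0) - np.percentile(X, 25, axis=0)))
assert np.allclose(
    RobustScaler(quantile_range=(25, 75)).fit_transform(X), X_check)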
###################################################################
# QuantileTransformer (uniform output)
# ------------------------------------
#
# ``QuantileTransformer`` applies a non-linear transformation such that the
# probability density function of each feature will be mapped to a uniform
# distribution. In this case, all the data will be mapped in the range [0, 1],
# even the outliers which cannot be distinguished anymore from the inliers.
#
# Like ``RobustScaler``, ``QuantileTransformer`` is robust to outliers in the
# sense that adding or removing outliers in the training set will yield
# approximately the same transformation on held out data. But contrary to
# ``RobustScaler``, ``QuantileTransformer`` will also automatically collapse
# any outliers by setting them to the a priori defined range boundaries (0 and
# 1).
make_plot(5)
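##############################################################################
# Conceptually (a rough sketch that ignores the ``n_quantiles`` discretization
# and tie handling), the uniform output for a single feature ``x`` is its
# empirical CDF, i.e. the normalized rank of each value:
#
#   ranks = np.searchsorted(np.sort(x), x)
#   u = ranks / float(len(x) - 1)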
##############################################################################
# QuantileTransformer (Gaussian output)
# -------------------------------------
#
# ``QuantileTransformer`` has an additional ``output_distribution`` parameter
# that allows mapping to a Gaussian distribution instead of a uniform
# distribution. Note that this non-parametric transformer introduces
# saturation artifacts for extreme values.
make_plot(6)
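##############################################################################
# The Gaussian output corresponds to feeding the uniform output ``u`` through
# the inverse normal CDF (a sketch; the implementation clips the input to
# ``ppf`` to avoid infinite values at 0 and 1):
#
#   from scipy.stats import norm
#   x_gauss = norm.ppf(np.clip(u, 1e-7, 1 - 1e-7))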
##############################################################################
# Normalizer
# ----------
#
# The ``Normalizer`` rescales the vector for each sample to have unit norm,
# independently of the distribution of the samples. It can be seen on both
# figures below where all samples are mapped onto the unit circle. In our
# example the two selected features have only positive values; therefore the
# transformed data only lie in the positive quadrant. This would not be the
# case if some original features had a mix of positive and negative values.
make_plot(7)
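##############################################################################
# As a plain NumPy sketch (assuming the default L2 norm and no zero-norm
# rows), per-sample normalization divides each row by its Euclidean norm:
X_check = X / np.linalg.norm(X, axis=1, keepdims=True)
assert np.allclose(Normalizer().fit_transform(X), X_check)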
plt.show()
|
{
"content_hash": "eb90da4d44e8fc5c5b1dcf324519b900",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 79,
"avg_line_length": 38.72782874617737,
"alnum_prop": 0.6586386607706886,
"repo_name": "wazeerzulfikar/scikit-learn",
"id": "677386a00191c84955ba51f35626374635198c47",
"size": "12711",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "examples/preprocessing/plot_all_scaling.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7246657"
},
{
"name": "Shell",
"bytes": "19959"
}
],
"symlink_target": ""
}
|
import warnings
import xml.etree.ElementTree  # ensures xml.etree.ElementTree.ParseError is available below
from defusedxml.ElementTree import fromstring
class ServerInfoItem(object):
def __init__(self, product_version, build_number, rest_api_version):
self._product_version = product_version
self._build_number = build_number
self._rest_api_version = rest_api_version
def __str__(self):
return (
"ServerInfoItem: [product version: "
+ self._product_version
+ ", build no.:"
+ self._build_number
+ ", REST API version:"
+ self.rest_api_version
+ "]"
)
@property
def product_version(self):
return self._product_version
@property
def build_number(self):
return self._build_number
@property
def rest_api_version(self):
return self._rest_api_version
@classmethod
def from_response(cls, resp, ns):
try:
parsed_response = fromstring(resp)
except xml.etree.ElementTree.ParseError as error:
warnings.warn("Unexpected response for ServerInfo: {}".format(resp))
return cls("Unknown", "Unknown", "Unknown")
product_version_tag = parsed_response.find(".//t:productVersion", namespaces=ns)
rest_api_version_tag = parsed_response.find(".//t:restApiVersion", namespaces=ns)
build_number = product_version_tag.get("build", None)
product_version = product_version_tag.text
rest_api_version = rest_api_version_tag.text
return cls(product_version, build_number, rest_api_version)
|
{
"content_hash": "b8535ca57c516d8d5bf8c5b4eeef8332",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 89,
"avg_line_length": 31.56,
"alnum_prop": 0.6153358681875792,
"repo_name": "tableau/server-client-python",
"id": "350ae3a0de0681c1a02c4f7a1990c1f48c6f2a37",
"size": "1578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tableauserverclient/models/server_info_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "858778"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
}
|
HEADER_PROTEIN = 'Protein ID'
HEADER_PROTEINS = 'Protein ID(s)'
HEADER_GENEID = 'Gene ID'
HEADER_GENENAME = 'Gene Name'
HEADER_DESCRIPTION = 'Description'
HEADER_COVERAGE = 'Coverage'
HEADER_NO_PROTEIN = 'Protein count'
HEADER_CONTENTPROT = 'Proteins in group'
HEADER_NO_UNIPEP = 'Unique peptide count'
HEADER_NO_PEPTIDE = 'Peptide count'
HEADER_NO_PSM = 'PSM count'
HEADER_NO_PSMS_SUFFIX = ' - Quanted PSM count'
HEADER_NO_FULLQ_PSMS = 'Fully quanted PSM count'
HEADER_AREA = 'MS1 precursor area'
HEADER_QVAL = 'q-value'
HEADER_PEP = 'PEP'
HEADER_QVAL_MODELED = 'q-value (linear model)'
HEADER_QSCORE = 'Q-score best peptide'
PICKED_HEADER = [HEADER_QSCORE, HEADER_QVAL]
ACCESSIONS = {
'prottable': HEADER_PROTEIN,
'genetable': HEADER_GENEID,
'associdtable': HEADER_GENENAME,
}
TPROT_HEADER_ACCS = [HEADER_PROTEIN, HEADER_GENENAME, HEADER_GENEID, HEADER_PROTEIN]
|
{
"content_hash": "804b48f52f558e0de127f5bf11a89b9b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 34.53846153846154,
"alnum_prop": 0.7126948775055679,
"repo_name": "glormph/msstitch",
"id": "afc8f0030add8323a015b056d9922f5ddff071df",
"size": "898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app/dataformats/prottable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "451340"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
import os
import tarfile
from bsonstream import KeyValueBSONInput
def read_languages(sourcedir, outdir):
# Read
user_langs = {}
languages = set()
for fname in os.listdir(sourcedir):
print("Opening " + fname)
f = tarfile.open(sourcedir + "/" + fname).extractfile("dump/github/repos.bson")
print("Parsing " + fname)
stream = KeyValueBSONInput(fh = f)
for _, repo in stream:
try:
user = repo["owner"]["login"]
language = repo["language"]
size = repo["size"]
if language:
languages.add(language)
if user in user_langs:
if language not in user_langs[user]:
user_langs[user][language] = 0
user_langs[user][language] += int(size)
else:
user_langs[user] = {language: int(size)}
            except Exception as e:
                print("Problem reading repo entry:", repo, file=sys.stderr)
                print(e, file=sys.stderr)
f.close()
# Write
print("Writing to files")
files = {}
for language in languages:
files[language] = open(outdir + "/" + language.replace("/", "|"), "w")
for user in user_langs:
for l in user_langs[user]:
if(user_langs[user][l] > 0):
files[l].write(user + ',' + str(user_langs[user][l]) + '\n')
for f in files.values():
f.flush()
f.close()
if __name__ == "__main__":
if(len(sys.argv) < 3):
print("Usage: python languages.py inputdir outputdir")
else:
read_languages(sys.argv[1], sys.argv[2])
|
{
"content_hash": "0389f88ffab5b936051ab9b28e37cb71",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 31.263157894736842,
"alnum_prop": 0.5134680134680135,
"repo_name": "DevMine/devmine-features",
"id": "08f31cdc3d33a6903bb2129e93cdef8536f6e513",
"size": "1782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsing/bson/languages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33189"
},
{
"name": "Scala",
"bytes": "1505"
},
{
"name": "Shell",
"bytes": "3548"
}
],
"symlink_target": ""
}
|
import numpy as np
data = np.load('./test/rawdata/doorOpen.npy')
np.savetxt('doorOpen1.csv', data, delimiter=',')
|
{
"content_hash": "eee2275acb6f912c05f1f800722ebc72",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 48,
"avg_line_length": 28.75,
"alnum_prop": 0.7043478260869566,
"repo_name": "IoT-Expedition/Edge-Analytics",
"id": "4510779f1d8383b9ac855349ba7bc83c71d49195",
"size": "115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtual_sensor/machine_learning/door/saveCsv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2301"
},
{
"name": "Python",
"bytes": "99164"
}
],
"symlink_target": ""
}
|
import functools
import chainer
class cached_property(object):
"""Cache a result of computation of Chainer functions
Caches are stored for each chainer.config.enable_backprop.
The following example calls ``F.sigmoid`` only once.
>>> class C(object):
... def __init__(self, x):
... self.x = x
... @chainer.utils.cache.cached_property
... def y(self):
... return F.sigmoid(self.x)
... def loss(self, t):
... return F.mean_squared_error(self.y, t)
>>> x = chainer.Variable(np.array([2, 3], np.float32))
>>> obj = C(x)
>>> loss1 = obj.loss(np.array([0.1, 0.2], np.float32))
>>> loss2 = obj.loss(np.array([0.3, 0.4], np.float32))
However, the following example recomputes `obj.y` because the second call
requires the computational graph.
>>> with chainer.no_backprop_mode():
... loss1 = obj.loss(np.array([0.1, 0.2], np.float32))
>>> loss2 = obj.loss(np.array([0.3, 0.4], np.float32))
"""
def __init__(self, func):
functools.update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
caches = obj.__dict__.setdefault(self.__name__, {})
backprop_enabled = chainer.config.enable_backprop
try:
return caches[backprop_enabled]
except KeyError:
value = self.func(obj)
caches[backprop_enabled] = value
return value
def __set__(self, obj, cls):
# Define __set__ to make cached_property a data descriptor
raise AttributeError(
'attribute \'{}\' of {} is readonly'.format(
self.__name__, cls))
|
{
"content_hash": "659d463b60d9cdaaef990f5e29dd0f17",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 30.982142857142858,
"alnum_prop": 0.562536023054755,
"repo_name": "pfnet/chainer",
"id": "b484ae875fc45525e2c6404f7ca8013dd578606e",
"size": "1735",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "chainer/utils/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2564338"
}
],
"symlink_target": ""
}
|
import os
import sys
root_fldr = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'rawdata'))
sys.path.insert(1, root_fldr)
import generate
op_fldr = generate.dat_fldr + os.sep
words = generate.get_list_words()
places = generate.get_list_places()
# Diary
custom_event = ['Sales Meeting', 'Workshop', 'Training', 'Phone Hookup', 'Admin', 'Development work', 'Testing', 'Documentation']
tpe_event = ['DATE', 'PEOPLE', 'PLACE', custom_event ]
t_event = generate.TableGenerator(15,tpe_event, ['DATE', 'Name', 'Location', 'Details'])
t_event.save_table(op_fldr + 'diary.csv')
# Tasks
custom_task = ['Write report', 'fix bug', 'work on documentation', 'Add new feature', 'test new version', 'Demo to customer']
t_task = generate.TableGenerator(8, ['PEOPLE', ['Hi', 'Med', 'Low'], custom_task], ['Assigned to', 'Priority', 'Task'])
t_task.save_table(op_fldr + 'tasks.csv')
# Contacts
lbl_contact = ['Year_met', 'Customer_id', 'Age', 'First Name', 'Last Name', 'Country', 'Amount']
tpe_contact = ['YEAR', 'STRING', 'INT', 'PEOPLE', 'PEOPLE', 'PLACE', 'CURRENCY']
t_contact = generate.TableGenerator(50, tpe_contact, lbl_contact)
t_contact.save_table(op_fldr + 'contacts.csv')
# Notes
t_notes = generate.TableGenerator(10, ['DATE','WORD','TEXT'], ['Date','Tags','Note Contents'])
t_notes.save_table(op_fldr + 'notes.csv')
|
{
"content_hash": "594c6bc113975b6c8ed0194c4035f865",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 129,
"avg_line_length": 41.6875,
"alnum_prop": 0.6724137931034483,
"repo_name": "acutesoftware/rawdata",
"id": "746de7d4aafcbd368d856d42e141682c8dca80d7",
"size": "1375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/create_pim_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "452"
},
{
"name": "HTML",
"bytes": "82439"
},
{
"name": "Python",
"bytes": "106572"
}
],
"symlink_target": ""
}
|
import abc
import inspect
import re
from mountapi.core import exceptions
class AbstractConverter(metaclass=abc.ABCMeta):
param_url: str = None
param_regex: str = None
@classmethod
def path_to_regex(cls, path):
return re.sub(cls.param_url, cls.param_regex, path) + '$'
class IntConverter(AbstractConverter):
param_url = r'<(?P<param>\w+):int>'
param_regex = r'(?P<\1>\\d+)'
class StrConverter(AbstractConverter):
param_url = r'<(?P<param>\w+):str>'
param_regex = r'(?P<\1>\\w+)'
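# Example (an illustrative note, not part of the original module):
# IntConverter.path_to_regex('/users/<id:int>/') returns
# '/users/(?P<id>\d+)/$', so a path like '/users/42/' matches with
# groupdict() == {'id': '42'}.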
class AbstractSchema(exceptions.NotImplementedMixin, metaclass=abc.ABCMeta):
@abc.abstractmethod
def build(self):
self.not_implemented()
@abc.abstractmethod
def match(self, path):
self.not_implemented()
class Schema(AbstractSchema):
_converter_map = {int: IntConverter, str: StrConverter}
def __init__(self, routes: list) -> None:
self._routes = routes
self._schema = None
def build(self) -> None:
if self._schema is None:
self._schema = self._build_schema()
def _build_schema(self):
schema = {}
for route in self._routes:
schema[route.path] = {
'endpoint': route.endpoint,
'regex': self._get_path_regex(route.path),
**self._get_schema_http_methods(route)
}
return schema
def _get_path_regex(self, path):
for converter in self._converter_map.values():
path = converter.path_to_regex(path)
return path
def _get_schema_http_methods(self, route):
return {
http_method: {
'handler': getattr(route.endpoint, http_method.lower()),
'params': self._get_func_params(
getattr(route.endpoint, http_method.lower())
)
} for http_method in route.endpoint.get_allowed_methods()
}
def _get_func_params(self, func):
return {
param.name: self._converter_map[param.annotation]
for param in inspect.signature(func).parameters.values()
if param.annotation != inspect.Parameter.empty
}
def match(self, path):
for route_path in self._schema:
route_match = re.match(self._schema[route_path]['regex'], path)
if route_match:
return {
'endpoint': self._schema[route_path]['endpoint'],
'kwargs': route_match.groupdict()
}
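        # Note: this is a for/else - NotFound is raised only when the loop
        # completes without any route having matched (and returned) above.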
else:
raise exceptions.NotFound()
|
{
"content_hash": "9c9d5d3bdf77a252fd86408aabd0397a",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 76,
"avg_line_length": 28.61111111111111,
"alnum_prop": 0.5712621359223301,
"repo_name": "pyQuest/mount-api",
"id": "b39a8c2a43f3aa0270642ce5b0751b399c77a283",
"size": "2575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mountapi/schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "232"
},
{
"name": "Python",
"bytes": "16204"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, render_template, redirect, request, url_for
from flask_jwt_extended import jwt_required
from potnanny.core.models import Room
from potnanny.core.schemas import RoomSchema
from potnanny.crud import CrudInterface
from potnanny.extensions import db
bp = Blueprint('dashboard', __name__, template_folder='templates')
ifc = CrudInterface(Room, RoomSchema)
@bp.route('/', methods=['GET'])
@jwt_required
def index():
serialized, err, code = ifc.get()
if err:
pass
return render_template('dashboard/index.html',
title='Dashboard',
rooms=serialized)
@bp.route('/<int:pk>', methods=['GET'])
@jwt_required
def edit(pk):
serialized, err, code = ifc.get(pk)
if err:
pass
return render_template('dashboard/edit.html',
title='Customize Room',
room=serialized)
|
{
"content_hash": "02503eada4c428f1637d690ade747172",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.6655367231638418,
"repo_name": "jeffleary00/greenery",
"id": "b20316e0d2a0dc643226023269fa9fe10adffd8e",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "potnanny/apps/dashboard/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "7782"
},
{
"name": "HTML",
"bytes": "221892"
},
{
"name": "JavaScript",
"bytes": "1356314"
},
{
"name": "Python",
"bytes": "69263"
},
{
"name": "Shell",
"bytes": "5343"
}
],
"symlink_target": ""
}
|
"""Run flake8 tests
This test goes through all Python files in the specified test_files_dirs
directories and runs flake8 <filename> and reports the results
Example invocation
python -m testing.test_flake8 --test_files_dirs=/kubeflow/application/tests,/kubeflow/common/tests,/kubeflow/jupyter/tests,/kubeflow/iap/tests,/kubeflow/gcp/tests,/kubeflow/tensorboard/tests,/kubeflow/examples/tests,/kubeflow/metacontroller/tests,/kubeflow/profiles/tests,/kubeflow/tf-training/tests # noqa: E501
"""
from __future__ import print_function
import argparse
import json
import logging
import os
from kubeflow.testing import test_helper, util
FLAKE8_OPTS = """--count --select=E901,E999,F821,F822,F823 --show-source
--statistics""".split()
# Test only files which end in '.py' or have no suffix
def should_test(file_path):
_, ext = os.path.splitext(file_path.lower())
return ext in ('.py', '')
def run(test_files_dirs, test_case):
# Go through each Python file in test_files_dirs and run flake8
for test_files_dir in test_files_dirs:
for root, _, files in os.walk(test_files_dir):
for test_file in files:
full_path = os.path.join(root, test_file)
assert root == os.path.dirname(full_path)
if should_test(full_path):
logging.info("Testing: %s", test_file)
try:
output = util.run(['flake8', full_path] + FLAKE8_OPTS, cwd=root)
try:
parsed = json.loads(output)
            except ValueError:  # json.loads raises ValueError on non-JSON output
logging.error(
"Output of flake8 could not be parsed as json; "
"output: %s", output)
parsed = {}
if not hasattr(parsed, "get"):
# Legacy style tests emit true rather than a json object.
# Parsing the string as json converts it to a bool so we
# just use parsed as test_passed
# Old style tests actually use std.assert so flake8 will
# actually return an error in the case the test did
# not pass.
              logging.warning(
"flake8 is using old style and not emitting an object. "
"Result was: %s. Output will be treated as a boolean", output)
test_passed = parsed
else:
test_passed = parsed.get("pass", False)
if not test_passed:
msg = '{} test failed'.format(test_file)
test_case.add_failure_info(msg)
logging.error(
'{}. See Subprocess output for details.'.format(msg))
except Exception as e:
msg = '{} test failed'.format(test_file)
test_case.add_failure_info(msg)
logging.error('{} with exception {}. See Subprocess output for '
'details.'.format(msg, e))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--test_files_dirs",
default=".",
type=str,
help="Comma separated directories containing Python files")
args, _ = parser.parse_known_args()
return args
def test_flake8(test_case): # pylint: disable=redefined-outer-name
args = parse_args()
if not args.test_files_dirs:
raise ValueError('--test_files_dirs needs to be set')
run(args.test_files_dirs.split(','), test_case)
if __name__ == "__main__":
test_case = test_helper.TestCase(name='test_flake8', test_func=test_flake8)
test_suite = test_helper.init(
name='flake8_test_suite', test_cases=[test_case])
test_suite.run()
|
{
"content_hash": "0bfa8eb4236146a54c500a59d76fe3b1",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 313,
"avg_line_length": 35.97979797979798,
"alnum_prop": 0.6162268388545761,
"repo_name": "kubeflow/kubeflow",
"id": "59a3ab5d7680a24919f1cea62d2db05d16bc073a",
"size": "4215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_flake8.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44370"
},
{
"name": "Dockerfile",
"bytes": "32340"
},
{
"name": "Go",
"bytes": "266067"
},
{
"name": "HTML",
"bytes": "65169"
},
{
"name": "JavaScript",
"bytes": "200781"
},
{
"name": "Jsonnet",
"bytes": "7366280"
},
{
"name": "Makefile",
"bytes": "43885"
},
{
"name": "PowerShell",
"bytes": "7883"
},
{
"name": "Pug",
"bytes": "15633"
},
{
"name": "Python",
"bytes": "319086"
},
{
"name": "SCSS",
"bytes": "21397"
},
{
"name": "Shell",
"bytes": "7933"
},
{
"name": "TypeScript",
"bytes": "870299"
}
],
"symlink_target": ""
}
|
from six.moves.urllib import parse
from mod_pywebsocket import common
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
send_payload = ''
r = request.ws_resource.split('?', 1)
if len(r) == 2:
params = parse.parse_qs(r[1])
if 'payload' in params:
send_payload = params['payload'][0]
msgutil.send_ping(request, send_payload)
# We need to use an internal function to detect a pong frame from the
# client.
opcode, recv_payload, final, reserved1, reserved2, reserved3 = \
request.ws_stream._receive_frame()
if (opcode == common.OPCODE_PONG and recv_payload.decode('UTF-8') == send_payload and final and not reserved1 and not reserved2
and not reserved3):
msgutil.send_message(request, 'PASS')
else:
msgutil.send_message(
request,
'FAIL: Received unexpected frame: opcode = %r, payload = %r, '
'final = %r, reserved1 = %r, reserved2 = %r, reserved3 = %r'
% (opcode, recv_payload, final, reserved1, reserved2, reserved3))
|
{
"content_hash": "2b7f6620932bea63ad81b0309b33bbc6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 131,
"avg_line_length": 35.875,
"alnum_prop": 0.6358885017421603,
"repo_name": "ric2b/Vivaldi-browser",
"id": "b9d8c98f264a26e6328f3018938cd6c0c00e728c",
"size": "1148",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "chromium/third_party/blink/web_tests/http/tests/websocket/pong_wsh.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Script that will remind developers to run updateApi."""
import argparse
import os.path
import sys
WARNING_COLOR = '\033[33m'
END_COLOR = '\033[0m'
WARNING_NO_API_FILES = """
{}**********************************************************************
You changed library classes, but you have no current.txt changes.
Did you forget to run ./gradlew updateApi?
**********************************************************************{}
""".format(WARNING_COLOR, END_COLOR)
WARNING_OLD_API_FILES = """
{}**********************************************************************
Your current.txt is older than your current changes in library classes.
Did you forget to re-run ./gradlew updateApi?
**********************************************************************{}
""".format(WARNING_COLOR, END_COLOR)
def main(args=None):
if not ('ENABLE_UPDATEAPI_WARNING' in os.environ):
sys.exit(0)
parser = argparse.ArgumentParser()
parser.add_argument('--file', '-f', nargs='*')
parser.set_defaults(format=False)
args = parser.parse_args()
api_files = [f for f in args.file
if f.endswith('.txt') and '/api/' in f]
    source_files = [f for f in args.file
                    if ("buildSrc/" not in f and
                        ("/src/main/" in f or
                         "/src/commonMain/" in f or
                         "/src/androidMain/" in f))]
if len(source_files) == 0:
sys.exit(0)
if len(api_files) == 0:
print(WARNING_NO_API_FILES)
sys.exit(77) # 77 is a warning code in repohooks
last_source_timestamp = max([os.path.getmtime(f) for f in source_files])
last_api_timestamp = max([os.path.getmtime(f) for f in api_files])
if last_source_timestamp > last_api_timestamp:
print(WARNING_OLD_API_FILES)
sys.exit(77) # 77 is a warning code in repohooks
sys.exit(0)
if __name__ == '__main__':
main()
|
{
"content_hash": "40b3dcc4a5003702c2e001d6df8009e5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 74,
"avg_line_length": 31.724137931034484,
"alnum_prop": 0.5407608695652174,
"repo_name": "androidx/androidx",
"id": "e5395b3aa0d1c5a4d4a0f82359fa10e5d34a578d",
"size": "2465",
"binary": false,
"copies": "3",
"ref": "refs/heads/androidx-main",
"path": "development/apilint.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "263978"
},
{
"name": "ANTLR",
"bytes": "19860"
},
{
"name": "C",
"bytes": "4764"
},
{
"name": "C++",
"bytes": "9020585"
},
{
"name": "CMake",
"bytes": "11999"
},
{
"name": "HTML",
"bytes": "21175"
},
{
"name": "Java",
"bytes": "59499889"
},
{
"name": "JavaScript",
"bytes": "1343"
},
{
"name": "Kotlin",
"bytes": "66123157"
},
{
"name": "Python",
"bytes": "292398"
},
{
"name": "Shell",
"bytes": "167367"
},
{
"name": "Swift",
"bytes": "3153"
},
{
"name": "TypeScript",
"bytes": "7599"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.contrib.auth.models import Permission
from django.core import mail
# User
from notifier import shortcuts, models
###############################################################################
## Tests
###############################################################################
class PreferencesTests(TestCase):
def setUp(self):
self.email_backend = models.Backend.objects.get(name='email')
self.sms_backend = models.Backend.objects.create(
display_name='SMS',
name='sms',
enabled=True,
description='SMS delivery method',
klass='notifier.backends.BaseBackend')
self.test1_notification = models.Notification.objects.create(
display_name='Test Notification 1',
name='test-not-1',
public=True,
)
self.test1_notification.backends.add(
self.email_backend, self.sms_backend)
self.test2_notification = models.Notification.objects.create(
display_name='Test Notification 2',
name='test-not-2',
public=False,
)
self.test2_notification.backends.add(
self.email_backend)
self.user1 = User.objects.create(
username='user1',
email='user1@example.com'
)
self.group1 = Group.objects.create(
name='group1'
)
self.user1.groups.add(self.group1)
models.GroupPrefs.objects.create(
group=self.group1,
notification=self.test1_notification,
backend=self.email_backend,
notify=True
)
def test1GroupPreference(self):
"""Test if group preference applies to user"""
method_dict = self.test1_notification.get_user_prefs(user=self.user1)
# print models.Backend.objects.values_list(
# 'display_name', 'name', 'id')
# print method_dict
self.assertEqual(method_dict[self.email_backend], True,
msg='Group notification preference failed.')
def test2UserPreference(self):
"""Test if User preference supercedes group preference"""
models.UserPrefs.objects.create(
user=self.user1,
notification=self.test1_notification,
backend=self.email_backend,
notify=False
)
method_dict = self.test1_notification.get_user_prefs(user=self.user1)
# print models.Backend.objects.values_list(
# 'display_name', 'name', 'id')
# print method_dict
self.assertEqual(method_dict[self.email_backend], False,
msg='User notification preference failed.')
class PermissionTests(TestCase):
"""Tests related to permission checking for notifications."""
def setUp(self):
self.user1 = User.objects.create(
username='user1',
email='user1@example.com'
)
self.permission1 = Permission.objects.create(
codename='test-permission',
name='Test Permission',
content_type=ContentType.objects.get_for_model(User)
)
self.permission2 = Permission.objects.create(
codename='test-permission-2',
name='Test Permission 2',
content_type=ContentType.objects.get_for_model(User)
)
self.test1_notification = models.Notification.objects.create(
display_name='Test Notification 1',
name='test-not-1',
public=True,
)
self.test1_notification.permissions.add(self.permission1,
self.permission2)
def test1PermissionFunction(self):
"""Test the Notification.check_perms function."""
self.assertEqual(self.test1_notification.check_perms(self.user1),
False, msg='Permission check Failed')
self.user1.user_permissions.add(self.permission1)
# Django caches permissions on user, so refetch user from the database
self.user1 = User.objects.get(pk=self.user1.pk)
self.assertEqual(self.test1_notification.check_perms(self.user1),
False, msg='Permission check Failed')
self.user1.user_permissions.add(self.permission2)
# Django caches permissions on user, so refetch user from the database
self.user1 = User.objects.get(pk=self.user1.pk)
self.assertEqual(self.test1_notification.check_perms(self.user1),
True, msg='Permission check Failed')
class UtilityFunctionTests(TestCase):
def test1GetPermissionQueryset(self):
"""Test the shortcuts._get_permission_queryset function."""
permissions = Permission.objects.filter(id__in=[1, 2])
        # Compare querysets after converting to lists, because different
# instance of same queryset will not test as equal.
resp = shortcuts._get_permission_queryset(permissions)
# print resp
self.assertEqual(list(resp), list(permissions),
msg='Queryset input failed')
resp = shortcuts._get_permission_queryset(permissions[0])
# print resp
self.assertEqual(resp, [permissions.get(id=1)],
msg='Single object input failed')
resp = shortcuts._get_permission_queryset(
list(permissions.values_list('codename', flat=True)))
# print resp
self.assertEqual(list(resp), list(permissions),
msg='Permission codename list input failed')
resp = shortcuts._get_permission_queryset(permissions[0].codename)
# print resp
self.assertEqual(list(resp), list(permissions.filter(id=1)),
msg='Permission codename input failed')
class EmailTests(TestCase):
def setUp(self):
self.user1 = User.objects.create(
username='user1',
email='user1@example.com'
)
self.email_backend = models.Backend.objects.get(name='email')
self.test_notification = shortcuts.create_notification(
'test-notification',
display_name='Test',
permissions=None, # No permissions required
backends=None, # All backend will be added ('email')
public=True
)
models.UserPrefs.objects.create(
user=self.user1,
notification=self.test_notification,
backend=self.email_backend,
notify=True
)
def test_send_notification(self):
shortcuts.send_notification('test-notification', self.user1)
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
# Verify that the subject of the first message is correct.
self.assertEqual(mail.outbox[0].subject, 'django-notify test email')
|
{
"content_hash": "b96b40ec2792de3acdd6b79033704522",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 79,
"avg_line_length": 35.14795918367347,
"alnum_prop": 0.6124256060386123,
"repo_name": "scdoshi/django-notifier",
"id": "e8b4a19c11539f16b545403de4454d458683acac",
"size": "7069",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "notifier/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "64964"
},
{
"name": "Shell",
"bytes": "6719"
}
],
"symlink_target": ""
}
|
"""
Implementation of a GL Program object.
This class parses the source code to obtain the names and types of
uniforms, attributes, varyings and constants. This information is used
to provide the user with a natural way to set variables.
Gloo vs GLIR
------------
Done in this class:
* Check the data shape given for uniforms and attributes
* Convert uniform data to array of the correct type
* Check whether any variables are set that are not present in source code
Done by GLIR:
* Check whether a set uniform/attribute is not active (a warning is given)
* Check whether an active attribute or uniform is not set (a warning is given)
"""
import re
import numpy as np
from .globject import GLObject
from .buffer import VertexBuffer, IndexBuffer, DataBuffer
from .texture import BaseTexture, Texture2D, Texture3D, Texture1D
from ..util import logger
from .util import check_enum
from ..ext.six import string_types
from .context import get_current_canvas
from .preprocessor import preprocess
# ----------------------------------------------------------- Program class ---
class Program(GLObject):
""" Shader program object
A Program is an object to which shaders can be attached and linked to
create the final program.
Uniforms and attributes can be set using indexing: e.g.
``program['a_pos'] = pos_data`` and ``program['u_color'] = (1, 0, 0)``.
Parameters
----------
vert : str
The vertex shader to be used by this program
frag : str
The fragment shader to be used by this program
count : int (optional)
The program will prepare a structured vertex buffer of count
vertices. All attributes set using ``prog['attr'] = X`` will
be combined into a structured vbo with interleaved elements, which
is more efficient than having one vbo per attribute.
Notes
-----
If several shaders are specified, only one can contain the main
function. OpenGL ES 2.0 does not support a list of shaders.
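Examples
--------
A minimal sketch, not an authoritative recipe (the GLSL sources here are
illustrative placeholders)::

    vert = "attribute vec2 a_pos; void main() { gl_Position = vec4(a_pos, 0., 1.); }"
    frag = "void main() { gl_FragColor = vec4(1., 0., 0., 1.); }"
    program = Program(vert, frag)
    program['a_pos'] = np.zeros((3, 2), dtype=np.float32)
    # program.draw('triangles')  # needs a current canvas / GL context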
"""
_GLIR_TYPE = 'Program'
_gtypes = { # DTYPE, NUMEL
'float': (np.float32, 1),
'vec2': (np.float32, 2),
'vec3': (np.float32, 3),
'vec4': (np.float32, 4),
'int': (np.int32, 1),
'ivec2': (np.int32, 2),
'ivec3': (np.int32, 3),
'ivec4': (np.int32, 4),
'bool': (np.bool_, 1),  # np.bool_ scalar type; the np.bool alias was removed in NumPy 1.24
'bvec2': (np.bool_, 2),
'bvec3': (np.bool_, 3),
'bvec4': (np.bool_, 4),
'mat2': (np.float32, 4),
'mat3': (np.float32, 9),
'mat4': (np.float32, 16),
'sampler1D': (np.uint32, 1),
'sampler2D': (np.uint32, 1),
'sampler3D': (np.uint32, 1),
}
# ---------------------------------
def __init__(self, vert=None, frag=None, count=0):
GLObject.__init__(self)
# Init source code for vertex and fragment shader
self._shaders = '', ''
# Init description of variables obtained from source code
self._code_variables = {} # name -> (kind, type_, name)
# Init user-defined data for attributes and uniforms
self._user_variables = {} # name -> data / buffer / texture
# Init pending user-defined data
self._pending_variables = {} # name -> data
# NOTE: we *could* allow vert and frag to be a tuple/list of shaders,
# but that would complicate the GLIR implementation, and it seems
# unnecessary
# Check and set shaders
if isinstance(vert, string_types) and isinstance(frag, string_types):
self.set_shaders(vert, frag)
elif not (vert is None and frag is None):
raise ValueError('Vert and frag must either both be str or None')
# Build associated structured vertex buffer if count is given.
# This makes it easy to create a structured vertex buffer
# without having to create a numpy array with structured dtype.
# All assignments must be done before the GLIR commands are
# sent away for parsing (in draw) though.
self._count = count
self._buffer = None # Set to None in draw()
if self._count > 0:
dtype = []
for kind, type_, name, size in self._code_variables.values():
if kind == 'attribute':
dt, numel = self._gtypes[type_]
dtype.append((name, dt, numel))
self._buffer = np.zeros(self._count, dtype=dtype)
self.bind(VertexBuffer(self._buffer))
def set_shaders(self, vert, frag):
""" Set the vertex and fragment shaders.
Parameters
----------
vert : str
Source code for vertex shader.
frag : str
Source code for fragment shaders.
"""
if not vert or not frag:
raise ValueError('Vertex and fragment code must both be non-empty')
# pre-process shader code for #include directives
vert, frag = preprocess(vert), preprocess(frag)
# Store source code, send it to glir, parse the code for variables
self._shaders = vert, frag
self._glir.command('SHADERS', self._id, vert, frag)
# All current variables become pending variables again
for key, val in self._user_variables.items():
self._pending_variables[key] = val
self._user_variables = {}
# Parse code (and process pending variables)
self._parse_variables_from_code()
@property
def shaders(self):
""" Source code for vertex and fragment shader
"""
return self._shaders
@property
def variables(self):
""" A list of the variables in use by the current program
The list is obtained by parsing the GLSL source code.
Returns
-------
variables : list
Each variable is represented as a tuple (kind, type, name),
where `kind` is 'attribute', 'uniform', 'uniform_array',
'varying' or 'const'.
"""
# Note that internally the variables are stored as a dict
# that maps names -> tuples, for easy looking up by name.
return [x[:3] for x in self._code_variables.values()]
def _parse_variables_from_code(self):
""" Parse uniforms, attributes and varyings from the source code.
"""
# Get one string of code with comments removed
code = '\n\n'.join(self._shaders)
# flags must be passed by keyword: the fourth positional arg of re.sub is count
code = re.sub(r'(.*)(//.*)', r'\1', code, flags=re.M)
# Regexp to look for variable names
var_regexp = ("\s*VARIABLE\s+" # kind of variable
"((highp|mediump|lowp)\s+)?" # Precision (optional)
"(?P<type>\w+)\s+" # type
"(?P<name>\w+)\s*" # name
"(\[(?P<size>\d+)\])?" # size (optional)
"(\s*\=\s*[0-9.]+)?" # default value (optional)
"\s*;" # end
)
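# Illustratively, "uniform mediump vec4 u_color;" matches with
# groups type='vec4', name='u_color' and no size.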
# Parse uniforms, attributes and varyings
self._code_variables = {}
for kind in ('uniform', 'attribute', 'varying', 'const'):
regex = re.compile(var_regexp.replace('VARIABLE', kind),
flags=re.MULTILINE)
for m in re.finditer(regex, code):
gtype = m.group('type')
size = int(m.group('size')) if m.group('size') else -1
this_kind = kind
if size >= 1:
# uniform arrays get added both as individuals and full
for i in range(size):
name = '%s[%d]' % (m.group('name'), i)
self._code_variables[name] = kind, gtype, name, -1
this_kind = 'uniform_array'
name = m.group('name')
self._code_variables[name] = this_kind, gtype, name, size
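# e.g. parsing "uniform vec4 u_color;" leaves
# {'u_color': ('uniform', 'vec4', 'u_color', -1)} in self._code_variables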
# Now that our code variables are up-to-date, we can process
# the variables that were set but not yet known.
self._process_pending_variables()
def bind(self, data):
""" Bind a VertexBuffer that has structured data
Parameters
----------
data : VertexBuffer
The vertex buffer to bind. The field names of the array
are mapped to attribute names in GLSL.
"""
# Check
if not isinstance(data, VertexBuffer):
raise ValueError('Program.bind() requires a VertexBuffer.')
# Apply
for name in data.dtype.names:
self[name] = data[name]
def _process_pending_variables(self):
""" Try to apply the variables that were set but not known yet.
"""
# Clear our list of pending variables
self._pending_variables, pending = {}, self._pending_variables
# Try to apply it. On failure, it will be added again
for name, data in pending.items():
self[name] = data
def __setitem__(self, name, data):
""" Setting uniform or attribute data
This method requires the information about the variable that we
know from parsing the source code. If this information is not
yet available, the data is stored in a list of pending data,
and we attempt to set it once new shading code has been set.
For uniforms, the data can represent a plain uniform or a
sampler. In the latter case, this method accepts a Texture
object or a numpy array which is used to update the existing
texture. A new texture is created if necessary.
For attributes, the data can be a tuple/float which GLSL will
use for the value of all vertices. This method also accepts VBO
data as a VertexBuffer object or a numpy array which is used
to update the existing VertexBuffer. A new VertexBuffer is
created if necessary.
By passing None as data, the uniform or attribute can be
"unregistered". This can be useful to get rid of variables that
are no longer present or active in the new source code that is
about to be set.
"""
# Deal with local buffer storage (see count argument in __init__)
if (self._buffer is not None) and not isinstance(data, DataBuffer):
if name in self._buffer.dtype.names:
self._buffer[name] = data
return
# Delete?
if data is None:
self._user_variables.pop(name, None)
self._pending_variables.pop(name, None)
return
if name in self._code_variables:
kind, type_, name, size = self._code_variables[name]
if kind == 'uniform':
if type_.startswith('sampler'):
# Texture data; overwrite or update
tex = self._user_variables.get(name, None)
if isinstance(data, BaseTexture):
pass
elif tex and hasattr(tex, 'set_data'):
tex.set_data(data)
return
elif type_ == 'sampler1D':
data = Texture1D(data)
elif type_ == 'sampler2D':
data = Texture2D(data)
elif type_ == 'sampler3D':
data = Texture3D(data)
else:
# This should not happen
raise RuntimeError('Unknown type %s' % type_)
# Store and send GLIR command
self._user_variables[name] = data
self.glir.associate(data.glir)
self._glir.command('TEXTURE', self._id, name, data.id)
else:
# Normal uniform; convert to np array and check size
dtype, numel = self._gtypes[type_]
data = np.array(data, dtype=dtype).ravel()
if data.size != numel:
raise ValueError('Uniform %r needs %i elements, '
'not %i.' % (name, numel, data.size))
# Store and send GLIR command
self._user_variables[name] = data
self._glir.command('UNIFORM', self._id, name, type_, data)
elif kind == 'uniform_array':
# Normal uniform; convert to np array and check size
dtype, numel = self._gtypes[type_]
data = np.atleast_2d(data).astype(dtype)
need_shape = (size, numel)
if data.shape != need_shape:
raise ValueError('Uniform array %r needs shape %s not %s'
% (name, need_shape, data.shape))
data = data.ravel()
# Store and send GLIR command
self._user_variables[name] = data
self._glir.command('UNIFORM', self._id, name, type_, data)
elif kind == 'attribute':
# Is this a constant value per vertex
is_constant = False
def isscalar(x):
return isinstance(x, (float, int))
if isscalar(data):
is_constant = True
elif isinstance(data, (tuple, list)):
is_constant = all([isscalar(e) for e in data])
if not is_constant:
# VBO data; overwrite or update
vbo = self._user_variables.get(name, None)
if isinstance(data, DataBuffer):
pass
elif vbo is not None and hasattr(vbo, 'set_data'):
vbo.set_data(data)
return
else:
data = VertexBuffer(data)
# Store and send GLIR command
if data.dtype is not None:
numel = self._gtypes[type_][1]
if data._last_dim and data._last_dim != numel:
raise ValueError('data.shape[-1] must be %s '
'not %s for %s'
% (numel, data._last_dim, name))
self._user_variables[name] = data
value = (data.id, data.stride, data.offset)
self.glir.associate(data.glir)
self._glir.command('ATTRIBUTE', self._id,
name, type_, value)
else:
# Single-value attribute; convert to array and check size
dtype, numel = self._gtypes[type_]
data = np.array(data, dtype=dtype)
if data.ndim == 0:
data.shape = data.size
if data.size != numel:
raise ValueError('Attribute %r needs %i elements, '
'not %i.' % (name, numel, data.size))
# Store and send GLIR command
self._user_variables[name] = data
value = tuple([0] + [i for i in data])
self._glir.command('ATTRIBUTE', self._id,
name, type_, value)
else:
raise KeyError('Cannot set data for a %s.' % kind)
else:
# This variable is not defined in the current source code,
# so we cannot establish whether this is a uniform or
# attribute, nor check its type. Try again later.
self._pending_variables[name] = data
def __getitem__(self, name):
""" Get user-defined data for attributes and uniforms.
"""
if name in self._user_variables:
return self._user_variables[name]
elif name in self._pending_variables:
return self._pending_variables[name]
else:
raise KeyError("Unknown uniform or attribute %s" % name)
def draw(self, mode='triangles', indices=None, check_error=True):
""" Draw the attribute arrays in the specified mode.
Parameters
----------
mode : str | GL_ENUM
'points', 'lines', 'line_strip', 'line_loop', 'triangles',
'triangle_strip', or 'triangle_fan'.
indices : array
Array of indices to draw.
check_error:
Check error after draw.
"""
# Invalidate buffer (data has already been sent)
self._buffer = None
# Check if mode is valid
mode = check_enum(mode)
if mode not in ['points', 'lines', 'line_strip', 'line_loop',
'triangles', 'triangle_strip', 'triangle_fan']:
raise ValueError('Invalid draw mode: %r' % mode)
# Check leftover variables, warn, discard them
# In GLIR we check whether all attributes are indeed set
for name in self._pending_variables:
logger.warning('Variable %r is given but not known.' % name)
self._pending_variables = {}
# Check attribute sizes
attributes = [vbo for vbo in self._user_variables.values()
if isinstance(vbo, DataBuffer)]
sizes = [a.size for a in attributes]
if len(attributes) < 1:
raise RuntimeError('Must have at least one attribute')
if not all(s == sizes[0] for s in sizes[1:]):
msg = '\n'.join(['%s: %s' % (str(a), a.size) for a in attributes])
raise RuntimeError('All attributes must have the same size, got:\n'
'%s' % msg)
# Get the glir queue that we need now
canvas = get_current_canvas()
assert canvas is not None
# Associate canvas
canvas.context.glir.associate(self.glir)
# Indexbuffer
if isinstance(indices, IndexBuffer):
canvas.context.glir.associate(indices.glir)
logger.debug("Program drawing %r with index buffer" % mode)
gltypes = {np.dtype(np.uint8): 'UNSIGNED_BYTE',
np.dtype(np.uint16): 'UNSIGNED_SHORT',
np.dtype(np.uint32): 'UNSIGNED_INT'}
selection = indices.id, gltypes[indices.dtype], indices.size
canvas.context.glir.command('DRAW', self._id, mode, selection)
elif indices is None:
selection = 0, attributes[0].size
logger.debug("Program drawing %r with %r" % (mode, selection))
canvas.context.glir.command('DRAW', self._id, mode, selection)
else:
raise TypeError("Invalid index: %r (must be IndexBuffer)" %
indices)
# Process GLIR commands
canvas.context.flush_commands()
|
{
"content_hash": "5b9029d94e4e8c6050b041998810a8f2",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 79,
"avg_line_length": 41.969565217391306,
"alnum_prop": 0.5254843053972859,
"repo_name": "hronoses/vispy",
"id": "f594390be05441d96d6ab724220cbc07b9f00d3f",
"size": "19633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispy/gloo/program.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "171513"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "PowerShell",
"bytes": "4151"
},
{
"name": "Python",
"bytes": "2858273"
}
],
"symlink_target": ""
}
|
import pbr.version
__version__ = pbr.version.VersionInfo('python-keystoneclient').version_string()
__all__ = [
# Modules
'generic',
'v2_0',
'v3',
# Packages
'access',
'client',
'exceptions',
'httpclient',
'service_catalog',
]
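# Typical entry point, as a sketch (constructor arguments omitted):
#   from keystoneclient.v3 import client
#   keystone = client.Client(...)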
|
{
"content_hash": "c188492e6da8fa76554b246cca6c08ec",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 15,
"alnum_prop": 0.5666666666666667,
"repo_name": "raildo/python-keystoneclient",
"id": "ab4f7a1ce5a24c29ce95f74c378211a2696506ce",
"size": "906",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystoneclient/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "JavaScript",
"bytes": "14806"
},
{
"name": "Python",
"bytes": "1344730"
},
{
"name": "Shell",
"bytes": "22768"
}
],
"symlink_target": ""
}
|
import sqlite3
import StringIO
import pylab
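# NOTE: on a headless server a non-interactive matplotlib backend is normally
# selected before pylab is imported, e.g.:
#   import matplotlib
#   matplotlib.use('Agg')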
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, make_response
# configuration
DATABASE = '/tmp/finance.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('FINANCE_APP_SETTINGS', silent=True)
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
g.db.close()
def update_balances(balances, changes):
if(len(balances) != len(changes)):
raise Exception("balances must be same length as changes")
result = []
for i, bucket_balance in enumerate(balances):
new_balance = changes[i]['amountcents'] + bucket_balance['balancecents']
if(bucket_balance['bucketname'] != changes[i]['bucketname']):
raise Exception("ERROR: bucket name mismatch: %s vs %s" %
(bucket_balance['bucketname'], changes[i]['bucketname']))
result.append( {
'bucketname': bucket_balance['bucketname'],
'balancecents': new_balance,
'balancestring': cents_to_string( new_balance )
} )
return result
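# Illustrative shapes of the arguments to update_balances(), as built by
# get_balances_at() and get_changes_by_entry_and_bucket() below:
#   balances = [{'bucketname': 'checking', 'balancecents': 1000,
#                'balancestring': '$10.00'}, ...]
#   changes  = [{'bucketname': 'checking', 'amountcents': -250,
#                'amountstring': '-$2.50'}, ...]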
def get_balances_at(datetime=None):
# NOTE: datetime == None means: get current balances (ie, most recent)
# datetime == "init" means: get the earliest recorded balances
cur = g.db.execute('select bucketid, bucketname, initialbalancecents from buckets where buckettype = "internal"')
initial_balances = [
dict(
bucketname=row[1],
balancecents=row[2],
balancestring=cents_to_string(row[2])
) for row in cur.fetchall() ]
if(datetime == "init"):
return initial_balances
balances = [] + initial_balances
list_of_changes = get_changes_by_entry_and_bucket(start=None, end=datetime)
for changes in list_of_changes:
balances = update_balances(balances, changes)
return balances
def rangeDateQuery(baseQuery, start, end):
if start == None and end == None:
cur = g.db.execute(baseQuery)
elif start == None:
cur = g.db.execute(baseQuery + ' where date <= ?', [end])
elif end == None:
cur = g.db.execute(baseQuery + ' where date >= ?', [start])
else:
cur = g.db.execute(baseQuery + ' where date >= ? and date <= ?',
[start, end])
return cur
def get_changes_by_entry_and_bucket(start=None, end=None):
cur = rangeDateQuery('select entryid, bucketid_for_change, bucketname_for_change, amountcents, date ' +
'from entries_with_bucket_changes', start, end)
rows = cur.fetchall()
prev_entryid = None
result = []
for row in rows:
if(row[0] != prev_entryid):
result.append( [] )
result[-1].append( dict(
bucketname = row[2],
amountcents = row[3],
amountstring = cents_to_string(int(row[3])) if row[3] != 0 else '-'
) )
prev_entryid = row[0]
return result
def get_ending_balances_by_entry_and_bucket(start=None, end=None):
init_datetime = "init" if start == None else start
balances = get_balances_at(init_datetime)
result = []
list_of_changes = get_changes_by_entry_and_bucket(start, end)
for changes in list_of_changes:
balances = update_balances(balances, changes)
result.append(balances)
return result
def get_entries(start=None, end=None):
cur = rangeDateQuery('select description, amountcents, srcbucketname, srcbucketid, ' +
'destbucketname, destbucketid, entryid, date from entries_labeled', start, end)
entries = [
dict(
description=row[0],
amountstring=cents_to_string(row[1]),
srcbucket=str(row[2]),
srcbucketid=row[3],
destbucket=str(row[4]),
destbucketid=row[5],
entryid=row[6],
datetime=row[7]
) for row in cur.fetchall() ]
return entries
def get_entries_with_changes_and_balances(start=None, end=None):
entries = get_entries(start, end)
init_datetime = "init" if start == None else start
initial_balances = get_balances_at(init_datetime)
changes = get_changes_by_entry_and_bucket(start, end)
balances = get_ending_balances_by_entry_and_bucket(start, end)
for i in range(len(entries)):
entries[i]['balances'] = balances[i]
entries[i]['changes'] = changes[i]
return (entries, initial_balances)
@app.route('/show_entries')
@app.route('/')
def show_entries():
start = request.args.get('start', None)
end = request.args.get('end', None)
if start == '': start = None
if end == '': end = None
history_img_url = url_for('history_png', start=start, end=end)
balance_pie_img_url = url_for('balance_pie_png', datetime=end)
entries, initial_balances = get_entries_with_changes_and_balances(start, end)
return render_template('show_entries.html', entries=entries,
initial_balances=initial_balances, start=start, end=end,
history_img_url=history_img_url, balance_pie_img_url=balance_pie_img_url)
@app.route('/add_entry', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
srcbucket = bucketname_to_int(request.form['srcbucket'])
destbucket = bucketname_to_int(request.form['destbucket'])
g.db.execute('insert into entries (date, description, amountcents, srcbucket, destbucket) ' +
'values (?, ?, ?, ?, ?)',
[request.form['datetime'], request.form['description'], string_to_cents(request.form['amount']),
srcbucket, destbucket ])
g.db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/show_buckets')
def show_buckets():
cur = g.db.execute('select bucketname, initialbalancecents, net_change, finalbalancecents from ' +
'buckets_with_net_change where buckettype = "internal" order by bucketid asc')
buckets = [dict(name=row[0], initialbalancestring=cents_to_string(row[1]),
netchangestring=cents_to_string(row[2]),
finalbalancestring=cents_to_string(row[3]) )
for row in cur.fetchall()]
cur = g.db.execute('select bucketid, bucketname from buckets where buckettype = "proportion"')
proportionnames = [row[1] for row in cur.fetchall()]
numproportions = len(proportionnames)
cur = g.db.execute('select percent from bucket_proportion_combos')
i = 0
row = cur.fetchone()
while (row != None):
if(i % numproportions == 0):
buckets[i // numproportions]['proportions'] = []
buckets[i // numproportions]['proportions'] += [row[0]]
row = cur.fetchone()
i += 1
return render_template('show_buckets.html', buckets=buckets, proportionnames=proportionnames)
@app.route('/add_bucket', methods=['POST'])
def add_bucket():
if not session.get('logged_in'):
abort(401)
g.db.execute('insert into buckets (bucketname, buckettype, initialbalancecents) values (?, "internal", ?)',
[request.form['name'], string_to_cents(request.form['initialbalance'])])
g.db.commit()
flash('New bucket was successfully added')
return redirect(url_for('show_buckets'))
@app.route('/history.png')
def history_png():
start = request.args.get('start', None)
end = request.args.get('end', None)
if start == '': start = None
if end == '': end = None
entries, initial_balances = get_entries_with_changes_and_balances(start, end)
xvalues = pylab.arange(0, len(entries)+1, 1)
yvalues = [[initial_balance['balancecents'] / 100.0] for initial_balance in initial_balances]
seriesnames = [initial_balance['bucketname'] for initial_balance in initial_balances]
for e in entries:
for i, balance in enumerate(e['balances']):
yvalues[i] += [ balance['balancecents'] / 100.0]
series = []
for yv in yvalues:
series += [xvalues, yv]
pylab.clf() # clear current figure
pylab.plot(*series)
pylab.legend(seriesnames, loc='lower right')
imgdata = StringIO.StringIO()
pylab.savefig(imgdata, format='png', dpi=80)
imgdata.seek(0)
response = make_response( imgdata.read() )
response.mimetype = 'image/png'
return response
@app.route('/balance_pie.png')
def balance_pie_png():
datetime = request.args.get('datetime', None)
if datetime == '': datetime = None
balances = get_balances_at(datetime=datetime)
values = [b['balancecents'] for b in balances]
labels = [b['bucketname'] for b in balances]
pylab.clf() # clear current figure
pylab.pie(values, labels=labels)
imgdata = StringIO.StringIO()
pylab.savefig(imgdata, format='png', dpi=60)
imgdata.seek(0)
response = make_response( imgdata.read() )
response.mimetype = 'image/png'
return response
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
def bucketname_to_int(name):
return g.db.execute('select bucketid from buckets where bucketname = ?', [name]) \
.fetchall()[0][0]
def cents_to_string(cents):
sign = ''
if(cents == None):
return 'None'
elif(cents < 0):
sign = '-'
return sign + "$%d.%02d" % (int(abs(cents)/100), abs(cents) % 100)
def string_to_cents(s):
multiplier = 1
if(len(s) > 0 and s[0] == '-'):
multiplier = -1
s = s[1:]
if(len(s) > 0 and s[0] == '$'):
s = s[1:]
cents = multiplier * int(round(float(s) * 100))  # round to avoid float truncation (e.g. 0.29 * 100 == 28.999...)
return cents
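# Illustrative round trip:
#   string_to_cents('-$1.25') -> -125
#   cents_to_string(-125)     -> '-$1.25'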
if __name__ == '__main__':
app.run()
|
{
"content_hash": "3f3bbcef53ddb8b5c82a75a8b5b92997",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 117,
"avg_line_length": 33.10793650793651,
"alnum_prop": 0.622686738901141,
"repo_name": "buggi22/finance_app",
"id": "c438e27daa5a29a10cc1adb1d57f79c01c973bcb",
"size": "10439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10439"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
}
|
"""
Tools for generating forms based on mongoengine Document schemas.
"""
import sys
import decimal
from bson import ObjectId
from operator import itemgetter
try:
from collections import OrderedDict
except ImportError:
# Use bson's SON implementation instead
from bson import SON as OrderedDict
from wtforms import fields as f, validators
from mongoengine import ReferenceField
from flask_mongoengine.wtf.fields import ModelSelectField, ModelSelectMultipleField, DictField, NoneStringField, BinaryField
from flask_mongoengine.wtf.models import ModelForm
__all__ = (
'model_fields', 'model_form',
)
def converts(*args):
def _inner(func):
func._converter_for = frozenset(args)
return func
return _inner
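# How registration works: methods below decorated with @converts('IntField'),
# etc. are collected by ModelConverter.__init__ into self.converters, keyed by
# the mongoengine field class name (type(field).__name__).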
class ModelConverter(object):
def __init__(self, converters=None):
if not converters:
converters = {}
for name in dir(self):
obj = getattr(self, name)
if hasattr(obj, '_converter_for'):
for classname in obj._converter_for:
converters[classname] = obj
self.converters = converters
def convert(self, model, field, field_args):
kwargs = {
'label': getattr(field, 'verbose_name', field.name),
'description': getattr(field, 'help_text', None) or '',
'validators': getattr(field, 'validators', None) or [],
'filters': getattr(field, 'filters', None) or [],
'default': field.default,
}
if field_args:
kwargs.update(field_args)
if kwargs['validators']:
# Create a copy of the list since we will be modifying it.
kwargs['validators'] = list(kwargs['validators'])
if field.required:
kwargs['validators'].append(validators.InputRequired())
else:
kwargs['validators'].append(validators.Optional())
ftype = type(field).__name__
if field.choices:
kwargs['choices'] = field.choices
if ftype in self.converters:
kwargs["coerce"] = self.coerce(ftype)
if kwargs.pop('multiple', False):
return f.SelectMultipleField(**kwargs)
if kwargs.pop('radio', False):
return f.RadioField(**kwargs)
return f.SelectField(**kwargs)
if hasattr(field, 'to_form_field'):
return field.to_form_field(model, kwargs)
if ftype in self.converters:
return self.converters[ftype](model, field, kwargs)
@classmethod
def _string_common(cls, model, field, kwargs):
if field.max_length or field.min_length:
kwargs['validators'].append(
validators.Length(max=field.max_length or -1,
min=field.min_length or -1))
@classmethod
def _number_common(cls, model, field, kwargs):
if field.max_value or field.min_value:
kwargs['validators'].append(
validators.NumberRange(max=field.max_value,
min=field.min_value))
@converts('StringField')
def conv_String(self, model, field, kwargs):
if field.regex:
kwargs['validators'].append(validators.Regexp(regex=field.regex))
self._string_common(model, field, kwargs)
if kwargs.pop('password', False):
return f.PasswordField(**kwargs)
if kwargs.pop('textarea', False) or not field.max_length:
return f.TextAreaField(**kwargs)
return f.StringField(**kwargs)
@converts('URLField')
def conv_URL(self, model, field, kwargs):
kwargs['validators'].append(validators.URL())
self._string_common(model, field, kwargs)
return NoneStringField(**kwargs)
@converts('EmailField')
def conv_Email(self, model, field, kwargs):
kwargs['validators'].append(validators.Email())
self._string_common(model, field, kwargs)
return NoneStringField(**kwargs)
@converts('IntField')
def conv_Int(self, model, field, kwargs):
self._number_common(model, field, kwargs)
return f.IntegerField(**kwargs)
@converts('FloatField')
def conv_Float(self, model, field, kwargs):
self._number_common(model, field, kwargs)
return f.FloatField(**kwargs)
@converts('DecimalField')
def conv_Decimal(self, model, field, kwargs):
self._number_common(model, field, kwargs)
return f.DecimalField(**kwargs)
@converts('BooleanField')
def conv_Boolean(self, model, field, kwargs):
return f.BooleanField(**kwargs)
@converts('DateTimeField')
def conv_DateTime(self, model, field, kwargs):
return f.DateTimeField(**kwargs)
@converts('BinaryField')
def conv_Binary(self, model, field, kwargs):
# TODO: maybe use a file field that saves the file's data to MongoDB
if field.max_bytes:
kwargs['validators'].append(validators.Length(max=field.max_bytes))
return BinaryField(**kwargs)
@converts('DictField')
def conv_Dict(self, model, field, kwargs):
return DictField(**kwargs)
@converts('ListField')
def conv_List(self, model, field, kwargs):
if isinstance(field.field, ReferenceField):
return ModelSelectMultipleField(model=field.field.document_type, **kwargs)
if field.field.choices:
kwargs['multiple'] = True
return self.convert(model, field.field, kwargs)
field_args = kwargs.pop("field_args", {})
unbound_field = self.convert(model, field.field, field_args)
unacceptable = {
'validators': [],
'filters': [],
'min_entries': kwargs.get('min_entries', 0)
}
kwargs.update(unacceptable)
return f.FieldList(unbound_field, **kwargs)
@converts('SortedListField')
def conv_SortedList(self, model, field, kwargs):
# TODO: sort functionality; may need a sortable widget
return self.conv_List(model, field, kwargs)
@converts('GeoLocationField')
def conv_GeoLocation(self, model, field, kwargs):
# TODO: create geo field and widget (also GoogleMaps)
return
@converts('ObjectIdField')
def conv_ObjectId(self, model, field, kwargs):
return
@converts('EmbeddedDocumentField')
def conv_EmbeddedDocument(self, model, field, kwargs):
kwargs = {
'validators': [],
'filters': [],
'default': field.default or field.document_type_obj,
}
form_class = model_form(field.document_type_obj, field_args={})
return f.FormField(form_class, **kwargs)
@converts('ReferenceField')
def conv_Reference(self, model, field, kwargs):
return ModelSelectField(model=field.document_type, **kwargs)
@converts('GenericReferenceField')
def conv_GenericReference(self, model, field, kwargs):
return
def coerce(self, field_type):
coercions = {
"IntField": int,
"BooleanField": bool,
"FloatField": float,
"DecimalField": decimal.Decimal,
"ObjectIdField": ObjectId
}
if sys.version_info >= (3, 0):
return coercions.get(field_type, str)
else:
return coercions.get(field_type, unicode)
def model_fields(model, only=None, exclude=None, field_args=None, converter=None):
"""
Generate a dictionary of fields for a given database model.
See `model_form` docstring for description of parameters.
"""
from mongoengine.base import BaseDocument, DocumentMetaclass
if not isinstance(model, (BaseDocument, DocumentMetaclass)):
raise TypeError('model must be a mongoengine Document schema')
converter = converter or ModelConverter()
field_args = field_args or {}
if sys.version_info >= (3, 0):
names = ((k, v.creation_counter) for k, v in model._fields.items())
else:
names = ((k, v.creation_counter) for k, v in model._fields.iteritems())
field_names = [x for x in map(itemgetter(0), sorted(names, key=itemgetter(1)))]
if only:
field_names = [x for x in only if x in set(field_names)]
elif exclude:
field_names = [x for x in field_names if x not in set(exclude)]
field_dict = OrderedDict()
for name in field_names:
model_field = model._fields[name]
field = converter.convert(model, model_field, field_args.get(name))
if field is not None:
field_dict[name] = field
return field_dict
def model_form(model, base_class=ModelForm, only=None, exclude=None, field_args=None, converter=None):
"""
Create a wtforms Form for a given mongoengine Document schema::
from flask_mongoengine.wtf import model_form
from myproject.myapp.schemas import Article
ArticleForm = model_form(Article)
:param model:
A mongoengine Document schema class
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments used
to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
field_dict = model_fields(model, only, exclude, field_args, converter)
field_dict['model_class'] = model
return type(model.__name__ + 'Form', (base_class,), field_dict)
|
{
"content_hash": "794ac00cf620f39090337cba924d9a03",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 124,
"avg_line_length": 35.496428571428574,
"alnum_prop": 0.6260187141563538,
"repo_name": "losintikfos/flask-mongoengine",
"id": "9cb2beca2b089fef9f746858364a17d75b9894de",
"size": "9939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_mongoengine/wtf/orm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6502"
},
{
"name": "Python",
"bytes": "79171"
}
],
"symlink_target": ""
}
|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/route -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_route
short_description: Create, modify, and idempotently manage openshift routes.
description:
- Manage openshift route objects programmatically.
options:
state:
description:
- State represents whether to create, modify, delete, or list
required: true
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: str
aliases: []
labels:
description:
- The labels to apply on the route
required: false
default: None
aliases: []
tls_termination:
description:
- The options for termination. e.g. reencrypt
required: false
default: None
aliases: []
dest_cacert_path:
description:
- The path to the dest_cacert
required: false
default: None
aliases: []
cacert_path:
description:
- The path to the cacert
required: false
default: None
aliases: []
cert_path:
description:
- The path to the cert
required: false
default: None
aliases: []
key_path:
description:
- The path to the key
required: false
default: None
aliases: []
dest_cacert_content:
description:
- The dest_cacert content
required: false
default: None
aliases: []
cacert_content:
description:
- The cacert content
required: false
default: None
aliases: []
cert_content:
description:
- The cert content
required: false
default: None
aliases: []
service_name:
description:
- The name of the service that this route points to.
required: false
default: None
aliases: []
host:
description:
- The host that the route will use. e.g. myapp.x.y.z
required: false
default: None
aliases: []
port:
description:
- The name of the service port, or the number of the container port, that the route will route traffic to
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: Configure certificates for reencrypt route
oc_route:
name: myapproute
namespace: awesomeapp
cert_path: "/etc/origin/master/named_certificates/myapp_cert
key_path: "/etc/origin/master/named_certificates/myapp_key
cacert_path: "/etc/origin/master/named_certificates/myapp_cacert
dest_cacert_content: "{{ dest_cacert_content }}"
service_name: myapp_php
host: myapp.awesomeapp.openshift.com
tls_termination: reencrypt
run_once: true
'''
# -*- -*- -*- End included fragment: doc/route -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup_ext=None,
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
if backup_ext is None:
self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
else:
self.backup_ext = backup_ext
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
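# Illustratively, with the default '.' separator:
#   Yedit.parse_key('a.b[0].c') -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]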
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(str(invalue))
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
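# Illustrative edits payload (a plain put is the default action; 'update'
# merges a dict into an existing dict entry):
#   edits = [{'key': 'spec.replicas', 'value': 3},
#            {'key': 'metadata.labels', 'value': {'app': 'myapp'}, 'action': 'update'}]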
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
backup_ext=params['backup_ext'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-p')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
version = version[1:] # Remove the 'v' prefix
versions_dict[tech + '_numeric'] = version.split('+')[0]
# "3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
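# Illustrative sketch (not part of the vendored module): Utils.check_def_equal
# compares a user-supplied definition against an API result while skipping the
# autogenerated 'metadata' and 'status' keys. All values below are made up.
#
#   user = {'spec': {'host': 'app.example.com'}}
#   result = {'metadata': {'resourceVersion': '42'},
#             'status': {},
#             'spec': {'host': 'app.example.com'}}
#   Utils.check_def_equal(user, result)  # -> True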
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
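# Illustrative sketch (not part of the vendored module): how OpenShiftCLIConfig
# renders its options hash as CLI parameters. The option dicts follow the
# {'value': ..., 'include': ...} shape that stringify() expects; all values
# below are made up.
#
#   opts = {'host': {'value': 'app.example.com', 'include': True},
#           'labels': {'value': {'app': 'web', 'tier': 'front'}, 'include': True},
#           'debug': {'value': None, 'include': True}}
#   config = OpenShiftCLIConfig('myroute', 'default',
#                               '/etc/origin/master/admin.kubeconfig', opts)
#   config.to_option_list(ascommalist='labels')
#   # -> ['--host=app.example.com', '--labels=app=web,tier=front']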
# -*- -*- -*- Begin included fragment: lib/route.py -*- -*- -*-
# noqa: E302,E301
# pylint: disable=too-many-instance-attributes
class RouteConfig(object):
''' Handle route options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
labels=None,
destcacert=None,
cacert=None,
cert=None,
key=None,
host=None,
tls_termination=None,
service_name=None,
wildcard_policy=None,
weight=None,
port=None):
''' constructor for handling route options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.labels = labels
self.host = host
self.tls_termination = tls_termination
self.destcacert = destcacert
self.cacert = cacert
self.cert = cert
self.key = key
self.service_name = service_name
self.port = port
self.data = {}
self.wildcard_policy = wildcard_policy
if wildcard_policy is None:
self.wildcard_policy = 'None'
self.weight = weight
if weight is None:
self.weight = 100
self.create_dict()
def create_dict(self):
''' return a service as a dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Route'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = self.labels
self.data['spec'] = {}
self.data['spec']['host'] = self.host
if self.tls_termination:
self.data['spec']['tls'] = {}
self.data['spec']['tls']['termination'] = self.tls_termination
if self.tls_termination != 'passthrough':
self.data['spec']['tls']['key'] = self.key
self.data['spec']['tls']['caCertificate'] = self.cacert
self.data['spec']['tls']['certificate'] = self.cert
if self.tls_termination == 'reencrypt':
self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
self.data['spec']['to'] = {'kind': 'Service',
'name': self.service_name,
'weight': self.weight}
self.data['spec']['wildcardPolicy'] = self.wildcard_policy
if self.port:
self.data['spec']['port'] = {}
self.data['spec']['port']['targetPort'] = self.port
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Route(Yedit):
    ''' Class to model an OpenShift route object '''
wildcard_policy = "spec.wildcardPolicy"
host_path = "spec.host"
port_path = "spec.port.targetPort"
service_path = "spec.to.name"
weight_path = "spec.to.weight"
cert_path = "spec.tls.certificate"
cacert_path = "spec.tls.caCertificate"
destcacert_path = "spec.tls.destinationCACertificate"
termination_path = "spec.tls.termination"
key_path = "spec.tls.key"
kind = 'route'
def __init__(self, content):
'''Route constructor'''
super(Route, self).__init__(content=content)
def get_destcacert(self):
''' return cert '''
return self.get(Route.destcacert_path)
def get_cert(self):
''' return cert '''
return self.get(Route.cert_path)
def get_key(self):
''' return key '''
return self.get(Route.key_path)
def get_cacert(self):
''' return cacert '''
return self.get(Route.cacert_path)
def get_service(self):
''' return service name '''
return self.get(Route.service_path)
def get_weight(self):
''' return service weight '''
return self.get(Route.weight_path)
def get_termination(self):
''' return tls termination'''
return self.get(Route.termination_path)
def get_host(self):
''' return host '''
return self.get(Route.host_path)
def get_port(self):
''' return port '''
return self.get(Route.port_path)
def get_wildcard_policy(self):
''' return wildcardPolicy '''
return self.get(Route.wildcard_policy)
# -*- -*- -*- End included fragment: lib/route.py -*- -*- -*-
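# Illustrative sketch (not part of the vendored module): a minimal
# edge-terminated route as RouteConfig assembles it via create_dict().
# All values below are made up.
#
#   rc = RouteConfig('myroute', 'default', '/etc/origin/master/admin.kubeconfig',
#                    host='app.example.com', tls_termination='edge',
#                    cert='PEM CERT', key='PEM KEY', cacert='PEM CA',
#                    service_name='mysvc')
#   rc.data['spec']['tls']['termination']  # -> 'edge'
#   rc.data['spec']['to']  # -> {'kind': 'Service', 'name': 'mysvc', 'weight': 100}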
# -*- -*- -*- Begin included fragment: class/oc_route.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCRoute(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'route'
def __init__(self,
config,
verbose=False):
        ''' Constructor for OCRoute '''
super(OCRoute, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self._route = None
@property
def route(self):
''' property function for route'''
if not self._route:
self.get()
return self._route
@route.setter
def route(self, data):
''' setter function for route '''
self._route = data
def exists(self):
''' return whether a route exists '''
if self.route:
return True
return False
def get(self):
'''return route information '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.route = Route(content=result['results'][0])
elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
return self._replace_content(self.kind,
self.config.name,
self.config.data,
force=(self.config.host != self.route.get_host()))
def needs_update(self):
''' verify an update is needed '''
skip = []
return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=self.verbose)
@staticmethod
def get_cert_data(path, content):
'''get the data for a particular value'''
rval = None
if path and os.path.exists(path) and os.access(path, os.R_OK):
rval = open(path).read()
elif content:
rval = content
return rval
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode=False):
        ''' run the idempotent ansible code
params comes from the ansible portion for this module
files: a dictionary for the certificates
{'cert': {'path': '',
'content': '',
'value': ''
}
}
check_mode: does the module support check mode. (module.check_mode)
'''
files = {'destcacert': {'path': params['dest_cacert_path'],
'content': params['dest_cacert_content'],
'value': None, },
'cacert': {'path': params['cacert_path'],
'content': params['cacert_content'],
'value': None, },
'cert': {'path': params['cert_path'],
'content': params['cert_content'],
'value': None, },
'key': {'path': params['key_path'],
'content': params['key_content'],
'value': None, }, }
if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough': # E501
for key, option in files.items():
if not option['path'] and not option['content']:
continue
option['value'] = OCRoute.get_cert_data(option['path'], option['content']) # E501
if not option['value']:
return {'failed': True,
'msg': 'Verify that you pass a correct value for %s' % key}
rconfig = RouteConfig(params['name'],
params['namespace'],
params['kubeconfig'],
params['labels'],
files['destcacert']['value'],
files['cacert']['value'],
files['cert']['value'],
files['key']['value'],
params['host'],
params['tls_termination'],
params['service_name'],
params['wildcard_policy'],
params['weight'],
params['port'])
oc_route = OCRoute(rconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_route.get()
#####
# Get
#####
if state == 'list':
return {'changed': False,
'results': api_rval['results'],
'state': 'list'}
########
# Delete
########
if state == 'absent':
if oc_route.exists():
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
api_rval = oc_route.delete()
return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
return {'changed': False, 'state': 'absent'}
if state == 'present':
########
# Create
########
if not oc_route.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
# Create it here
api_rval = oc_route.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
# return the created object
api_rval = oc_route.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
########
# Update
########
if oc_route.needs_update():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
api_rval = oc_route.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
# return the created object
api_rval = oc_route.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
return {'changed': False, 'results': api_rval, 'state': "present"}
# catch all
return {'failed': True, 'msg': "Unknown State passed"}
# -*- -*- -*- End included fragment: class/oc_route.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_route.py -*- -*- -*-
# pylint: disable=too-many-branches
def main():
'''
ansible oc module for route
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
labels=dict(default=None, type='dict'),
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
tls_termination=dict(default=None, type='str'),
dest_cacert_path=dict(default=None, type='str'),
cacert_path=dict(default=None, type='str'),
cert_path=dict(default=None, type='str'),
key_path=dict(default=None, type='str'),
dest_cacert_content=dict(default=None, type='str'),
cacert_content=dict(default=None, type='str'),
cert_content=dict(default=None, type='str'),
key_content=dict(default=None, type='str'),
service_name=dict(default=None, type='str'),
host=dict(default=None, type='str'),
wildcard_policy=dict(default=None, type='str'),
weight=dict(default=None, type='int'),
port=dict(default=None, type='int'),
),
mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
('cacert_path', 'cacert_content'),
('cert_path', 'cert_content'),
('key_path', 'key_content'), ],
supports_check_mode=True,
)
results = OCRoute.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_route.py -*- -*- -*-
|
{
"content_hash": "cc7f9c5af3314edd1fe3eb1695c95514",
"timestamp": "",
"source": "github",
"line_count": 1898,
"max_line_length": 118,
"avg_line_length": 33.94994731296101,
"alnum_prop": 0.528190325434145,
"repo_name": "joelddiaz/openshift-tools",
"id": "ce591db4982dda25ab0fe4a913c47f227f42eeda",
"size": "65599",
"binary": false,
"copies": "8",
"ref": "refs/heads/prod",
"path": "openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_route.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Go",
"bytes": "382164"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "102550"
},
{
"name": "JavaScript",
"bytes": "1580"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "27782442"
},
{
"name": "Shell",
"bytes": "1378642"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
import unittest
import mock
from google.cloud.tasks_v2.types import Queue, Task
from airflow.providers.google.cloud.operators.tasks import (
CloudTasksQueueCreateOperator, CloudTasksQueueDeleteOperator, CloudTasksQueueGetOperator,
CloudTasksQueuePauseOperator, CloudTasksQueuePurgeOperator, CloudTasksQueueResumeOperator,
CloudTasksQueuesListOperator, CloudTasksQueueUpdateOperator, CloudTasksTaskCreateOperator,
CloudTasksTaskDeleteOperator, CloudTasksTaskGetOperator, CloudTasksTaskRunOperator,
CloudTasksTasksListOperator,
)
GCP_CONN_ID = "google_cloud_default"
PROJECT_ID = "test-project"
LOCATION = "asia-east2"
FULL_LOCATION_PATH = "projects/test-project/locations/asia-east2"
QUEUE_ID = "test-queue"
FULL_QUEUE_PATH = "projects/test-project/locations/asia-east2/queues/test-queue"
TASK_NAME = "test-task"
FULL_TASK_PATH = (
"projects/test-project/locations/asia-east2/queues/test-queue/tasks/test-task"
)
class TestCloudTasksQueueCreate(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_create_queue(self, mock_hook):
mock_hook.return_value.create_queue.return_value = {}
operator = CloudTasksQueueCreateOperator(
location=LOCATION, task_queue=Queue(), task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_queue.assert_called_once_with(
location=LOCATION,
task_queue=Queue(),
project_id=None,
queue_name=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksQueueUpdate(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_update_queue(self, mock_hook):
mock_hook.return_value.update_queue.return_value = {}
operator = CloudTasksQueueUpdateOperator(
task_queue=Queue(name=FULL_QUEUE_PATH), task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_queue.assert_called_once_with(
task_queue=Queue(name=FULL_QUEUE_PATH),
project_id=None,
location=None,
queue_name=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksQueueGet(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_get_queue(self, mock_hook):
mock_hook.return_value.get_queue.return_value = {}
operator = CloudTasksQueueGetOperator(
location=LOCATION, queue_name=QUEUE_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.get_queue.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksQueuesList(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_list_queues(self, mock_hook):
mock_hook.return_value.list_queues.return_value = {}
operator = CloudTasksQueuesListOperator(location=LOCATION, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.list_queues.assert_called_once_with(
location=LOCATION,
project_id=None,
results_filter=None,
page_size=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksQueueDelete(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_delete_queue(self, mock_hook):
mock_hook.return_value.delete_queue.return_value = {}
operator = CloudTasksQueueDeleteOperator(
location=LOCATION, queue_name=QUEUE_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_queue.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksQueuePurge(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
    def test_purge_queue(self, mock_hook):
mock_hook.return_value.purge_queue.return_value = {}
operator = CloudTasksQueuePurgeOperator(
location=LOCATION, queue_name=QUEUE_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.purge_queue.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksQueuePause(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_pause_queue(self, mock_hook):
mock_hook.return_value.pause_queue.return_value = {}
operator = CloudTasksQueuePauseOperator(
location=LOCATION, queue_name=QUEUE_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.pause_queue.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksQueueResume(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_resume_queue(self, mock_hook):
mock_hook.return_value.resume_queue.return_value = {}
operator = CloudTasksQueueResumeOperator(
location=LOCATION, queue_name=QUEUE_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.resume_queue.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksTaskCreate(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_create_task(self, mock_hook):
mock_hook.return_value.create_task.return_value = {}
operator = CloudTasksTaskCreateOperator(
location=LOCATION, queue_name=QUEUE_ID, task=Task(), task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_task.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
task=Task(),
project_id=None,
task_name=None,
response_view=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksTaskGet(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_get_task(self, mock_hook):
mock_hook.return_value.get_task.return_value = {}
operator = CloudTasksTaskGetOperator(
location=LOCATION, queue_name=QUEUE_ID, task_name=TASK_NAME, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.get_task.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
task_name=TASK_NAME,
project_id=None,
response_view=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksTasksList(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_list_tasks(self, mock_hook):
mock_hook.return_value.list_tasks.return_value = {}
operator = CloudTasksTasksListOperator(
location=LOCATION, queue_name=QUEUE_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.list_tasks.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
response_view=None,
page_size=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksTaskDelete(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_delete_task(self, mock_hook):
mock_hook.return_value.delete_task.return_value = {}
operator = CloudTasksTaskDeleteOperator(
location=LOCATION, queue_name=QUEUE_ID, task_name=TASK_NAME, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_task.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
task_name=TASK_NAME,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudTasksTaskRun(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_run_task(self, mock_hook):
mock_hook.return_value.run_task.return_value = {}
operator = CloudTasksTaskRunOperator(
location=LOCATION, queue_name=QUEUE_ID, task_name=TASK_NAME, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.run_task.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
task_name=TASK_NAME,
project_id=None,
response_view=None,
retry=None,
timeout=None,
metadata=None,
)
|
{
"content_hash": "7c0cc0f98e42a9a925d94e4184a5e7bc",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 94,
"avg_line_length": 37.91843971631206,
"alnum_prop": 0.6417282334237352,
"repo_name": "wileeam/airflow",
"id": "fde9da1a65cf20718f1294be522ffd62c6d6ec6f",
"size": "11481",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/operators/test_tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from rhum.rhumlogging import get_logger
from enum import Enum
class ItemType(Enum):
SWITCH=0x00
PILOT_SWITCH=0x01
TEMP_SENSOR=0x10
SWITCH_SENSOR=0x11
class Item:
    '''Item representation of all types of items'''
__logger = get_logger('rhum.messages.item.Item')
__type = None
__value = None
__id = None
def __init__(self, itemType, itemId, itemValue=None):
self.__type = itemType
self.__id = itemId
self.__value = itemValue
class Message:
def __init__(self):
self.message = 'test'
def build(self):
return True
|
{
"content_hash": "eb261068f56f440b2df27bc3621813be",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 57,
"avg_line_length": 20.5625,
"alnum_prop": 0.560790273556231,
"repo_name": "cvrignaud/rhum",
"id": "7922a16046c24fd33661b19026641eeeae1fca66",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rhum/messages/item.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31724"
}
],
"symlink_target": ""
}
|
"""Server startup routines."""
import logging
import logging.handlers
import os
import platform
from grr.lib import config_lib
from grr.lib import local
from grr.lib import log
from grr.lib import registry
from grr.lib import stats
# pylint: disable=g-import-not-at-top
if platform.system() != "Windows":
import pwd
# pylint: enable=g-import-not-at-top
def DropPrivileges():
"""Attempt to drop privileges if required."""
if config_lib.CONFIG["Server.username"]:
try:
os.setuid(pwd.getpwnam(config_lib.CONFIG["Server.username"]).pw_uid)
except (KeyError, OSError):
logging.exception("Unable to switch to user %s",
config_lib.CONFIG["Server.username"])
raise
# Make sure we do not reinitialize multiple times.
INIT_RAN = False
def Init():
"""Run all required startup routines and initialization hooks."""
global INIT_RAN
if INIT_RAN:
return
if hasattr(local, "stats"):
stats.STATS = local.stats.StatsCollector()
else:
stats.STATS = stats.StatsCollector()
  # Set up a temporary syslog handler so we have somewhere to log problems
  # with ConfigInit(), which needs to happen before we can create our proper
  # logging setup.
syslog_logger = logging.getLogger("TempLogger")
if os.path.exists("/dev/log"):
handler = logging.handlers.SysLogHandler(address="/dev/log")
else:
handler = logging.handlers.SysLogHandler()
syslog_logger.addHandler(handler)
try:
config_lib.SetPlatformArchContext()
config_lib.ParseConfigCommandLine()
except config_lib.Error:
syslog_logger.exception("Died during config initialization")
raise
log.ServerLoggingStartupInit()
registry.Init()
# Exempt config updater from this check because it is the one responsible for
# setting the variable.
if not config_lib.CONFIG.ContextApplied("ConfigUpdater Context"):
if not config_lib.CONFIG.Get("Server.initialized"):
raise RuntimeError("Config not initialized, run \"grr_config_updater"
" initialize\". If the server is already configured,"
" add \"Server.initialized: True\" to your config.")
INIT_RAN = True
|
{
"content_hash": "d5d5cc2c9de9450a6f0ba49c4050181c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 30.01388888888889,
"alnum_prop": 0.7006015733456733,
"repo_name": "destijl/grr",
"id": "6a3ec6b40ef07afb5f8e766399d6d236710c6598",
"size": "2183",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/lib/server_startup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3409"
},
{
"name": "C",
"bytes": "10658"
},
{
"name": "C++",
"bytes": "304794"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "26524"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "173692"
},
{
"name": "JavaScript",
"bytes": "63181"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Protocol Buffer",
"bytes": "307091"
},
{
"name": "Python",
"bytes": "6407750"
},
{
"name": "Ruby",
"bytes": "5604"
},
{
"name": "Shell",
"bytes": "40334"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
from flask import request
from framework.auth import Auth, decorators
from framework.utils import iso8601format
from osf.utils.permissions import ADMIN
from website.registries import utils
from website import util
def _view_registries_landing_page(campaign=None, **kwargs):
"""Landing page for the various registrations"""
auth = kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
is_logged_in = kwargs['auth'].logged_in
if is_logged_in:
# Using contributor_to instead of contributor_to_or_group_member.
# You need to be an admin contributor to register a node
registerable_nodes = [
node for node
in auth.user.contributor_to
if node.has_permission(user=auth.user, permission=ADMIN)
]
has_projects = bool(registerable_nodes)
else:
has_projects = False
if campaign == 'registered_report':
campaign_url_param = 'osf-registered-reports'
elif campaign == 'prereg_challenge' or campaign == 'prereg':
campaign_url_param = 'prereg'
else:
campaign_url_param = ''
return {
'is_logged_in': is_logged_in,
'has_draft_registrations': bool(utils.drafts_for_user(auth.user, campaign)),
'has_projects': has_projects,
'campaign_long': utils.REG_CAMPAIGNS.get(campaign),
'campaign_short': campaign,
'sign_up_url': util.web_url_for('auth_register', _absolute=True, campaign=campaign_url_param, next=request.url),
}
def registered_reports_landing(**kwargs):
return _view_registries_landing_page('registered_report', **kwargs)
@decorators.must_be_logged_in
def draft_registrations(auth, **kwargs):
"""API endpoint; returns various draft registrations the user can resume their draft"""
campaign = kwargs.get('campaign', None)
drafts = utils.drafts_for_user(auth.user, campaign)
return {
'draftRegistrations': [
{
'dateUpdated': iso8601format(draft.datetime_updated),
'dateInitiated': iso8601format(draft.datetime_initiated),
'node': {
'title': draft.branched_from.title,
},
'initiator': {
'name': draft.initiator.fullname,
},
'url': draft.branched_from.web_url_for(
'edit_draft_registration_page',
draft_id=draft._id,
),
}
for draft in drafts
],
}
def registries_landing_page(**kwargs):
    # placeholder for developers who don't have the ember app set up.
return {}
|
{
"content_hash": "37130afbbc01630cd4d8904b85a66226",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 120,
"avg_line_length": 36.28767123287671,
"alnum_prop": 0.6153265383163458,
"repo_name": "saradbowman/osf.io",
"id": "6af3c09df3338d42fae94fe67715df54653aeb60",
"size": "2673",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "website/registries/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70340"
},
{
"name": "JavaScript",
"bytes": "2566970"
},
{
"name": "Python",
"bytes": "2154059"
}
],
"symlink_target": ""
}
|
from functools import reduce
from typing import NamedTuple, Tuple
from . import anndata
class ElementRef(NamedTuple):
parent: "anndata.AnnData"
attrname: str
keys: Tuple[str, ...] = ()
def __str__(self) -> str:
return f".{self.attrname}" + "".join(map(lambda x: f"['{x}']", self.keys))
@property
def _parent_el(self):
return reduce(
lambda d, k: d[k], self.keys[:-1], getattr(self.parent, self.attrname)
)
def get(self):
"""Get referenced value in self.parent."""
return reduce(lambda d, k: d[k], self.keys, getattr(self.parent, self.attrname))
def set(self, val):
"""Set referenced value in self.parent."""
self._parent_el[self.keys[-1]] = val
def delete(self):
del self._parent_el[self.keys[-1]]
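# Illustrative sketch (not part of the module): ElementRef only needs an object
# exposing the named attribute, so a simple stand-in works for demonstration.
#
#   from types import SimpleNamespace
#   parent = SimpleNamespace(uns={'a': {'b': 1}})
#   ref = ElementRef(parent, 'uns', ('a', 'b'))
#   str(ref)    # -> ".uns['a']['b']"
#   ref.get()   # -> 1
#   ref.set(2)
#   parent.uns  # -> {'a': {'b': 2}}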
|
{
"content_hash": "111604a3314ad445a1622ef48193ad18",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 27.266666666666666,
"alnum_prop": 0.5904645476772616,
"repo_name": "theislab/anndata",
"id": "7dacaa6dcd75c8f28c458d76b92752942dd1581d",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anndata/_core/access.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "493297"
}
],
"symlink_target": ""
}
|
"""
===============
Array Internals
===============
Internal organization of numpy arrays
=====================================
It helps to understand a bit about how numpy arrays are handled under the covers. This section will not go into great detail; those wishing to understand the full details are referred to Travis Oliphant's book "Guide to NumPy".
NumPy arrays consist of two major components, the raw array data (from now on,
referred to as the data buffer), and the information about the raw array data.
The data buffer is typically what people think of as arrays in C or Fortran,
a contiguous (and fixed) block of memory containing fixed sized data items.
NumPy also contains a significant set of data that describes how to interpret
the data in the data buffer. This extra information contains (among other things):
1) The basic data element's size in bytes
2) The start of the data within the data buffer (an offset relative to the
beginning of the data buffer).
3) The number of dimensions and the size of each dimension
4) The separation between elements for each dimension (the 'stride'). This
does not have to be a multiple of the element size
5) The byte order of the data (which may not be the native byte order)
6) Whether the buffer is read-only
7) Information (via the dtype object) about the interpretation of the basic
data element. The basic data element may be as simple as a int or a float,
or it may be a compound object (e.g., struct-like), a fixed character field,
or Python object pointers.
8) Whether the array is to be interpreted as C-order or Fortran-order.
This arrangement allows for very flexible use of arrays. One thing that it allows
is simple changes of the metadata to change the interpretation of the array buffer.
Changing the byteorder of the array is a simple change involving no rearrangement
of the data. The shape of the array can be changed very easily without changing
anything in the data buffer or any data copying at all.
Among other things, this makes it possible to create a new array metadata
object that uses the same data buffer, yielding a new view of that data buffer
that has a different interpretation of the buffer (e.g., different shape,
offset, byte order, strides, etc.) but shares the same data bytes. Many
operations in numpy do just this, such as slices. Other operations, such as
transpose, don't move data elements around in the array, but rather change the
information about the shape and strides so that the indexing of the array
changes, but the data in the array doesn't move.
Typically these new combinations of array metadata with the same data buffer are
new 'views' into the data buffer. There is a different ndarray object, but it
uses the same data buffer. This is why it is necessary to force copies through
use of the .copy() method if one really wants to make a new and independent
copy of the data buffer.
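A minimal sketch of the view/copy distinction (the array names here are just
for illustration)::

    >>> import numpy as np
    >>> a = np.arange(6)
    >>> b = a.reshape(2, 3)          # new metadata, same data buffer
    >>> b.base is a
    True
    >>> b[0, 0] = 99                 # visible through the original array
    >>> a[0]
    99
    >>> c = a.copy()                 # an independent data buffer
    >>> np.may_share_memory(a, c)
    False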
New views into arrays mean the object reference counts for the data buffer
increase. Simply doing away with the original array object will not remove the
data buffer if other views of it still exist.
Multidimensional Array Indexing Order Issues
============================================
What is the right way to index
multi-dimensional arrays? Before you jump to conclusions about the one and
true way to index multi-dimensional arrays, it pays to understand why this is
a confusing issue. This section will try to explain in detail how numpy
indexing works and why we adopt the convention we do for images, and when it
may be appropriate to adopt other conventions.
The first thing to understand is
that there are two conflicting conventions for indexing 2-dimensional arrays.
Matrix notation uses the first index to indicate which row is being selected and
the second index to indicate which column is selected. This is opposite the
geometrically oriented-convention for images where people generally think the
first index represents x position (i.e., column) and the second represents y
position (i.e., row). This alone is the source of much confusion;
matrix-oriented users and image-oriented users expect two different things with
regard to indexing.
The second issue to understand is how indices correspond
to the order the array is stored in memory. In Fortran the first index is the
most rapidly varying index when moving through the elements of a two
dimensional array as it is stored in memory. If you adopt the matrix
convention for indexing, then this means the matrix is stored one column at a
time (since the first index moves to the next row as it changes). Thus Fortran
is considered a Column-major language. C has just the opposite convention. In
C, the last index changes most rapidly as one moves through the array as
stored in memory. Thus C is a Row-major language. The matrix is stored by
rows. Note that in both cases it presumes that the matrix convention for
indexing is being used, i.e., for both Fortran and C, the first index is the
row. Note this convention implies that the indexing convention is invariant
and that the data order changes to keep that so.
But that's not the only way
to look at it. Suppose one has large two-dimensional arrays (images or
matrices) stored in data files. Suppose the data are stored by rows rather than
by columns. If we are to preserve our index convention (whether matrix or
image) that means that depending on the language we use, we may be forced to
reorder the data if it is read into memory to preserve our indexing
convention. For example if we read row-ordered data into memory without
reordering, it will match the matrix indexing convention for C, but not for
Fortran. Conversely, it will match the image indexing convention for Fortran,
but not for C. For C, if one is using data stored in row order, and one wants
to preserve the image index convention, the data must be reordered when
reading into memory.
In the end, which you do for Fortran or C depends on
which is more important, not reordering data or preserving the indexing
convention. For large images, reordering data is potentially expensive, and
often the indexing convention is inverted to avoid that.
The situation with
numpy makes this issue yet more complicated. The internal machinery of numpy
arrays is flexible enough to accept any ordering of indices. One can simply
reorder indices by manipulating the internal stride information for arrays
without reordering the data at all. NumPy will know how to map the new index
order to the data without moving the data.
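A small sketch of this (the dtype is chosen so the strides are easy to read)::

    >>> import numpy as np
    >>> a = np.zeros((2, 3), dtype=np.int8)
    >>> a.strides                    # row-major: 3 bytes to the next row, 1 to the next column
    (3, 1)
    >>> a.T.strides                  # transpose swaps the strides, not the data
    (1, 3)
    >>> np.shares_memory(a, a.T)
    True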
So if this is true, why not choose
the index order that matches what you most expect? In particular, why not define
row-ordered images to use the image convention? (This is sometimes referred
to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
order options for array ordering in numpy.) The drawback of doing this is
potential performance penalties. It's common to access the data sequentially,
either implicitly in array operations or explicitly by looping over rows of an
image. When that is done, then the data will be accessed in non-optimal order.
As the first index is incremented, what is actually happening is that elements
spaced far apart in memory are being sequentially accessed, with usually poor
memory access speeds. For example, consider a two dimensional image 'im' defined
so that im[0, 10] represents the value at x=0, y=10. To be consistent with usual
Python behavior, im[0] would then represent a column at x=0. Yet that data
would be spread over the whole array since the data are stored in row order.
Despite the flexibility of numpy's indexing, it can't really paper over the fact
that basic operations are rendered inefficient because of data order, or that
getting contiguous subarrays is still awkward (e.g., im[:, 0] for the first row,
vs im[0] for a column). Thus one can't use an idiom such as "for row in im";
"for col in im" does work, but doesn't yield contiguous column data.
As it turns out, numpy is
smart enough when dealing with ufuncs to determine which index is the most
rapidly varying one in memory and uses that for the innermost loop. Thus for
ufuncs there is no large intrinsic advantage to either approach in most cases.
On the other hand, use of .flat with a FORTRAN-ordered array will lead to
non-optimal memory access as adjacent elements in the flattened array (iterator,
actually) are not contiguous in memory.
Indeed, the fact is that Python
indexing on lists and other sequences naturally leads to an outside-to-inside
ordering (the first index gets the largest grouping, the next the next largest,
and the last gets the smallest element). Since image data are normally stored
by rows, this corresponds to position within rows being the last item indexed.
If you do want to use Fortran ordering, realize that
there are two approaches to consider: 1) accept that the first index is just not
the most rapidly changing in memory and have all your I/O routines reorder
your data when going from memory to disk or vice versa, or 2) use numpy's
mechanism for mapping the first index to the most rapidly varying data. We
recommend the former if possible. The disadvantage of the latter is that many
of numpy's functions will yield arrays without Fortran ordering unless you are
careful to use the 'order' keyword. Doing this would be highly inconvenient.
Otherwise we recommend simply learning to reverse the usual order of indices
when accessing elements of an array. Granted, it goes against the grain, but
it is more in line with Python semantics and the natural order of the data.
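As a final sketch, the 'order' keyword mentioned above controls which index
varies most rapidly in memory::

    >>> import numpy as np
    >>> c = np.arange(6, dtype=np.int8).reshape(2, 3)   # C (row-major) order
    >>> f = np.asfortranarray(c)
    >>> c.strides, f.strides
    ((3, 1), (1, 2))
    >>> c.flags['C_CONTIGUOUS'], f.flags['F_CONTIGUOUS']
    (True, True)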
"""
|
{
"content_hash": "3b4922a92f15bb404f23a2cc5cd41556",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 259,
"avg_line_length": 59.28395061728395,
"alnum_prop": 0.7859225322782174,
"repo_name": "WarrenWeckesser/numpy",
"id": "6718f1108dc44f15803bada0a223df3dc0b51830",
"size": "9604",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "numpy/doc/internals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9059444"
},
{
"name": "C++",
"bytes": "174989"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8313055"
},
{
"name": "Shell",
"bytes": "9612"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
import logging
import pendulum
from pynamodb.attributes import (UnicodeAttribute, UTCDateTimeAttribute)
from pynamodb.exceptions import DoesNotExist
from pynamodb.models import Model
from . import BaseStorage
log = logging.getLogger(__name__)
class FeedState(Model):
class Meta:
table_name = 'RSSAlertbotFeeds'
write_capacity_units = 1
read_capacity_units = 1
name = UnicodeAttribute(hash_key=True)
last_run = UTCDateTimeAttribute()
class DynamoStorage(BaseStorage):
"""
    DynamoDB-backed storage for feed state, implementing BaseStorage.
"""
not_found_exception_class = DoesNotExist
def __init__(self, table=None, url=None, region='us-east-1'):
self.url = url
self.table = table
self.region = region
# if url:
# log.warning(f"Using DynamoDB url: {url}")
# if table:
# log.warning(f"Using DynamoDB table: {table}")
FeedState.create_table()
# def create_table(self):
# self.client.create_table(
# TableName = self.table_name,
# AttributeDefinitions = [
# {'AttributeName': 'name', 'AttributeType': 'S'}
# {'AttributeName': 'last_run', 'AttributeType': 'S'}
# ],
# KeySchema = [
# {'AttributeName': 'name', 'KeyType': 'HASH'}
# ],
# ProvisionedThroughput={
# 'ReadCapacityUnits': 1,
# 'WriteCapacityUnits': 1
# },
# )
def _read(self, name):
obj = FeedState.get(name)
return pendulum.instance(obj.last_run)
def _write(self, name, date):
try:
obj = FeedState.get(name)
except DoesNotExist:
obj = FeedState(name=name)
obj.last_run = date
obj.save()
log.debug(f"Saved date for '{name}'")
def _delete(self, name):
obj = FeedState.get(name)
obj.delete()
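# Illustrative usage sketch (assumes AWS credentials and a reachable DynamoDB
# table; the feed name below is made up):
#
#   storage = DynamoStorage(region='us-east-1')
#   storage._write('example-feed', pendulum.now('UTC'))
#   storage._read('example-feed')  # -> the pendulum datetime just written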
|
{
"content_hash": "718ad137773b0355d8d83563cd90142b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 72,
"avg_line_length": 24.91025641025641,
"alnum_prop": 0.5625321667524447,
"repo_name": "jwplayer/rssalertbot",
"id": "5774d2d304042b2a2395826dd4ac85e3e0a70459",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rssalertbot/storage/dynamo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1037"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "57574"
},
{
"name": "Shell",
"bytes": "33"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from app.users.serializers import UserSerializer
class Command(BaseCommand):
def handle(self, *args, **options):
users = [
{'username': 'andy', 'email': 'andy.krivovjas@gmail.com', 'first_name': 'Andy', 'last_name': 'Krivovjas', 'password': 'password_web', 'is_superuser': True}
]
for item in users:
user = UserSerializer.register(item)
if item.get('is_superuser'):
user.is_superuser = True
user.is_staff = True
user.is_admin = True
user.save()
|
{
"content_hash": "b811f0f8f85170621e8911a675a09414",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 167,
"avg_line_length": 36.94117647058823,
"alnum_prop": 0.5780254777070064,
"repo_name": "AndyKrivovjas/notes",
"id": "9d939d99e76acd643e28216d77e20c6eff81aa93",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api/management/commands/create_users.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12493"
},
{
"name": "HTML",
"bytes": "33429"
},
{
"name": "JavaScript",
"bytes": "7490"
},
{
"name": "Makefile",
"bytes": "363"
},
{
"name": "Python",
"bytes": "43761"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import sys
import os
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
scriptpath = os.path.dirname(os.path.realpath(sys.argv[0])) + '/../'
sys.path.append(os.path.abspath(scriptpath))
import utils
parameter_str = '_'.join(['top', str(utils.k), 'cw', str(utils.click_weight), 'year', utils.train_year])
train = joblib.load(utils.processed_data_path + 'train_is_booking_all_' + parameter_str +'.pkl')
X_train = train.ix[:,2:]
y_train = train['hotel_cluster'].astype(int)
print "train RandomForest Classifier..."
cforest = RandomForestClassifier(n_estimators=32, max_depth=50, min_samples_split=50, min_samples_leaf=5,
random_state=0, verbose=1, n_jobs=-1)
cforest.fit(X_train, y_train)
joblib.dump(cforest, utils.model_path + 'rf_all_'+ parameter_str + '.pkl')
|
{
"content_hash": "0e4462cccfe5b910df219b8b07b98eb7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 106,
"avg_line_length": 32.61538461538461,
"alnum_prop": 0.7264150943396226,
"repo_name": "parkerzf/kaggle-expedia",
"id": "56edc963c0082f97ce2a083cc85bf0dc070d7ef6",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/model/train_rf_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "19295799"
},
{
"name": "Jupyter Notebook",
"bytes": "244320"
},
{
"name": "Makefile",
"bytes": "1522"
},
{
"name": "PLpgSQL",
"bytes": "3353"
},
{
"name": "Python",
"bytes": "38383"
},
{
"name": "Shell",
"bytes": "438"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('herders', '0028_auto_20210320_1357'),
]
operations = [
migrations.AddField(
model_name='summoner',
name='dark_mode',
field=models.BooleanField(blank=True, default=False),
),
]
|
{
"content_hash": "0cc07823f4622d194b6dda598bf8044d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 22.1875,
"alnum_prop": 0.5830985915492958,
"repo_name": "porksmash/swarfarm",
"id": "d1b1ec04881c1d88d404e150d3edf4624329fde9",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "herders/migrations/0029_summoner_dark_mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29358"
},
{
"name": "HTML",
"bytes": "349774"
},
{
"name": "JavaScript",
"bytes": "80827"
},
{
"name": "Python",
"bytes": "932930"
},
{
"name": "Shell",
"bytes": "3018"
}
],
"symlink_target": ""
}
|
"""Utility functions not closely tied to other spec_tools types."""
# Copyright (c) 2018-2019 Collabora, Ltd.
# Copyright (c) 2013-2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getElemName(elem, default=None):
"""Get the name associated with an element, either a name child or name attribute."""
name_elem = elem.find('name')
if name_elem is not None:
return name_elem.text
# Fallback if there is no child.
return elem.get('name', default)
def getElemType(elem, default=None):
"""Get the type associated with an element, either a type child or type attribute."""
type_elem = elem.find('type')
if type_elem is not None:
return type_elem.text
# Fallback if there is no child.
return elem.get('type', default)
def findFirstWithPredicate(collection, pred):
"""Return the first element that satisfies the predicate, or None if none exist.
NOTE: Some places where this is used might be better served by changing to a dictionary.
"""
for elt in collection:
if pred(elt):
return elt
return None
def findNamedElem(elems, name):
"""Traverse a collection of elements with 'name' nodes or attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemName(elem) == name)
def findTypedElem(elems, typename):
"""Traverse a collection of elements with 'type' nodes or attributes, looking for and returning one with the right typename.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemType(elem) == typename)
def findNamedObject(collection, name):
"""Traverse a collection of elements with 'name' attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(collection, lambda elt: elt.name == name)
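# A minimal self-contained demo of the helpers above, using a registry-like
# XML fragment constructed here for illustration (not taken from the Vulkan
# registry itself).
if __name__ == '__main__':
    import xml.etree.ElementTree as ET
    root = ET.fromstring(
        '<types>'
        '<member><type>uint32_t</type><name>width</name></member>'
        '<member><type>uint32_t</type><name>height</name></member>'
        '</types>')
    members = list(root)
    # look the member up by its <name> child, then read its <type> child
    elem = findNamedElem(members, 'height')
    assert getElemName(elem) == 'height'
    assert getElemType(elem) == 'uint32_t'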
|
{
"content_hash": "0bb949c5702f51ac3c79e735e0c59d65",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 128,
"avg_line_length": 39.01470588235294,
"alnum_prop": 0.7218243497926875,
"repo_name": "endlessm/chromium-browser",
"id": "c16ec4d8d39647caccf55bbc2984dbff0a7900c2",
"size": "2653",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/vulkan_headers/registry/spec_tools/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.test import TestCase
from django.test import Client
from django.core.urlresolvers import reverse
from .models import Project
class ProjectsTests(TestCase):
def setUp(self):
"""
Setup a test project.
"""
self.testProject = Project(title = "test title", description = "test description")
self.testProject.save()
def testIndexView(self):
"""
Test to see if the index view for projects are loaded.
"""
response = self.client.get(reverse('projects:index'))
statusCodeOk = 200
self.assertEqual(response.status_code, statusCodeOk)
def testNewProjectIntoDb(self):
"""
Check if number of projects larger than one.
A test project should be added in setUp() and deleted in tearDown()
"""
numProjects = len(Project.objects.all())
leastNumProjects = 1
self.assertGreaterEqual(numProjects, leastNumProjects)
def testOnlyOneProject(self):
"""
Check to see if only 1 project in db
"""
numProjects = len(Project.objects.all())
expectedNumbProjects = 1
self.assertEqual(numProjects, expectedNumbProjects)
def tearDown(self):
"""
Delete test project from db
"""
        self.testProject.delete()
|
{
"content_hash": "5c59590ae613eeb1e5b14d3afcc345c1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 90,
"avg_line_length": 29.23913043478261,
"alnum_prop": 0.6260223048327137,
"repo_name": "mariufa/DjangoTesting",
"id": "90b5f3aa56af4fbf920b705044191616ea31055f",
"size": "1345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoTesting/projects/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3403"
},
{
"name": "Python",
"bytes": "10689"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TestModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('for_inline', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inline_test_models', to='select2_tagging.TestModel')),
],
),
]
|
{
"content_hash": "8181870e8b8529fdee048881b3e57d25",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 185,
"avg_line_length": 30.434782608695652,
"alnum_prop": 0.62,
"repo_name": "luzfcb/django-autocomplete-light",
"id": "2b0b4741a638816ddb39587a61210cc9132669d0",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/select2_tagging/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "166"
},
{
"name": "HTML",
"bytes": "4331"
},
{
"name": "JavaScript",
"bytes": "7815"
},
{
"name": "Python",
"bytes": "146531"
},
{
"name": "Shell",
"bytes": "1031"
}
],
"symlink_target": ""
}
|
import threading
import time
import tensorflow as tf
from tensorboard.plugins.debugger_v2 import debug_data_multiplexer
mock = tf.compat.v1.test.mock
class RunInBackgroundRepeatedlyTest(tf.test.TestCase):
def testRunInBackgroundRepeatedlyThreeTimes(self):
state = {"counter": 0}
def run_three_times():
state["counter"] += 1
if state["counter"] == 3:
raise StopIteration()
OriginalThread = threading.Thread
with mock.patch.object(
threading,
"Thread",
# Use a non-daemon thread for testing. A non-daemon thread
# will block the test process from exiting if not terminated
# properly. Here the thread is expected to be terminated by the
# `StopIteration` raised by `run_three_times()`.
lambda target, daemon: OriginalThread(target=target, daemon=False),
):
(
interrupt_event,
thread,
) = debug_data_multiplexer.run_repeatedly_in_background(
run_three_times,
None, # `interval_sec is None` means indefinite wait()
)
interrupt_event.set()
time.sleep(0.05)
interrupt_event.set()
time.sleep(0.05)
interrupt_event.set()
thread.join()
self.assertEqual(state["counter"], 3)
class ParseTensorNameTest(tf.test.TestCase):
def testParseTensorNameWithNoOutputSlot(self):
op_name, slot = debug_data_multiplexer.parse_tensor_name("MatMul_1")
self.assertEqual(op_name, "MatMul_1")
self.assertEqual(slot, 0)
def testParseTensorNameWithZeroOutputSlot(self):
op_name, slot = debug_data_multiplexer.parse_tensor_name("MatMul_1:0")
self.assertEqual(op_name, "MatMul_1")
self.assertEqual(slot, 0)
def testParseTensorNameWithNonZeroOutputSlot(self):
op_name, slot = debug_data_multiplexer.parse_tensor_name("Unpack:10")
self.assertEqual(op_name, "Unpack")
self.assertEqual(slot, 10)
def testParseTensorNameWithInvalidSlotRaisesValueError(self):
with self.assertRaises(ValueError):
debug_data_multiplexer.parse_tensor_name("Unpack:10:10")
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "f1944a10a9d8d983a9b89dd1e641e0a3",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 34.220588235294116,
"alnum_prop": 0.6214009454232918,
"repo_name": "tensorflow/tensorboard",
"id": "6fc60b7c02c9615253af505680139cc0088cd92b",
"size": "3017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorboard/plugins/debugger_v2/debug_data_multiplexer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16222"
},
{
"name": "Dockerfile",
"bytes": "1226"
},
{
"name": "HTML",
"bytes": "154824"
},
{
"name": "Java",
"bytes": "20643"
},
{
"name": "JavaScript",
"bytes": "11869"
},
{
"name": "Jupyter Notebook",
"bytes": "7697"
},
{
"name": "Python",
"bytes": "2922179"
},
{
"name": "Rust",
"bytes": "311041"
},
{
"name": "SCSS",
"bytes": "136834"
},
{
"name": "Shell",
"bytes": "36731"
},
{
"name": "Starlark",
"bytes": "541743"
},
{
"name": "TypeScript",
"bytes": "5930550"
}
],
"symlink_target": ""
}
|
"""
This test script is adopted from:
https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
"""
import pkgutil
import types
import importlib
import warnings
import scipy
def check_dir(module, module_name=None):
"""Returns a mapping of all objects with the wrong __module__ attribute."""
if module_name is None:
module_name = module.__name__
results = {}
for name in dir(module):
item = getattr(module, name)
if (hasattr(item, '__module__') and hasattr(item, '__name__')
and item.__module__ != module_name):
results[name] = item.__module__ + '.' + item.__name__
return results
def test_dir_testing():
"""Assert that output of dir has only one "testing/tester"
attribute without duplicate"""
assert len(dir(scipy)) == len(set(dir(scipy)))
# Historically SciPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
PUBLIC_MODULES = ["scipy." + s for s in [
"cluster",
"cluster.vq",
"cluster.hierarchy",
"constants",
"fft",
"fftpack",
"integrate",
"interpolate",
"io",
"io.arff",
"io.matlab",
"io.wavfile",
"linalg",
"linalg.blas",
"linalg.cython_blas",
"linalg.lapack",
"linalg.cython_lapack",
"linalg.interpolative",
"misc",
"ndimage",
"odr",
"optimize",
"signal",
"signal.windows",
"sparse",
"sparse.linalg",
"sparse.csgraph",
"spatial",
"spatial.distance",
"spatial.transform",
"special",
"stats",
"stats.contingency",
"stats.distributions",
"stats.mstats",
"stats.qmc",
"stats.sampling"
]]
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
# of underscores) but should not be used. For many of those modules the
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
# Support for these private modules will be removed in SciPy v2.0.0
PRIVATE_BUT_PRESENT_MODULES = [
'scipy.constants.codata',
'scipy.constants.constants',
'scipy.fftpack.basic',
'scipy.fftpack.convolve',
'scipy.fftpack.helper',
'scipy.fftpack.pseudo_diffs',
'scipy.fftpack.realtransforms',
'scipy.integrate.odepack',
'scipy.integrate.quadpack',
'scipy.integrate.dop',
'scipy.integrate.lsoda',
'scipy.integrate.vode',
'scipy.interpolate.dfitpack',
'scipy.interpolate.fitpack',
'scipy.interpolate.fitpack2',
'scipy.interpolate.interpnd',
'scipy.interpolate.interpolate',
'scipy.interpolate.ndgriddata',
'scipy.interpolate.polyint',
'scipy.interpolate.rbf',
'scipy.io.arff.arffread',
'scipy.io.harwell_boeing',
'scipy.io.idl',
'scipy.io.mmio',
'scipy.io.netcdf',
'scipy.io.matlab.byteordercodes',
'scipy.io.matlab.mio',
'scipy.io.matlab.mio4',
'scipy.io.matlab.mio5',
'scipy.io.matlab.mio5_params',
'scipy.io.matlab.mio5_utils',
'scipy.io.matlab.mio_utils',
'scipy.io.matlab.miobase',
'scipy.io.matlab.streams',
'scipy.linalg.basic',
'scipy.linalg.decomp',
'scipy.linalg.decomp_cholesky',
'scipy.linalg.decomp_lu',
'scipy.linalg.decomp_qr',
'scipy.linalg.decomp_schur',
'scipy.linalg.decomp_svd',
'scipy.linalg.flinalg',
'scipy.linalg.matfuncs',
'scipy.linalg.misc',
'scipy.linalg.special_matrices',
'scipy.misc.common',
'scipy.misc.doccer',
'scipy.ndimage.filters',
'scipy.ndimage.fourier',
'scipy.ndimage.interpolation',
'scipy.ndimage.measurements',
'scipy.ndimage.morphology',
'scipy.odr.models',
'scipy.odr.odrpack',
'scipy.optimize.cobyla',
'scipy.optimize.cython_optimize',
'scipy.optimize.lbfgsb',
'scipy.optimize.linesearch',
'scipy.optimize.minpack',
'scipy.optimize.minpack2',
'scipy.optimize.moduleTNC',
'scipy.optimize.nonlin',
'scipy.optimize.optimize',
'scipy.optimize.slsqp',
'scipy.optimize.tnc',
'scipy.optimize.zeros',
'scipy.signal.bsplines',
'scipy.signal.filter_design',
'scipy.signal.fir_filter_design',
'scipy.signal.lti_conversion',
'scipy.signal.ltisys',
'scipy.signal.signaltools',
'scipy.signal.spectral',
'scipy.signal.spline',
'scipy.signal.waveforms',
'scipy.signal.wavelets',
'scipy.signal.windows.windows',
'scipy.sparse.base',
'scipy.sparse.bsr',
'scipy.sparse.compressed',
'scipy.sparse.construct',
'scipy.sparse.coo',
'scipy.sparse.csc',
'scipy.sparse.csr',
'scipy.sparse.data',
'scipy.sparse.dia',
'scipy.sparse.dok',
'scipy.sparse.extract',
'scipy.sparse.lil',
'scipy.sparse.linalg.dsolve',
'scipy.sparse.linalg.eigen',
'scipy.sparse.linalg.interface',
'scipy.sparse.linalg.isolve',
'scipy.sparse.linalg.matfuncs',
'scipy.sparse.sparsetools',
'scipy.sparse.spfuncs',
'scipy.sparse.sputils',
'scipy.spatial.ckdtree',
'scipy.spatial.kdtree',
'scipy.spatial.qhull',
'scipy.spatial.transform.rotation',
'scipy.special.add_newdocs',
'scipy.special.basic',
'scipy.special.cython_special',
'scipy.special.orthogonal',
'scipy.special.sf_error',
'scipy.special.specfun',
'scipy.special.spfun_stats',
'scipy.stats.biasedurn',
'scipy.stats.kde',
'scipy.stats.morestats',
'scipy.stats.mstats_basic',
'scipy.stats.mstats_extras',
'scipy.stats.mvn',
'scipy.stats.statlib',
'scipy.stats.stats',
]
def is_unexpected(name):
"""Check if this needs to be considered."""
if '._' in name or '.tests' in name or '.setup' in name:
return False
if name in PUBLIC_MODULES:
return False
if name in PRIVATE_BUT_PRESENT_MODULES:
return False
return True
SKIP_LIST = [
'scipy.conftest',
'scipy.version',
]
def test_all_modules_are_expected():
"""
Test that we don't add anything that looks like a new public module by
accident. Check is based on filenames.
"""
modnames = []
for _, modname, ispkg in pkgutil.walk_packages(path=scipy.__path__,
prefix=scipy.__name__ + '.',
onerror=None):
if is_unexpected(modname) and modname not in SKIP_LIST:
# We have a name that is new. If that's on purpose, add it to
# PUBLIC_MODULES. We don't expect to have to add anything to
# PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
modnames.append(modname)
if modnames:
raise AssertionError(f'Found unexpected modules: {modnames}')
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
SKIP_LIST_2 = [
'scipy.char',
'scipy.rec',
'scipy.emath',
'scipy.math',
'scipy.random',
'scipy.ctypeslib',
'scipy.ma'
]
def test_all_modules_are_expected_2():
"""
Method checking all objects. The pkgutil-based method in
`test_all_modules_are_expected` does not catch imports into a namespace,
only filenames.
"""
def find_unexpected_members(mod_name):
members = []
module = importlib.import_module(mod_name)
if hasattr(module, '__all__'):
objnames = module.__all__
else:
objnames = dir(module)
for objname in objnames:
if not objname.startswith('_'):
fullobjname = mod_name + '.' + objname
if isinstance(getattr(module, objname), types.ModuleType):
if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
members.append(fullobjname)
return members
unexpected_members = find_unexpected_members("scipy")
for modname in PUBLIC_MODULES:
unexpected_members.extend(find_unexpected_members(modname))
if unexpected_members:
raise AssertionError("Found unexpected object(s) that look like "
"modules: {}".format(unexpected_members))
def test_api_importable():
"""
Check that all submodules listed higher up in this file can be imported
Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
simply need to be removed from the list (deprecation may or may not be
needed - apply common sense).
"""
def check_importable(module_name):
try:
importlib.import_module(module_name)
except (ImportError, AttributeError):
return False
return True
module_names = []
for module_name in PUBLIC_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules in the public API that cannot be "
"imported: {}".format(module_names))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', category=DeprecationWarning)
warnings.filterwarnings('always', category=ImportWarning)
for module_name in PRIVATE_BUT_PRESENT_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules that are not really public but looked "
"public and can not be imported: "
"{}".format(module_names))
|
{
"content_hash": "73cec19952d32e8335118cf03f993a72",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 85,
"avg_line_length": 30.54153846153846,
"alnum_prop": 0.6345960104775338,
"repo_name": "zerothi/scipy",
"id": "657ca7e08e4090c7045ddad5b77b57a513f6e895",
"size": "9926",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "scipy/_lib/tests/test_public_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4306215"
},
{
"name": "C++",
"bytes": "3692292"
},
{
"name": "Fortran",
"bytes": "5573034"
},
{
"name": "HTML",
"bytes": "124330"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "76425"
},
{
"name": "Python",
"bytes": "10541152"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""django_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
{
"content_hash": "21c1e8240ac187bb0943e5866696ba21",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.7011795543905636,
"repo_name": "FactoryBoy/factory_boy",
"id": "a8b95abbe71c85b69ca3b2573bf7a1ee2d3985d1",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/django_demo/django_demo/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3337"
},
{
"name": "Python",
"bytes": "330382"
}
],
"symlink_target": ""
}
|
"""
Provides a small event framework
"""
from __future__ import absolute_import, division, unicode_literals
import logging
log = logging.getLogger('event')
_events = {}
class Event(object):
"""Represents one registered event."""
def __init__(self, name, func, priority=128):
self.name = name
self.func = func
self.priority = priority
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __eq__(self, other):
return self.priority == other.priority
def __lt__(self, other):
return self.priority < other.priority
def __gt__(self, other):
return self.priority > other.priority
def __str__(self):
return '<Event(name=%s,func=%s,priority=%s)>' % (self.name, self.func.__name__, self.priority)
__repr__ = __str__
def event(name, priority=128):
"""Register event to function with a decorator"""
def decorator(func):
add_event_handler(name, func, priority)
return func
return decorator
def get_events(name):
"""
:param String name: event name
:return: List of :class:`Event` for *name* ordered by priority
"""
if name not in _events:
raise KeyError('No such event %s' % name)
_events[name].sort(reverse=True)
return _events[name]
def add_event_handler(name, func, priority=128):
"""
:param string name: Event name
:param function func: Function that acts as event handler
:param priority: Priority for this hook
:return: Event created
:rtype: Event
    :raises ValueError: If *func* is already registered in an event
"""
events = _events.setdefault(name, [])
for event in events:
if event.func == func:
raise ValueError('%s has already been registered as event listener under name %s' % (func.__name__, name))
log.trace('registered function %s to event %s' % (func.__name__, name))
event = Event(name, func, priority)
events.append(event)
return event
def remove_event_handlers(name):
"""Removes all handlers for given event `name`."""
_events.pop(name, None)
def remove_event_handler(name, func):
"""Remove `func` from the handlers for event `name`."""
for e in list(_events.get(name, [])):
if e.func is func:
_events[name].remove(e)
def fire_event(name, *args, **kwargs):
"""
    Trigger an event with *name*. If the event is not hooked by anything, nothing happens.
    If a handler returns a non-None value, it replaces the first argument passed to the next handler.
    :param name: Name of event to be called
    :param args: List of arguments passed to handler function
    :param kwargs: Keyword arguments passed to handler function
"""
if name not in _events:
return
for event in get_events(name):
result = event(*args, **kwargs)
if result is not None:
args = (result,) + args[1:]
return args and args[0]
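# A minimal demonstration of the two behaviours documented above: handlers run
# in descending priority order, and a non-None return value replaces the first
# argument seen by the next handler. Note that `log.trace` is a FlexGet
# extension to the stdlib logger, so running this file standalone requires
# FlexGet's logger setup (or swapping `log.trace` for `log.debug`).
if __name__ == '__main__':
    @event('greet', priority=200)
    def shout(text):
        return text.upper()

    @event('greet', priority=100)
    def exclaim(text):
        return text + '!'

    # shout (priority 200) runs first; its result is fed to exclaim
    assert fire_event('greet', 'hello') == 'HELLO!'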
|
{
"content_hash": "766ad95e61d4a5419db335941282dd36",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 119,
"avg_line_length": 28.18867924528302,
"alnum_prop": 0.6295180722891566,
"repo_name": "cvium/Flexget",
"id": "69e0e2db59c06d38e4cae5dff0a2e3bc4d664c94",
"size": "2988",
"binary": false,
"copies": "13",
"ref": "refs/heads/develop",
"path": "flexget/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4878"
},
{
"name": "HTML",
"bytes": "28181"
},
{
"name": "JavaScript",
"bytes": "44914"
},
{
"name": "Python",
"bytes": "2383289"
}
],
"symlink_target": ""
}
|
import DOM
class UIObject:
def getElement(self):
return self.element
def setElement(self, element):
self.element = element
def setStyleName(self, style):
DOM.setAttribute(self.element, "className", style)
class Widget(UIObject):
def setParent(self, parent):
self.parent = parent
class Panel(Widget):
pass
class ComplexPanel(Panel):
def __init__(self):
self.children = []
def add(self, widget):
self.children.append(widget)
widget.setParent(self)
return True
class AbsolutePanel(ComplexPanel):
def __init__(self):
ComplexPanel.__init__(self)
self.setElement(DOM.createDiv())
DOM.setStyleAttribute(self.getElement(), "overflow", "hidden")
def add(self, widget):
ComplexPanel.add(self, widget)
DOM.appendChild(self.getElement(), widget.getElement())
return True
|
{
"content_hash": "7f34f92cbdd8179a708feb26aa2f7e7e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 70,
"avg_line_length": 20.26086956521739,
"alnum_prop": 0.6255364806866953,
"repo_name": "andreyvit/pyjamas",
"id": "d994e4ffea40ee660bd4709a67db32f033efd227",
"size": "932",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pyjs/tests/test008.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "400333"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "3726391"
},
{
"name": "Shell",
"bytes": "11256"
}
],
"symlink_target": ""
}
|
import math
import csv
from geopy.geocoders import GoogleV3
def hubeny(p1, p2):
a2 = 6378137.0 ** 2
b2 = 6356752.314140 ** 2
e2 = (a2 - b2) / a2
def d2r(deg):
return deg * (2 * math.pi) / 360
(lon1, lat1, lon2, lat2) = map(d2r, p1 + p2)
w = 1 - e2 * math.sin((lat1 + lat2) / 2) ** 2
c2 = math.cos((lat1 + lat2) / 2) ** 2
return math.sqrt((b2 / w ** 3) * (lat1 - lat2) ** 2 + (a2 / w) * c2 * (lon1 - lon2) ** 2)
class kosen:
def __init__(self, name, longitude, latitude):
self.name = name
self.point = [longitude, latitude]
def distance_to(self, another_kosen):
#print self.name, another_kosen.name
#print self.point, another_kosen.point
return hubeny(self.point, another_kosen.point)
def read_address_list(filename):
reader = csv.reader(open(filename, 'r'))
address_list = []
for row in reader:
address_list.append(map(lambda x: x.decode('utf-8'), row))
return address_list
def make_loc_data(address_list):
geolocator = GoogleV3()
loc_list = map(lambda x: [x[0], geolocator.geocode(x[1])], address_list)
return loc_list
def make_kosen_data(loc_list):
kosen_list = map(lambda x: kosen(x[0], x[1].longitude, x[1].latitude), loc_list)
return kosen_list
def load_kosen_data(filename):
reader = csv.reader(open(filename, 'r'))
kosen_list = []
for row in reader:
print row
kosen_list.append(kosen(row[0], float(row[1]), float(row[2])))
return kosen_list
def make_distance_list(kosen_list):
lst = kosen_list[:]
distance_list = []
while(len(lst) > 1):
route_kosen = lst.pop()
distance_list.extend(
map(lambda x: [route_kosen.name, x.name, route_kosen.distance_to(x)], lst))
return distance_list
def save_distance_list(dlist, filename):
writer = csv.writer(open(filename, 'w'))
for row in dlist:
writer.writerow([row[0].encode('utf-8'), row[1].encode('utf-8'), row[2]])
if __name__ == '__main__':
add_list = read_address_list("kosen_address.csv")
print "Getting Location Data..."
loc_list = make_loc_data(add_list)
kosen_list = make_kosen_data(loc_list)
dlist = make_distance_list(kosen_list)
save_distance_list(dlist, "dlist.csv")
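# A quick sanity check for hubeny(), not part of the original script. Points
# are [longitude, latitude] in degrees; the coordinates below are approximate
# and for illustration only:
#
#   tokyo = [139.767, 35.681]
#   osaka = [135.496, 34.702]
#   print(hubeny(tokyo, osaka))  # roughly 4.0e5 metres (~400 km)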
|
{
"content_hash": "5532496f0bb8189cfc6651d8939c72ad",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 93,
"avg_line_length": 27.63855421686747,
"alnum_prop": 0.6046207497820401,
"repo_name": "ytsunetsune/kosen_junkai",
"id": "5d9183f255f5e137338ecc195eb92e89d2443ceb",
"size": "2319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kosen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2319"
}
],
"symlink_target": ""
}
|
from msrest.exceptions import (
ClientException,
SerializationError,
DeserializationError,
TokenExpiredError,
ClientRequestError,
AuthenticationError,
HttpOperationError,
)
from .api_client import AutoRestSwaggerBATByteService, AutoRestSwaggerBATByteServiceConfiguration
__all__ = [
'ClientException',
'SerializationError',
'DeserializationError',
'TokenExpiredError',
'ClientRequestError',
'AuthenticationError',
'HttpOperationError',
'AutoRestSwaggerBATByteService',
'AutoRestSwaggerBATByteServiceConfiguration'
]
|
{
"content_hash": "d6cf55be83b8035b4d9b26133c276bb0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 97,
"avg_line_length": 25.304347826086957,
"alnum_prop": 0.7560137457044673,
"repo_name": "vulcansteel/autorest",
"id": "9beb0be33b845b234182e211470b0d96e8cc240c",
"size": "1056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyByte/auto_rest_swagger_bat_byte_service/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "819"
},
{
"name": "C#",
"bytes": "8857811"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "3171512"
},
{
"name": "JavaScript",
"bytes": "4063363"
},
{
"name": "PowerShell",
"bytes": "8003"
},
{
"name": "Puppet",
"bytes": "145"
},
{
"name": "Python",
"bytes": "1831874"
},
{
"name": "Ruby",
"bytes": "218212"
},
{
"name": "TypeScript",
"bytes": "158339"
}
],
"symlink_target": ""
}
|
"""
Error handling using an interactive parser
==========================================
This example demonstrates error handling using an interactive parser in LALR
When the parser encounters an UnexpectedToken exception, it creates an
interactive parser with the current parse-state, and lets you control how
to proceed step-by-step. When you've achieved the correct parse-state,
you can resume the run by returning True.
"""
from lark import Token
from examples.advanced._json_parser import json_parser
def ignore_errors(e):
if e.token.type == 'COMMA':
# Skip comma
return True
elif e.token.type == 'SIGNED_NUMBER':
# Try to feed a comma and retry the number
e.interactive_parser.feed_token(Token('COMMA', ','))
e.interactive_parser.feed_token(e.token)
return True
# Unhandled error. Will stop parse and raise exception
return False
def main():
s = "[0 1, 2,, 3,,, 4, 5 6 ]"
res = json_parser.parse(s, on_error=ignore_errors)
print(res) # prints [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
main()
|
{
"content_hash": "078bc3f8a5f9bc4e1565948d94338dbc",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 30.13888888888889,
"alnum_prop": 0.6534562211981567,
"repo_name": "lark-parser/lark",
"id": "aeaa21b494b9513d7dae4ace37c74bea4d8a4985",
"size": "1085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/advanced/error_handling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nearley",
"bytes": "44"
},
{
"name": "Python",
"bytes": "479188"
}
],
"symlink_target": ""
}
|
import unittest
from cross2sheet.grid_features import Grid, BackgroundElt, BorderElt, TextElt
from cross2sheet.image import ImageGrid
from cross2sheet.transforms import autonumber, outside_bars
from urllib.request import urlopen
from io import StringIO
def grid_to_string(g):
i = StringIO()
oldr=0
for r,_,e in g.features:
if not isinstance(e,BackgroundElt):
continue
if r>oldr:
oldr=r
i.write('\n')
if e.color==0:
i.write('#')
elif e.color==0xffffff:
i.write('.')
else:
i.write('O')
return i.getvalue()
def bars_to_string(g):
ymax=g.height-1
xmax=g.width-1
grid=[[' ' for x in range(2*xmax+3)] for y in range(2*ymax+3)]
for y,x,b in g.features:
if not isinstance(b,BorderElt):
continue
for c in b.dirs:
if c=='T':
grid[2*y][2*x]='+'
grid[2*y][2*x+1]='-'
grid[2*y][2*x+2]='+'
elif c=='L':
grid[2*y][2*x]='+'
grid[2*y+1][2*x]='|'
grid[2*y+2][2*x]='+'
elif c=='B':
grid[2*y+2][2*x]='+'
grid[2*y+2][2*x+1]='-'
grid[2*y+2][2*x+2]='+'
elif c=='R':
grid[2*y][2*x+2]='+'
grid[2*y+1][2*x+2]='|'
grid[2*y+2][2*x+2]='+'
return '\n'.join(''.join(r) for r in grid)
def labels_to_string(g):
grid=[['.' for x in range(g.width)] for y in range(g.height)]
for y,x,t in g.features:
if isinstance(t,TextElt):
grid[y][x]='*'
return '\n'.join(''.join(r) for r in grid)
def print_tests(url,grid):
print(' url={}'.format(url))
print(' rows={}'.format(grid.height))
print(' cols={}'.format(grid.width))
if any(isinstance(e,BackgroundElt) and e.color!=0xffffff for r,c,e in grid.features):
print(" fill='''")
print(grid_to_string(grid))
print("'''")
if any(isinstance(e,BorderElt) for r,c,e in grid.features):
bordered=Grid(grid.height,grid.width)
bordered.features.extend(grid.features)
bordered.features.extend(outside_bars(grid))
print(" bars='''")
print(bars_to_string(bordered))
print("'''")
if any(isinstance(e,TextElt) for r,c,e in grid.features):
label_str = labels_to_string(grid)
gr = Grid(grid.height,grid.width)
gr.features.extend(autonumber(gr))
if label_str == labels_to_string(grid):
print(" cells_with_text='auto'")
else:
print(" cells_with_text='''")
print(label_str)
print("'''")
class ImageTest(unittest.TestCase):
def setUp(self):
url=self.url
if url.startswith('20'):
url='http://web.mit.edu/puzzle/www/'+url
req=urlopen(url)
data=req.read()
req.close()
self.img=ImageGrid(data)
self.maxDiff=None
def test_all(self):
detected=(len(self.img.breaks[0])-1,len(self.img.breaks[1])-1)
expected=(self.rows,self.cols)
self.assertEqual(expected,detected,'wrong dimensions')
if hasattr(self,'fill'):
with self.subTest('fill'):
grid=self.img.grid()
grid.features.extend(self.img.read_background())
f=grid_to_string(grid)
self.assertEqual(self.fill.strip(),f.strip())
if hasattr(self,'bars'):
with self.subTest('bars'):
grid=self.img.grid()
grid.features.extend(self.img.read_bars())
grid.features.extend(outside_bars(grid))
b=bars_to_string(grid)
self.assertEqual(self.bars.strip(),b.strip())
if hasattr(self,'cells_with_text'):
with self.subTest('cells_with_text'):
if self.cells_with_text=='auto':
grid=self.img.grid()
grid.features.extend(self.img.read_background())
grid.features.extend(self.img.read_bars())
grid.features.extend(autonumber(grid))
self.cells_with_text=labels_to_string(grid)
grid=self.img.grid()
grid.features.extend(self.img.autonumber_if_text_found())
t=labels_to_string(grid)
self.assertEqual(self.cells_with_text.strip(),t.strip())
|
{
"content_hash": "b23e53d0f94ec4420201f5e713427c32",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 89,
"avg_line_length": 36.193548387096776,
"alnum_prop": 0.5227272727272727,
"repo_name": "dgulotta/cross2sheet",
"id": "8871f90cff813e95c3e35279db6eeb6a13d8afdf",
"size": "4488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cross2sheet/test/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4137"
},
{
"name": "Python",
"bytes": "70626"
}
],
"symlink_target": ""
}
|
import unittest
from katas.kyu_7.maximum_length_difference import mxdiflg
class MaximumLengthDifferenceTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(mxdiflg([
'hoqq', 'bbllkw', 'oox', 'ejjuyyy', 'plmiis', 'xxxzgpsssa',
'xxwwkktt', 'znnnnfqknaz', 'qqquuhii', 'dvvvwz'
], ['cccooommaaqqoxii', 'gggqaffhhh', 'tttoowwwmmww']), 13)
def test_equal_2(self):
self.assertEqual(mxdiflg(['abc'], []), -1)
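# A reference implementation consistent with the tests above (a sketch, not
# necessarily the kata's canonical solution): the answer is the largest length
# gap between a string from one array and a string from the other, or -1 when
# either array is empty.
def mxdiflg_sketch(a1, a2):
    if not a1 or not a2:
        return -1
    lens1 = [len(s) for s in a1]
    lens2 = [len(s) for s in a2]
    return max(max(lens1) - min(lens2), max(lens2) - min(lens1))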
|
{
"content_hash": "62296a3a578641eec64076a647f6a195",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 71,
"avg_line_length": 34.142857142857146,
"alnum_prop": 0.6401673640167364,
"repo_name": "the-zebulan/CodeWars",
"id": "9715ecb7916cf81768cc0b20afef3116987f0371",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kyu_7_tests/test_maximum_length_difference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule, Sequential
from torch import nn as nn
class ResLayer(Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
downsample_first (bool): Downsample at the first block or last block.
False for Hourglass, True for ResNet. Default: True
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
downsample_first=True,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if downsample_first:
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
else: # downsample_first=False is for HourglassModule
for _ in range(num_blocks - 1):
layers.append(
block(
inplanes=inplanes,
planes=inplanes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
class SimplifiedBasicBlock(BaseModule):
"""Simplified version of original basic residual block. This is used in
`SCNet <https://arxiv.org/abs/2012.10150>`_.
- Norm layer is now optional
- Last ReLU in forward function is removed
"""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
                 init_cfg=None):
        super(SimplifiedBasicBlock, self).__init__(init_cfg)
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
assert not with_cp, 'Not implemented yet.'
self.with_norm = norm_cfg is not None
with_bias = True if norm_cfg is None else False
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=with_bias)
if self.with_norm:
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, planes, postfix=1)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=with_bias)
if self.with_norm:
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, planes, postfix=2)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name) if self.with_norm else None
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name) if self.with_norm else None
def forward(self, x):
"""Forward function."""
identity = x
out = self.conv1(x)
if self.with_norm:
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
if self.with_norm:
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
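# A minimal usage sketch (assumes mmdet's BasicBlock import path; in practice
# ResLayer is built by a backbone config rather than by hand):
#
#   from mmdet.models.backbones.resnet import BasicBlock
#   layer = ResLayer(BasicBlock, inplanes=64, planes=128, num_blocks=2, stride=2)
#   # -> a Sequential of two blocks; the first carries a 1x1-conv downsample
#   #    because inplanes != planes * BasicBlock.expansion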
|
{
"content_hash": "f448e03b2b0bf29f8bf4dd6c98221245",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 79,
"avg_line_length": 33.56613756613756,
"alnum_prop": 0.49085750315258514,
"repo_name": "open-mmlab/mmdetection",
"id": "5c3e89fb035d197cb82173e90659dac89ff07fab",
"size": "6392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmdet/models/utils/res_layer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class FeatureidkeyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="featureidkey", parent_name="choropleth", **kwargs):
super(FeatureidkeyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "49572a771addae4af82e6ffcaa13540d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 87,
"avg_line_length": 38.25,
"alnum_prop": 0.6230936819172114,
"repo_name": "plotly/python-api",
"id": "9f680b56b8af1338902d69d553eff3cf4992c3f4",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/_featureidkey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from typing import Match
from discord import Message
from MoMMI import command, MChannel
@command("testmerge_dummy", "testmerge")
async def testmerge_dummy_command(channel: MChannel, match: Match, message: Message) -> None:
await channel.send("Sorry dude you got the wrong MoMMI. <@211414753925398528> is still in charge of test merges.")
|
{
"content_hash": "1d02d0265d84ff79a42473842b38e0ac",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 118,
"avg_line_length": 43.125,
"alnum_prop": 0.7710144927536232,
"repo_name": "PJB3005/MoMMI",
"id": "52fbbbe461de782a2a118a246b4b224e4a9b8c41",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2",
"path": "MoMMI/Modules/testmerge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147871"
},
{
"name": "Rust",
"bytes": "32335"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
}
|
import unittest
from mkdocs.utils.babel_stub import Locale, UnknownLocaleError
class BabelStubTests(unittest.TestCase):
def test_locale_language_only(self):
locale = Locale('es')
self.assertEqual(locale.language, 'es')
self.assertEqual(locale.territory, '')
self.assertEqual(str(locale), 'es')
def test_locale_language_territory(self):
locale = Locale('es', 'ES')
self.assertEqual(locale.language, 'es')
self.assertEqual(locale.territory, 'ES')
self.assertEqual(str(locale), 'es_ES')
def test_parse_locale_language_only(self):
locale = Locale.parse('fr', '_')
self.assertEqual(locale.language, 'fr')
self.assertEqual(locale.territory, '')
self.assertEqual(str(locale), 'fr')
def test_parse_locale_language_territory(self):
locale = Locale.parse('fr_FR', '_')
self.assertEqual(locale.language, 'fr')
self.assertEqual(locale.territory, 'FR')
self.assertEqual(str(locale), 'fr_FR')
def test_parse_locale_language_territory_sep(self):
locale = Locale.parse('fr-FR', '-')
self.assertEqual(locale.language, 'fr')
self.assertEqual(locale.territory, 'FR')
self.assertEqual(str(locale), 'fr_FR')
def test_parse_locale_bad_type(self):
self.assertRaises(TypeError, Locale.parse, ['list'], '_')
def test_parse_locale_invalid_characters(self):
self.assertRaises(ValueError, Locale.parse, '42', '_')
def test_parse_locale_bad_format(self):
self.assertRaises(ValueError, Locale.parse, 'en-GB', '_')
def test_parse_locale_bad_format_sep(self):
self.assertRaises(ValueError, Locale.parse, 'en_GB', '-')
def test_parse_locale_unknown_locale(self):
self.assertRaises(UnknownLocaleError, Locale.parse, 'foo', '_')
|
{
"content_hash": "0c2b9fcf3e4fabce31f003f2dfb9bd64",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 36.92,
"alnum_prop": 0.647887323943662,
"repo_name": "waylan/mkdocs",
"id": "65ac3da4f5f95105e405fb5bb6e7d63508346f5f",
"size": "1846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkdocs/tests/utils/babel_stub_tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "9145"
},
{
"name": "HTML",
"bytes": "28174"
},
{
"name": "JavaScript",
"bytes": "115264"
},
{
"name": "Python",
"bytes": "376024"
}
],
"symlink_target": ""
}
|
import discord
from discord.ext import commands
from commands.util import league_help
import requests
from secret import LEAGUE_KEY
class L_see_stats():
def __init__(self, bot):
self.bot = bot
@commands.command(description = "Get information about a summoner.")
async def l_see_stats(self, summonerName):
try:
#get summoner ID
sumIdReq = league_help.baseUri + league_help.summonerV3 + "/by-name/" + str(summonerName) + "?api_key=" + LEAGUE_KEY
requestSum = requests.get(sumIdReq)
data = requestSum.json()
if requestSum.status_code != 200:
try: await self.bot.say("Summoner does not exist")
except Exception as e: print("Exception: {0}".format(e))
return
sumID = data['id']
            # Grab advanced summoner details
sumStatsReq = league_help.baseUri + league_help.leagueV3 + "/positions/by-summoner/" + str(sumID) + "?api_key=" + LEAGUE_KEY
reqStats = requests.get(sumStatsReq)
statData = reqStats.json()
if reqStats.status_code != 200:
await self.bot.say("Error getting stats details. {0}".format(reqStats.status_code))
return
formattedText = ""
            # Format the text to be displayed in Discord
for i in statData:
formattedText += i['queueType'] + " " +i['tier'] + " " + i['rank'] + " " + "LP: " + str(i['leaguePoints']) + "\n"
formattedText += "Wins: " + str(i['wins']) + ", Losses: " + str(i['losses']) + ", Ratio: " + str( round((i['wins']/(i['losses']+i['wins']) * 100))) + "%\n"
try:
await self.bot.say(str(summonerName) + "\n\n" + formattedText)
except Exception as e:
print("Exception: {0}".format(e))
except Exception as e:
print("Exception: {0}".format(e))
def setup(bot):
bot.add_cog(L_see_stats(bot))
|
{
"content_hash": "a3842544e74b8a270ea71f6d153b76ea",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 171,
"avg_line_length": 40.19607843137255,
"alnum_prop": 0.5409756097560976,
"repo_name": "bjkoehle/AhriBot",
"id": "55f85223af0608e08ccc1cb19773c3ccb67a5152",
"size": "2050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/league/l_see_stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18970"
}
],
"symlink_target": ""
}
|
import argparse
import importlib.util
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch
from .test.configs.test_config import CommonConfig
SEED_VAL = 42
DNN_LIB = "DNN"
# common path for model savings
MODEL_PATH_ROOT = os.path.join(CommonConfig().output_data_root_dir, "{}/models")
def get_full_model_path(lib_name, model_full_name):
model_path = MODEL_PATH_ROOT.format(lib_name)
return {
"path": model_path,
"full_path": os.path.join(model_path, model_full_name)
}
def plot_acc(data_list, experiment_name):
plt.figure(figsize=[8, 6])
plt.plot(data_list[:, 0], "r", linewidth=2.5, label="Original Model")
plt.plot(data_list[:, 1], "b", linewidth=2.5, label="Converted DNN Model")
plt.xlabel("Iterations ", fontsize=15)
plt.ylabel("Time (ms)", fontsize=15)
plt.title(experiment_name, fontsize=15)
plt.legend()
full_path_to_fig = os.path.join(CommonConfig().output_data_root_dir, experiment_name + ".png")
plt.savefig(full_path_to_fig, bbox_inches="tight")
def get_final_summary_info(general_quality_metric, general_inference_time, metric_name):
general_quality_metric = np.array(general_quality_metric)
general_inference_time = np.array(general_inference_time)
summary_line = "===== End of processing. General results:\n"
"\t* mean {} for the original model: {}\t"
"\t* mean time (min) for the original model inferences: {}\n"
"\t* mean {} for the DNN model: {}\t"
"\t* mean time (min) for the DNN model inferences: {}\n".format(
metric_name, np.mean(general_quality_metric[:, 0]),
np.mean(general_inference_time[:, 0]) / 60000,
metric_name, np.mean(general_quality_metric[:, 1]),
np.mean(general_inference_time[:, 1]) / 60000,
)
return summary_line
def set_common_reproducibility():
random.seed(SEED_VAL)
np.random.seed(SEED_VAL)
def set_pytorch_env():
set_common_reproducibility()
torch.manual_seed(SEED_VAL)
torch.set_printoptions(precision=10)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(SEED_VAL)
        torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def set_tf_env(is_use_gpu=True):
set_common_reproducibility()
tf.random.set_seed(SEED_VAL)
os.environ["TF_DETERMINISTIC_OPS"] = "1"
if tf.config.list_physical_devices("GPU") and is_use_gpu:
gpu_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_visible_devices(gpu_devices[0], "GPU")
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
os.environ["TF_USE_CUDNN"] = "1"
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def str_bool(input_val):
if input_val.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif input_val.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value was expected')
def get_formatted_model_list(model_list):
note_line = 'Please, choose the model from the below list:\n'
spaces_to_set = ' ' * (len(note_line) - 2)
return note_line + ''.join([spaces_to_set, '{} \n'] * len(model_list)).format(*model_list)
def model_str(model_list):
def type_model_list(input_val):
if input_val.lower() in model_list:
return input_val.lower()
else:
raise argparse.ArgumentTypeError(
'The model is currently unavailable for test.\n' +
get_formatted_model_list(model_list)
)
return type_model_list
def get_test_module(test_module_name, test_module_path):
module_spec = importlib.util.spec_from_file_location(test_module_name, test_module_path)
test_module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(test_module)
return test_module
def create_parser():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--test",
type=str_bool,
help="Define whether you'd like to run the model with OpenCV for testing.",
default=False
    )
parser.add_argument(
"--default_img_preprocess",
type=str_bool,
help="Define whether you'd like to preprocess the input image with defined"
" PyTorch or TF functions for model test with OpenCV.",
default=False
    )
parser.add_argument(
"--evaluate",
type=str_bool,
help="Define whether you'd like to run evaluation of the models (ex.: TF vs OpenCV networks).",
default=True
)
return parser
def create_extended_parser(model_list):
parser = create_parser()
parser.add_argument(
"--model_name",
type=model_str(model_list=model_list),
help="\nDefine the model name to test.\n" +
get_formatted_model_list(model_list),
required=True
)
return parser
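# A small self-contained illustration of the argument helpers above; the model
# names are placeholders, and because this module uses package-relative
# imports it must be run via `python -m`, not directly.
if __name__ == '__main__':
    demo_parser = create_extended_parser(['modela', 'modelb'])
    demo_args = demo_parser.parse_args(['--model_name', 'ModelA', '--test', 'yes'])
    assert demo_args.model_name == 'modela'  # model_str lower-cases its input
    assert demo_args.test is True            # str_bool parsed 'yes'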
|
{
"content_hash": "3f4ca0d3250795d0771ed7c6820372ae",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 103,
"avg_line_length": 33.24183006535948,
"alnum_prop": 0.6482500983090838,
"repo_name": "opencv/opencv",
"id": "cf24dd385b864e3f773c6b230bb6483b11f325a0",
"size": "5086",
"binary": false,
"copies": "2",
"ref": "refs/heads/4.x",
"path": "samples/dnn/dnn_model_runner/dnn_conversion/common/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "1986"
},
{
"name": "Batchfile",
"bytes": "1498"
},
{
"name": "C",
"bytes": "1543870"
},
{
"name": "C++",
"bytes": "35975082"
},
{
"name": "CMake",
"bytes": "1010867"
},
{
"name": "Cuda",
"bytes": "333437"
},
{
"name": "Dockerfile",
"bytes": "309"
},
{
"name": "HTML",
"bytes": "40027"
},
{
"name": "Java",
"bytes": "774232"
},
{
"name": "JavaScript",
"bytes": "233673"
},
{
"name": "Kotlin",
"bytes": "5204"
},
{
"name": "Objective-C",
"bytes": "100731"
},
{
"name": "Objective-C++",
"bytes": "392600"
},
{
"name": "Perl",
"bytes": "15865"
},
{
"name": "PowerShell",
"bytes": "14591"
},
{
"name": "Prolog",
"bytes": "843"
},
{
"name": "Python",
"bytes": "1038154"
},
{
"name": "Shell",
"bytes": "22738"
},
{
"name": "Swift",
"bytes": "301765"
},
{
"name": "TeX",
"bytes": "3530"
}
],
"symlink_target": ""
}
|
'''
Created on 1 Sep 2014
@author: Ed
'''
from django.core.management.base import BaseCommand, CommandError
from quiz.models import *
class Command(BaseCommand):
args = ''
help = 'Prints all badges'
def handle(self, *args, **options):
print('handler called')
self.stdout.write('Not yet implemented')
|
{
"content_hash": "25a29f1b70f246a0ce69d99f48d2b660",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 21.875,
"alnum_prop": 0.6342857142857142,
"repo_name": "edporteous/chronotype",
"id": "c77a46cf54d4f1f20d4f63ae047e24e75ab5c5ee",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chronotype/quiz/management/commands/printbadges.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2105"
},
{
"name": "Python",
"bytes": "21518"
},
{
"name": "Shell",
"bytes": "560"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import object
from builtins import str
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-DomainTrustMapping',
'Author': ['@harmj0y'],
'Description': ('Maps all reachable domain trusts with .CSV output. Part of PowerView.'),
'Software': 'S0194',
'Techniques': ['T1482'],
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'API' : {
'Description' : 'Switch. Use an api call (DsEnumerateDomainTrusts) to enumerate the trusts instead of the built-in LDAP method',
'Required' : False,
'Value' : ''
},
'NET' : {
'Description' : 'Switch. Use .NET queries to enumerate trusts instead of the default LDAP method',
'Required' : False,
'Value' : ''
},
'LDAPFilter' : {
'Description' : 'Specifies an LDAP query string that is used to filter Active Directory objects.',
'Required' : False,
'Value' : ''
},
'Properties' : {
'Description' : 'Specifies the properties of the output object to retrieve from the server.',
'Required' : False,
'Value' : ''
},
'SearchBase' : {
'Description' : 'The LDAP source to search through, e.g. "LDAP://OU=secret,DC=testlab,DC=local" Useful for OU queries.',
'Required' : False,
'Value' : ''
},
'Server' : {
'Description' : 'Specifies an active directory server (domain controller) to bind to',
'Required' : False,
'Value' : ''
},
'SearchScope' : {
'Description' : 'Specifies the scope to search under, Base/OneLevel/Subtree (default of Subtree)',
'Required' : False,
'Value' : ''
},
'ResultPageSize' : {
'Description' : 'Specifies the PageSize to set for the LDAP searcher object.',
'Required' : False,
'Value' : ''
},
'ServerTimeLimit' : {
'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds.',
'Required' : False,
'Value' : ''
},
'SecurityMasks' : {
'Description' : 'Specifies an option for examining security information of a directory object. One of "Dacl", "Group", "None", "Owner", "Sacl".',
'Required' : False,
'Value' : ''
},
'Tombstone' : {
'Description' : 'Switch. Specifies that the search should also return deleted/tombstoned objects.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.strip_powershell_comments(moduleCode)
script += "\n" + moduleName + " "
for option,values in self.options.items():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += '| ConvertTo-Csv -NoTypeInformation | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
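        # Example (hypothetical option values): with API='True' and Server='dc01.testlab.local',
        # the loop above plus this line yield a script ending roughly in:
        #   Get-DomainTrustMapping -API -Server dc01.testlab.local| ConvertTo-Csv -NoTypeInformation ...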
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
script = helpers.keyword_obfuscation(script)
return script
|
{
"content_hash": "b77e75e7d96a59999b16edf8cbb1a462",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 165,
"avg_line_length": 37.78709677419355,
"alnum_prop": 0.48096295031586134,
"repo_name": "byt3bl33d3r/Empire",
"id": "a2642e47c78edc0c183321f6691931db9d3d6278",
"size": "5857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/situational_awareness/network/powerview/map_domain_trust.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
}
|
import sys
from py4j.java_gateway import get_method
from pyflink.java_gateway import get_gateway
from pyflink.table.table_schema import TableSchema
from pyflink.table.window import GroupWindow
from pyflink.util.utils import to_jarray
if sys.version > '3':
xrange = range
__all__ = ['Table', 'GroupedTable', 'GroupWindowedTable', 'OverWindowedTable', 'WindowGroupedTable']
class Table(object):
"""
A :class:`Table` is the core component of the Table API.
Similar to how the batch and streaming APIs have DataSet and DataStream,
the Table API is built around :class:`Table`.
Use the methods of :class:`Table` to transform data.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> env.set_parallelism(1)
>>> t_env = StreamTableEnvironment.create(env)
>>> ...
>>> t_env.register_table_source("source", ...)
>>> t = t_env.scan("source")
>>> t.select(...)
>>> ...
>>> t_env.register_table_sink("result", ...)
>>> t.insert_into("result")
>>> t_env.execute("table_job")
Operations such as :func:`~pyflink.table.Table.join`, :func:`~pyflink.table.Table.select`,
:func:`~pyflink.table.Table.where` and :func:`~pyflink.table.Table.group_by`
take arguments in an expression string. Please refer to the documentation for
the expression syntax.
"""
def __init__(self, j_table):
self._j_table = j_table
def select(self, fields):
"""
Performs a selection operation. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions.
Example:
::
>>> tab.select("key, value + 'hello'")
:param fields: Expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
def alias(self, fields):
"""
        Renames the fields of the expression result. Use this to disambiguate fields before
        join operations.
Example:
::
>>> tab.alias("a, b")
:param fields: Field list expression string.
:return: The result :class:`Table`.
"""
return Table(get_method(self._j_table, "as")(fields))
def filter(self, predicate):
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.filter("name = 'Fred'")
:param predicate: Predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.filter(predicate))
def where(self, predicate):
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.where("name = 'Fred'")
:param predicate: Predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.where(predicate))
def group_by(self, fields):
"""
Groups the elements on some grouping keys. Use this before a selection with aggregations
to perform the aggregation on a per-group basis. Similar to a SQL GROUP BY statement.
Example:
::
>>> tab.group_by("key").select("key, value.avg")
:param fields: Group keys.
:return: The grouped :class:`Table`.
"""
return GroupedTable(self._j_table.groupBy(fields))
def distinct(self):
"""
Removes duplicate values and returns only distinct (different) values.
Example:
::
>>> tab.select("key, value").distinct()
:return: The result :class:`Table`.
"""
return Table(self._j_table.distinct())
def join(self, right, join_predicate=None):
"""
Joins two :class:`Table`. Similar to a SQL join. The fields of the two joined
        operations must not overlap; use :func:`~pyflink.table.Table.alias` to rename fields if
necessary. You can use where and select clauses after a join to further specify the
behaviour of the join.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` .
Example:
::
>>> left.join(right).where("a = b && c > 3").select("a, b, d")
>>> left.join(right, "a = b")
:param right: Right table.
:param join_predicate: Optional, the join predicate expression string.
:return: The result :class:`Table`.
"""
if join_predicate is not None:
return Table(self._j_table.join(right._j_table, join_predicate))
else:
return Table(self._j_table.join(right._j_table))
def left_outer_join(self, right, join_predicate=None):
"""
Joins two :class:`Table`. Similar to a SQL left outer join. The fields of the two joined
        operations must not overlap; use :func:`~pyflink.table.Table.alias` to rename fields if
necessary.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` and its
:class:`TableConfig` must have null check enabled (default).
Example:
::
>>> left.left_outer_join(right).select("a, b, d")
>>> left.left_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:param join_predicate: Optional, the join predicate expression string.
:return: The result :class:`Table`.
"""
if join_predicate is None:
return Table(self._j_table.leftOuterJoin(right._j_table))
else:
return Table(self._j_table.leftOuterJoin(right._j_table, join_predicate))
def right_outer_join(self, right, join_predicate):
"""
Joins two :class:`Table`. Similar to a SQL right outer join. The fields of the two joined
        operations must not overlap; use :func:`~pyflink.table.Table.alias` to rename fields if
necessary.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` and its
:class:`TableConfig` must have null check enabled (default).
Example:
::
>>> left.right_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:param join_predicate: The join predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.rightOuterJoin(right._j_table, join_predicate))
def full_outer_join(self, right, join_predicate):
"""
Joins two :class:`Table`. Similar to a SQL full outer join. The fields of the two joined
        operations must not overlap; use :func:`~pyflink.table.Table.alias` to rename fields if
necessary.
.. note::
Both tables must be bound to the same :class:`TableEnvironment` and its
:class:`TableConfig` must have null check enabled (default).
Example:
::
>>> left.full_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:param join_predicate: The join predicate expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.fullOuterJoin(right._j_table, join_predicate))
def join_lateral(self, table_function_call, join_predicate=None):
"""
        Joins this Table with a user-defined TableFunction. This join is similar to a SQL inner
join but works with a table function. Each row of the table is joined with the rows
produced by the table function.
Example:
::
>>> t_env.register_java_function("split", "java.table.function.class.name")
>>> tab.join_lateral("split(text, ' ') as (b)", "a = b")
:param table_function_call: An expression representing a table function call.
:type table_function_call: str
        :param join_predicate: Optional, the join predicate expression string; the join is ON
            TRUE if not provided.
:type join_predicate: str
:return: The result Table.
:rtype: Table
"""
if join_predicate is None:
return Table(self._j_table.joinLateral(table_function_call))
else:
return Table(self._j_table.joinLateral(table_function_call, join_predicate))
def left_outer_join_lateral(self, table_function_call, join_predicate=None):
"""
        Joins this Table with a user-defined TableFunction. This join is similar to
a SQL left outer join but works with a table function. Each row of the table is joined
with all rows produced by the table function. If the join does not produce any row, the
outer row is padded with nulls.
Example:
::
>>> t_env.register_java_function("split", "java.table.function.class.name")
>>> tab.left_outer_join_lateral("split(text, ' ') as (b)")
:param table_function_call: An expression representing a table function call.
:type table_function_call: str
        :param join_predicate: Optional, the join predicate expression string; the join is ON
            TRUE if not provided.
:type join_predicate: str
:return: The result Table.
:rtype: Table
"""
if join_predicate is None:
return Table(self._j_table.leftOuterJoinLateral(table_function_call))
else:
return Table(self._j_table.leftOuterJoinLateral(table_function_call, join_predicate))
def minus(self, right):
"""
Minus of two :class:`Table` with duplicate records removed.
Similar to a SQL EXCEPT clause. Minus returns records from the left table that do not
exist in the right table. Duplicate records in the left table are returned
exactly once, i.e., duplicates are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.minus(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.minus(right._j_table))
def minus_all(self, right):
"""
        Minus of two :class:`Table`. Similar to a SQL EXCEPT ALL clause. MinusAll returns
        the records that do not exist in
the right table. A record that is present n times in the left table and m times
in the right table is returned (n - m) times, i.e., as many duplicates as are present
in the right table are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.minus_all(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.minusAll(right._j_table))
def union(self, right):
"""
Unions two :class:`Table` with duplicate records removed.
Similar to a SQL UNION. The fields of the two union operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.union(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.union(right._j_table))
def union_all(self, right):
"""
Unions two :class:`Table`. Similar to a SQL UNION ALL. The fields of the two union
operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.union_all(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.unionAll(right._j_table))
def intersect(self, right):
"""
Intersects two :class:`Table` with duplicate records removed. Intersect returns records
that exist in both tables. If a record is present in one or both tables more than once,
it is returned just once, i.e., the resulting table has no duplicate records. Similar to a
SQL INTERSECT. The fields of the two intersect operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.intersect(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.intersect(right._j_table))
def intersect_all(self, right):
"""
Intersects two :class:`Table`. IntersectAll returns records that exist in both tables.
If a record is present in both tables more than once, it is returned as many times as it
is present in both tables, i.e., the resulting table might have duplicate records. Similar
to an SQL INTERSECT ALL. The fields of the two intersect operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`TableEnvironment`.
Example:
::
>>> left.intersect_all(right)
:param right: Right table.
:return: The result :class:`Table`.
"""
return Table(self._j_table.intersectAll(right._j_table))
def order_by(self, fields):
"""
Sorts the given :class:`Table`. Similar to SQL ORDER BY.
        The resulting Table is globally sorted across all parallel partitions.
Example:
::
>>> tab.order_by("name.desc")
        :param fields: Order fields expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.orderBy(fields))
def offset(self, offset):
"""
Limits a sorted result from an offset position.
Similar to a SQL OFFSET clause. Offset is technically part of the Order By operator and
thus must be preceded by it.
:func:`~pyflink.table.Table.offset` can be combined with a subsequent
:func:`~pyflink.table.Table.fetch` call to return n rows after skipping the first o rows.
Example:
::
# skips the first 3 rows and returns all following rows.
>>> tab.order_by("name.desc").offset(3)
# skips the first 10 rows and returns the next 5 rows.
>>> tab.order_by("name.desc").offset(10).fetch(5)
:param offset: Number of records to skip.
:return: The result :class:`Table`.
"""
return Table(self._j_table.offset(offset))
def fetch(self, fetch):
"""
Limits a sorted result to the first n rows.
Similar to a SQL FETCH clause. Fetch is technically part of the Order By operator and
thus must be preceded by it.
        :func:`~pyflink.table.Table.fetch` can be combined with a preceding
        :func:`~pyflink.table.Table.offset` call to return n rows after skipping the first o rows.
Example:
Returns the first 3 records.
::
>>> tab.order_by("name.desc").fetch(3)
Skips the first 10 rows and returns the next 5 rows.
::
>>> tab.order_by("name.desc").offset(10).fetch(5)
:param fetch: The number of records to return. Fetch must be >= 0.
:return: The result :class:`Table`.
"""
return Table(self._j_table.fetch(fetch))
def window(self, window):
"""
Defines group window on the records of a table.
A group window groups the records of a table by assigning them to windows defined by a time
or row interval.
For streaming tables of infinite size, grouping into windows is required to define finite
groups on which group-based aggregates can be computed.
For batch tables of finite size, windowing essentially provides shortcuts for time-based
groupBy.
.. note::
Computing windowed aggregates on a streaming table is only a parallel operation
if additional grouping attributes are added to the
:func:`~pyflink.table.GroupWindowedTable.group_by` clause.
If the :func:`~pyflink.table.GroupWindowedTable.group_by` only references a GroupWindow
alias, the streamed table will be processed by a single task, i.e., with parallelism 1.
Example:
::
>>> tab.window(Tumble.over("10.minutes").on("rowtime").alias("w")) \\
... .group_by("w") \\
... .select("a.sum as a, w.start as b, w.end as c, w.rowtime as d")
:param window: A :class:`pyflink.table.window.GroupWindow` created from
:class:`pyflink.table.window.Tumble`, :class:`pyflink.table.window.Session`
or :class:`pyflink.table.window.Slide`.
:return: A :class:`GroupWindowedTable`.
"""
# type: (GroupWindow) -> GroupWindowedTable
return GroupWindowedTable(self._j_table.window(window._java_window))
def over_window(self, *over_windows):
"""
Defines over-windows on the records of a table.
An over-window defines for each record an interval of records over which aggregation
functions can be computed.
Example:
::
            >>> table.over_window(Over.partition_by("c").order_by("rowTime") \\
... .preceding("10.seconds").alias("ow")) \\
... .select("c, b.count over ow, e.sum over ow")
.. note::
Computing over window aggregates on a streaming table is only a parallel
operation if the window is partitioned. Otherwise, the whole stream will be processed
by a single task, i.e., with parallelism 1.
.. note::
Over-windows for batch tables are currently not supported.
:param over_windows: :class:`OverWindow`s created from :class:`Over`.
:return: A :class:`OverWindowedTable`.
"""
gateway = get_gateway()
window_array = to_jarray(gateway.jvm.OverWindow,
[item._java_over_window for item in over_windows])
return OverWindowedTable(self._j_table.window(window_array))
def add_columns(self, fields):
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
        can contain complex expressions, but cannot contain aggregations. It will throw an
exception if the added fields already exist.
Example:
::
>>> tab.add_columns("a + 1 as a1, concat(b, 'sunny') as b1")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.addColumns(fields))
def add_or_replace_columns(self, fields):
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
        can contain complex expressions, but cannot contain aggregations. Existing fields will be
        replaced if an added column's name matches an existing column name. Moreover, if the
        added fields have duplicate field names, the last one is used.
Example:
::
>>> tab.add_or_replace_columns("a + 1 as a1, concat(b, 'sunny') as b1")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.addOrReplaceColumns(fields))
def rename_columns(self, fields):
"""
Renames existing columns. Similar to a field alias statement. The field expressions
should be alias expressions, and only the existing fields can be renamed.
Example:
::
>>> tab.rename_columns("a as a1, b as b1")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.renameColumns(fields))
def drop_columns(self, fields):
"""
Drops existing columns. The field expressions should be field reference expressions.
Example:
::
>>> tab.drop_columns("a, b")
:param fields: Column list string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.dropColumns(fields))
def insert_into(self, table_path, *table_path_continued):
"""
Writes the :class:`Table` to a :class:`TableSink` that was registered under
the specified name. For the path resolution algorithm see
:func:`~TableEnvironment.use_database`.
Example:
::
>>> tab.insert_into("sink")
:param table_path: The first part of the path of the registered :class:`TableSink` to which
the :class:`Table` is written. This is to ensure at least the name of the
:class:`Table` is provided.
:param table_path_continued: The remaining part of the path of the registered
:class:`TableSink` to which the :class:`Table` is written.
"""
gateway = get_gateway()
j_table_path = to_jarray(gateway.jvm.String, table_path_continued)
self._j_table.insertInto(table_path, j_table_path)
def get_schema(self):
"""
Returns the :class:`TableSchema` of this table.
:return: The schema of this table.
"""
return TableSchema(j_table_schema=self._j_table.getSchema())
def print_schema(self):
"""
Prints the schema of this table to the console in a tree format.
"""
self._j_table.printSchema()
def __str__(self):
return self._j_table.toString()
class GroupedTable(object):
"""
A table that has been grouped on a set of grouping keys.
"""
def __init__(self, java_table):
self._j_table = java_table
def select(self, fields):
"""
Performs a selection operation on a grouped table. Similar to an SQL SELECT statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> tab.group_by("key").select("key, value.avg + ' The average' as average")
:param fields: Expression string that contains group keys and aggregate function calls.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
class GroupWindowedTable(object):
"""
A table that has been windowed for :class:`pyflink.table.window.GroupWindow`.
"""
def __init__(self, java_group_windowed_table):
self._j_table = java_group_windowed_table
def group_by(self, fields):
"""
Groups the elements by a mandatory window and one or more optional grouping attributes.
The window is specified by referring to its alias.
If no additional grouping attribute is specified and if the input is a streaming table,
the aggregation will be performed by a single task, i.e., with parallelism 1.
Aggregations are performed per group and defined by a subsequent
:func:`~pyflink.table.WindowGroupedTable.select` clause similar to SQL SELECT-GROUP-BY
query.
Example:
::
>>> tab.window(group_window.alias("w")).group_by("w, key").select("key, value.avg")
:param fields: Group keys.
:return: A :class:`WindowGroupedTable`.
"""
return WindowGroupedTable(self._j_table.groupBy(fields))
class WindowGroupedTable(object):
"""
A table that has been windowed and grouped for :class:`pyflink.table.window.GroupWindow`.
"""
def __init__(self, java_window_grouped_table):
self._j_table = java_window_grouped_table
def select(self, fields):
"""
Performs a selection operation on a window grouped table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> window_grouped_table.select("key, window.start, value.avg as valavg")
:param fields: Expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
class OverWindowedTable(object):
"""
A table that has been windowed for :class:`pyflink.table.window.OverWindow`.
Unlike group windows, which are specified in the GROUP BY clause, over windows do not collapse
    rows. Instead, over window aggregates compute an aggregate for each input row over a range of
its neighboring rows.
"""
def __init__(self, java_over_windowed_table):
self._j_table = java_over_windowed_table
def select(self, fields):
"""
        Performs a selection operation on an over windowed table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> over_windowed_table.select("c, b.count over ow, e.sum over ow")
:param fields: Expression string.
:return: The result :class:`Table`.
"""
return Table(self._j_table.select(fields))
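# A minimal end-to-end sketch (illustrative field names, assuming "source" and
# "result" are registered as in the Table docstring above):
#
#   t = t_env.scan("source")
#   t.filter("a > 10").group_by("b").select("b, a.sum as total").insert_into("result")
#   t_env.execute("table_job")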
|
{
"content_hash": "af9f01489ac5aef9fe3d260285318186",
"timestamp": "",
"source": "github",
"line_count": 744,
"max_line_length": 100,
"avg_line_length": 34.35752688172043,
"alnum_prop": 0.608168374931539,
"repo_name": "fhueske/flink",
"id": "920a372d5fc3b802f42957a7140c72e22f4a2df5",
"size": "26520",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/table/table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4588"
},
{
"name": "CSS",
"bytes": "57936"
},
{
"name": "Clojure",
"bytes": "93205"
},
{
"name": "Dockerfile",
"bytes": "10793"
},
{
"name": "FreeMarker",
"bytes": "17422"
},
{
"name": "HTML",
"bytes": "224476"
},
{
"name": "Java",
"bytes": "48798371"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "809835"
},
{
"name": "Scala",
"bytes": "13339497"
},
{
"name": "Shell",
"bytes": "485338"
},
{
"name": "TypeScript",
"bytes": "243702"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import json
from unittest import skipIf
from django.test import TestCase
from django.contrib.auth.models import User
from haystack.query import SearchQuerySet
from rest_framework import status
from rest_framework.pagination import PageNumberPagination
from rest_framework.routers import SimpleRouter
from rest_framework.serializers import Serializer
from rest_framework.test import force_authenticate, APIRequestFactory
from drf_haystack.viewsets import HaystackViewSet
from drf_haystack.serializers import HaystackSerializer, HaystackFacetSerializer
from drf_haystack.mixins import MoreLikeThisMixin, FacetMixin
from . import restframework_version
from .mockapp.models import MockPerson, MockPet
from .mockapp.search_indexes import MockPersonIndex, MockPetIndex
factory = APIRequestFactory()
class HaystackViewSetTestCase(TestCase):
fixtures = ["mockperson", "mockpet"]
def setUp(self):
MockPersonIndex().reindex()
MockPetIndex().reindex()
self.router = SimpleRouter()
class FacetSerializer(HaystackFacetSerializer):
class Meta:
fields = ["firstname", "lastname", "created"]
class ViewSet1(FacetMixin, HaystackViewSet):
index_models = [MockPerson]
serializer_class = Serializer
facet_serializer_class = FacetSerializer
class ViewSet2(MoreLikeThisMixin, HaystackViewSet):
index_models = [MockPerson]
serializer_class = Serializer
class ViewSet3(HaystackViewSet):
index_models = [MockPerson, MockPet]
serializer_class = Serializer
self.view1 = ViewSet1
self.view2 = ViewSet2
self.view3 = ViewSet3
def tearDown(self):
MockPersonIndex().clear()
def test_viewset_get_queryset_no_queryset(self):
request = factory.get(path="/", data="", content_type="application/json")
response = self.view1.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_queryset_with_queryset(self):
setattr(self.view1, "queryset", SearchQuerySet().all())
request = factory.get(path="/", data="", content_type="application/json")
response = self.view1.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_object_single_index(self):
request = factory.get(path="/", data="", content_type="application/json")
response = self.view1.as_view(actions={"get": "retrieve"})(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_object_multiple_indices(self):
request = factory.get(path="/", data={"model": "mockapp.mockperson"}, content_type="application/json")
response = self.view3.as_view(actions={"get": "retrieve"})(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_object_multiple_indices_no_model_query_param(self):
request = factory.get(path="/", data="", content_type="application/json")
response = self.view3.as_view(actions={"get": "retrieve"})(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_viewset_get_object_multiple_indices_invalid_modelname(self):
request = factory.get(path="/", data={"model": "spam"}, content_type="application/json")
response = self.view3.as_view(actions={"get": "retrieve"})(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_viewset_get_obj_raise_404(self):
request = factory.get(path="/", data="", content_type="application/json")
response = self.view1.as_view(actions={"get": "retrieve"})(request, pk=100000)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_viewset_get_object_invalid_lookup_field(self):
request = factory.get(path="/", data="", content_type="application/json")
self.assertRaises(
AttributeError,
self.view1.as_view(actions={"get": "retrieve"}), request, invalid_lookup=1
)
def test_viewset_get_obj_override_lookup_field(self):
setattr(self.view1, "lookup_field", "custom_lookup")
request = factory.get(path="/", data="", content_type="application/json")
response = self.view1.as_view(actions={"get": "retrieve"})(request, custom_lookup=1)
setattr(self.view1, "lookup_field", "pk")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_more_like_this_decorator(self):
route = self.router.get_routes(self.view2)[2:].pop()
self.assertEqual(route.url, "^{prefix}/{lookup}/more-like-this{trailing_slash}$")
self.assertEqual(route.mapping, {"get": "more_like_this"})
def test_viewset_more_like_this_action_route(self):
request = factory.get(path="/", data={}, content_type="application/json")
response = self.view2.as_view(actions={"get": "more_like_this"})(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_facets_action_route(self):
request = factory.get(path="/", data={}, content_type="application/json")
response = self.view1.as_view(actions={"get": "facets"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class HaystackViewSetPermissionsTestCase(TestCase):
fixtures = ["mockperson"]
def setUp(self):
MockPersonIndex().reindex()
class ViewSet(HaystackViewSet):
serializer_class = Serializer
self.view = ViewSet
self.user = User.objects.create_user(username="user", email="user@example.com", password="user")
self.admin_user = User.objects.create_superuser(username="admin", email="admin@example.com", password="admin")
def tearDown(self):
MockPersonIndex().clear()
    def test_viewset_get_queryset_with_no_permission(self):
setattr(self.view, "permission_classes", [])
request = factory.get(path="/", data="", content_type="application/json")
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_queryset_with_AllowAny_permission(self):
from rest_framework.permissions import AllowAny
setattr(self.view, "permission_classes", (AllowAny, ))
request = factory.get(path="/", data="", content_type="application/json")
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_queryset_with_IsAuthenticated_permission(self):
from rest_framework.permissions import IsAuthenticated
setattr(self.view, "permission_classes", (IsAuthenticated, ))
request = factory.get(path="/", data="", content_type="application/json")
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
force_authenticate(request, user=self.user)
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_queryset_with_IsAdminUser_permission(self):
from rest_framework.permissions import IsAdminUser
setattr(self.view, "permission_classes", (IsAdminUser,))
request = factory.get(path="/", data="", content_type="application/json")
force_authenticate(request, user=self.user)
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
force_authenticate(request, user=self.admin_user)
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_viewset_get_queryset_with_IsAuthenticatedOrReadOnly_permission(self):
from rest_framework.permissions import IsAuthenticatedOrReadOnly
setattr(self.view, "permission_classes", (IsAuthenticatedOrReadOnly,))
# Unauthenticated GET requests should pass
request = factory.get(path="/", data="", content_type="application/json")
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Authenticated GET requests should pass
request = factory.get(path="/", data="", content_type="application/json")
force_authenticate(request, user=self.user)
response = self.view.as_view(actions={"get": "list"})(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# POST, PUT, PATCH and DELETE requests are not supported, so they will
# raise an error. No need to test the permission.
@skipIf(not restframework_version < (3, 7), "Skipped due to fix in django-rest-framework > 3.6")
def test_viewset_get_queryset_with_DjangoModelPermissions_permission(self):
from rest_framework.permissions import DjangoModelPermissions
setattr(self.view, "permission_classes", (DjangoModelPermissions,))
# The `DjangoModelPermissions` is not supported and should raise an
# AssertionError from rest_framework.permissions.
request = factory.get(path="/", data="", content_type="application/json")
try:
self.view.as_view(actions={"get": "list"})(request)
self.fail("Did not fail with AssertionError or AttributeError "
"when calling HaystackView with DjangoModelPermissions")
except (AttributeError, AssertionError) as e:
if isinstance(e, AttributeError):
self.assertEqual(str(e), "'SearchQuerySet' object has no attribute 'model'")
else:
self.assertEqual(str(e), "Cannot apply DjangoModelPermissions on a view that does "
"not have `.model` or `.queryset` property.")
def test_viewset_get_queryset_with_DjangoModelPermissionsOrAnonReadOnly_permission(self):
from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
setattr(self.view, "permission_classes", (DjangoModelPermissionsOrAnonReadOnly,))
# The `DjangoModelPermissionsOrAnonReadOnly` is not supported and should raise an
# AssertionError from rest_framework.permissions.
request = factory.get(path="/", data="", content_type="application/json")
try:
self.view.as_view(actions={"get": "list"})(request)
self.fail("Did not fail with AssertionError when calling HaystackView "
"with DjangoModelPermissionsOrAnonReadOnly")
except (AttributeError, AssertionError) as e:
if isinstance(e, AttributeError):
self.assertEqual(str(e), "'SearchQuerySet' object has no attribute 'model'")
else:
self.assertEqual(str(e), "Cannot apply DjangoModelPermissions on a view that does "
"not have `.model` or `.queryset` property.")
@skipIf(not restframework_version < (3, 7), "Skipped due to fix in django-rest-framework > 3.6")
def test_viewset_get_queryset_with_DjangoObjectPermissions_permission(self):
from rest_framework.permissions import DjangoObjectPermissions
setattr(self.view, "permission_classes", (DjangoObjectPermissions,))
# The `DjangoObjectPermissions` is a subclass of `DjangoModelPermissions` and
# therefore unsupported.
request = factory.get(path="/", data="", content_type="application/json")
try:
self.view.as_view(actions={"get": "list"})(request)
self.fail("Did not fail with AssertionError when calling HaystackView with DjangoModelPermissions")
except (AttributeError, AssertionError) as e:
if isinstance(e, AttributeError):
self.assertEqual(str(e), "'SearchQuerySet' object has no attribute 'model'")
else:
self.assertEqual(str(e), "Cannot apply DjangoModelPermissions on a view that does "
"not have `.model` or `.queryset` property.")
class PaginatedHaystackViewSetTestCase(TestCase):
fixtures = ["mockperson"]
def setUp(self):
MockPersonIndex().reindex()
class Serializer1(HaystackSerializer):
class Meta:
fields = ["firstname", "lastname"]
index_classes = [MockPersonIndex]
class NumberPagination(PageNumberPagination):
page_size = 5
class ViewSet1(HaystackViewSet):
index_models = [MockPerson]
serializer_class = Serializer1
pagination_class = NumberPagination
self.view1 = ViewSet1
def tearDown(self):
MockPersonIndex().clear()
def test_viewset_PageNumberPagination_results(self):
request = factory.get(path="/", data="", content_type="application/json")
response = self.view1.as_view(actions={"get": "list"})(request)
response.render()
content = json.loads(response.content.decode())
self.assertTrue(all(k in content for k in ("count", "next", "previous", "results")))
self.assertEqual(len(content["results"]), 5)
def test_viewset_PageNumberPagination_navigation_urls(self):
request = factory.get(path="/", data={"page": 2}, content_type="application/json")
response = self.view1.as_view(actions={"get": "list"})(request)
response.render()
content = json.loads(response.content.decode())
self.assertEqual(content["previous"], "http://testserver/")
self.assertEqual(content["next"], "http://testserver/?page=3")
|
{
"content_hash": "3958e1e3b3cc0450a9dd5f275684a013",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 118,
"avg_line_length": 46.05573770491803,
"alnum_prop": 0.6670463444151776,
"repo_name": "inonit/drf-haystack",
"id": "81e7055a3aa0468f4399be7723e671b968fd7cf3",
"size": "14130",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_viewsets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "536"
},
{
"name": "Python",
"bytes": "161752"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from os import path
current_dir = path.abspath(path.dirname(__file__))
with open(path.join(current_dir, 'README.rst'), 'r') as f:
long_description = f.read()
with open(path.join(current_dir, 'requirements.txt'), 'r') as f:
install_requires = f.read().split('\n')
setup(
name='safeopt',
version='0.16',
author='Felix Berkenkamp',
author_email='befelix@inf.ethz.ch',
packages=['safeopt'],
url='https://github.com/befelix/SafeOpt',
license='MIT',
description='Safe Bayesian optimization',
long_description=long_description,
setup_requires='numpy',
install_requires=install_requires,
keywords='Bayesian optimization, Safety',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'],
)
|
{
"content_hash": "6c89dfb4511d893c734827c081516a88",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.640117994100295,
"repo_name": "befelix/SafeOpt",
"id": "0fae0cacfa648ec91111735061cbbc76db4ec9af",
"size": "1017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "907"
},
{
"name": "Python",
"bytes": "68742"
},
{
"name": "Shell",
"bytes": "1167"
}
],
"symlink_target": ""
}
|
import argparse
def cloudwatch_dimension(dim_as_str):
try:
name, val = dim_as_str.split('=', 1)
return {'Name': name, 'Value': val}
except ValueError:
raise argparse.ArgumentTypeError('dimension filter "{0}" must have '
'form KEY=VALUE'.format(dim_as_str))
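# Example: cloudwatch_dimension('InstanceId=i-1234abcd') returns
#   {'Name': 'InstanceId', 'Value': 'i-1234abcd'}
# while a string without '=' raises ArgumentTypeError instead.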
|
{
"content_hash": "1cc39b4adce306dbf5e13e35f70e852d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 33.2,
"alnum_prop": 0.5602409638554217,
"repo_name": "jhajek/euca2ools",
"id": "a7fdad94d09df71c4ecb216f5e7cad95a75554b4",
"size": "1674",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "euca2ools/commands/monitoring/argtypes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1230266"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
}
|
"""
Person Registry, Controllers
@see: U{http://eden.sahanafoundation.org/wiki/BluePrintVITA}
"""
module = request.controller
resourcename = request.function
# -----------------------------------------------------------------------------
# Options Menu (available in all Functions' Views)
def s3_menu_postp():
# @todo: rewrite this for new framework
menu_selected = []
group_id = s3mgr.get_session("pr", "group")
if group_id:
group = s3db.pr_group
query = (group.id == group_id)
record = db(query).select(group.id, group.name, limitby=(0, 1)).first()
if record:
name = record.name
menu_selected.append(["%s: %s" % (T("Group"), name), False,
URL(f="group",
args=[record.id])])
person_id = s3mgr.get_session("pr", "person")
if person_id:
person = s3db.pr_person
query = (person.id == person_id)
record = db(query).select(person.id, limitby=(0, 1)).first()
if record:
person_represent = s3db.pr_person_represent
name = person_represent(record.id)
menu_selected.append(["%s: %s" % (T("Person"), name), False,
URL(f="person",
args=[record.id])])
if menu_selected:
menu_selected = [T("Open recent"), True, None, menu_selected]
response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
try:
module_name = deployment_settings.modules[module].name_nice
except:
module_name = T("Person Registry")
# Load Model
s3db.table("pr_address")
def prep(r):
if r.representation == "html":
if not r.id and not r.method:
r.method = "search"
else:
redirect(URL(f="person", args=request.args))
return True
s3.prep = prep
def postp(r, output):
if isinstance(output, dict):
# Add information for Dashboard
pr_gender_opts = s3db.pr_gender_opts
pr_age_group_opts = s3db.pr_age_group_opts
table = db.pr_person
gender = []
for g_opt in pr_gender_opts:
query = (table.deleted == False) & \
(table.gender == g_opt)
count = db(query).count()
gender.append([str(pr_gender_opts[g_opt]), int(count)])
age = []
for a_opt in pr_age_group_opts:
query = (table.deleted == False) & \
(table.age_group == a_opt)
count = db(query).count()
age.append([str(pr_age_group_opts[a_opt]), int(count)])
total = int(db(table.deleted == False).count())
output.update(module_name=module_name,
gender=json.dumps(gender),
age=json.dumps(age),
total=total)
if r.interactive:
if not r.component:
label = READ
else:
label = UPDATE
linkto = r.resource.crud._linkto(r)("[id]")
s3.actions = [
dict(label=str(label), _class="action-btn", url=str(linkto))
]
r.next = None
return output
s3.postp = postp
output = s3_rest_controller("pr", "person")
response.view = "pr/index.html"
response.title = module_name
return output
# -----------------------------------------------------------------------------
def person():
""" RESTful CRUD controller """
# Enable this to allow migration of users between instances
#s3.filter = (s3db.pr_person.pe_id == s3db.pr_person_user.pe_id) & \
#(s3db.auth_user.id == s3db.pr_person_user.user_id) & \
#(s3db.auth_user.registration_key != "disabled")
# Organisation Dependent Fields
set_org_dependent_field = deployment_settings.set_org_dependent_field
person_details_table = s3db.pr_person_details
set_org_dependent_field(person_details_table.father_name)
set_org_dependent_field(person_details_table.mother_name)
set_org_dependent_field(person_details_table.affiliations)
set_org_dependent_field(person_details_table.company)
# Custom Method for Contacts
s3db.set_method(module, resourcename,
method="contacts",
action=s3db.pr_contacts)
def prep(r):
if r.representation == "json" and \
not r.component and session.s3.filter_staff:
person_ids = session.s3.filter_staff
session.s3.filter_staff = None
r.resource.add_filter = (~(db.pr_person.id.belongs(person_ids)))
elif r.interactive:
if r.representation == "popup":
# Hide "pe_label" and "missing" fields in person popups
r.table.pe_label.readable = False
r.table.pe_label.writable = False
r.table.missing.readable = False
r.table.missing.writable = False
if r.component_name == "config":
_config = s3db.gis_config
s3db.gis_config_form_setup()
# Name will be generated from person's name.
_config.name.readable = _config.name.writable = False
# Hide Location
_config.region_location_id.readable = _config.region_location_id.writable = False
elif r.component_name == "competency":
ctable = s3db.hrm_competency
ctable.organisation_id.writable = False
ctable.skill_id.comment = None
elif r.component_name == "saved_search":
if r.method == "load":
if r.component_id:
table = db.pr_saved_search
record = db(table.id == r.component_id).select(table.url,
limitby=(0, 1)
).first()
if record:
redirect(record.url)
else:
raise HTTP(404)
elif r.id:
r.table.volunteer.readable = True
r.table.volunteer.writable = True
return True
s3.prep = prep
def postp(r, output):
if r.component_name == "saved_search":
s3_action_buttons(r)
s3.actions.append(
dict(url=URL(args=r.args + ["[id]", "load"]),
label=str(T("Load")),
_class="action-btn")
)
return output
s3.postp = postp
s3db.configure("pr_group_membership",
list_fields=["id",
"group_id",
"group_head",
"description"
])
# Basic tabs
tabs = [(T("Basic Details"), None),
(T("Address"), "address"),
#(T("Contacts"), "contact"),
(T("Contact Details"), "contacts"),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Education"), "education"),
(T("Groups"), "group_membership"),
(T("Journal"), "note"),
(T("Skills"), "competency"),
(T("Training"), "training"),
(T("Saved Searches"), "saved_search"),
]
# Configuration tabs
tabs.append((T("Map Settings"), "config"))
s3db.configure("pr_person", listadd=False, insertable=True)
output = s3_rest_controller(main="first_name",
extra="last_name",
rheader=lambda r: \
s3db.pr_rheader(r, tabs=tabs))
return output
# -----------------------------------------------------------------------------
def address():
"""
RESTful controller to allow creating/editing of address records within
contacts()
"""
# CRUD pre-process
def prep(r):
controller = request.get_vars.get("controller", "pr")
person_id = request.get_vars.get("person", None)
if person_id and controller:
s3db.configure("pr_address",
create_next=URL(c=controller,
f="person",
args=[person_id, "contacts"]),
update_next=URL(c=controller,
f="person",
args=[person_id, "contacts"])
)
if r.method == "create":
table = s3db.pr_person
query = (table.id == person_id)
pe_id = db(query).select(table.pe_id,
limitby=(0, 1)).first().pe_id
s3db.pr_address.pe_id.default = pe_id
return True
s3.prep = prep
output = s3_rest_controller()
return output
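# Example (hypothetical record id): /pr/address/create?person=5&controller=hrm
# defaults pe_id from person 5 and returns to that person's "contacts" tab
# after create/update.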
# -----------------------------------------------------------------------------
def contact():
"""
RESTful controller to allow creating/editing of contact records within
contacts()
"""
# CRUD pre-process
def prep(r):
controller = request.get_vars.get("controller", "pr")
person_id = request.get_vars.get("person", None)
if person_id:
s3db.configure("pr_contact",
create_next=URL(c=controller,
f="person",
args=[person_id, "contacts"]),
update_next=URL(c=controller,
f="person",
args=[person_id, "contacts"])
)
if r.method == "create":
table = s3db.pr_person
query = (table.id == person_id)
pe_id = db(query).select(table.pe_id,
limitby=(0, 1)).first().pe_id
s3db.pr_contact.pe_id.default = pe_id
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def contact_emergency():
"""
RESTful controller to allow creating/editing of emergency contact
records within contacts()
"""
# CRUD pre-process
def prep(r):
controller = request.get_vars.get("controller", "pr")
person_id = request.get_vars.get("person", None)
if person_id:
s3db.configure("pr_contact_emergency",
create_next=URL(c=controller,
f="person",
args=[person_id, "contacts"]),
update_next=URL(c=controller,
f="person",
args=[person_id, "contacts"])
)
if r.method == "create":
table = s3db.pr_person
query = (table.id == person_id)
pe_id = db(query).select(table.pe_id,
limitby=(0, 1)).first().pe_id
s3db.pr_contact_emergency.pe_id.default = pe_id
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search.json for use in Autocompletes
- allows differential access permissions
"""
s3.prep = lambda r: r.representation == "json" and \
r.method == "search"
return s3_rest_controller(module, "person")
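# Example (hypothetical query): an autocomplete widget requests
#   /pr/person_search/search.json?value=jo
# any representation/method other than the JSON search is rejected by the prep above.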
# -----------------------------------------------------------------------------
def group():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
s3.filter = (table.system == False) # do not show system groups
s3db.configure("pr_group_membership",
list_fields=["id",
"person_id",
"group_head",
"description"
])
rheader = lambda r: s3db.pr_rheader(r, tabs = [(T("Group Details"), None),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Members"), "group_membership")
])
output = s3_rest_controller(rheader=rheader)
return output
# -----------------------------------------------------------------------------
def image():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def education():
""" RESTful CRUD controller """
tablename = "pr_education"
table = s3db[tablename]
return s3_rest_controller("pr", "education")
# -----------------------------------------------------------------------------
#def contact():
# """ RESTful CRUD controller """
#
# table = s3db.pr_contact
#
# table.pe_id.label = T("Person/Group")
# table.pe_id.readable = True
# table.pe_id.writable = True
#
# return s3_rest_controller()
# -----------------------------------------------------------------------------
def presence():
"""
RESTful CRUD controller
- needed for Map Popups (no Menu entry for direct access)
@deprecated - People now use Base Location pr_person.location_id
"""
table = s3db.pr_presence
# Settings suitable for use in Map Popups
table.pe_id.readable = True
table.pe_id.label = "Name"
table.pe_id.represent = s3db.pr_person_represent
table.observer.readable = False
table.presence_condition.readable = False
# @ToDo: Add Skills
return s3_rest_controller()
# -----------------------------------------------------------------------------
def pentity():
"""
RESTful CRUD controller
- limited to just search.json for use in Autocompletes
"""
s3.prep = lambda r: r.representation in ("s3json", "json", "xml")
return s3_rest_controller()
# -----------------------------------------------------------------------------
def affiliation():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def role():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def tooltip():
""" Ajax tooltips """
if "formfield" in request.vars:
response.view = "pr/ajaxtips/%s.html" % request.vars.formfield
return dict()
# -----------------------------------------------------------------------------
def saved_search():
"""
REST controller for saving and loading saved searches
"""
return s3_rest_controller()
# END =========================================================================
|
{
"content_hash": "423779a1fdaf7ee18f35fd9a1867ec0d",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 97,
"avg_line_length": 35.85876993166287,
"alnum_prop": 0.44657603862279255,
"repo_name": "flavour/rgims_as_diff",
"id": "cc9b51335e5505b0b28ee0e391fb745211682ddd",
"size": "15767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "controllers/pr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1079144"
},
{
"name": "JavaScript",
"bytes": "15122049"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "23100673"
},
{
"name": "Racket",
"bytes": "166"
},
{
"name": "Shell",
"bytes": "727"
},
{
"name": "XSLT",
"bytes": "1307376"
}
],
"symlink_target": ""
}
|
DEBUG = True
TESTING = True
PRODUCTION = False
HOST = '0.0.0.0'
SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://root:root@localhost/ggrcdev'
FULLTEXT_INDEXER = 'ggrc.fulltext.mysql.MysqlIndexer'
LOGIN_MANAGER = 'ggrc.login.appengine'
#SQLALCHEMY_ECHO = True
AUTOBUILD_ASSETS = False
ENABLE_JASMINE = False
APP_ENGINE = True
|
{
"content_hash": "f3472f05f870ba59604c2d05c08417b7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.759375,
"repo_name": "plamut/ggrc-core",
"id": "e495f61d7caf7d5c4121617889b93a21178f8822",
"size": "433",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "src/ggrc/settings/development_app_engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "229800"
},
{
"name": "HTML",
"bytes": "1060475"
},
{
"name": "JavaScript",
"bytes": "1951072"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2839040"
},
{
"name": "Shell",
"bytes": "31100"
}
],
"symlink_target": ""
}
|
ACTIVE = "ACTIVE"
INACTIVE = "INACTIVE"
STATS_ACTIVE_CONNECTIONS = 'active_connections'
STATS_MAX_CONNECTIONS = 'max_connections'
STATS_TOTAL_CONNECTIONS = 'total_connections'
STATS_CURRENT_SESSIONS = 'current_sessions'
STATS_MAX_SESSIONS = 'max_sessions'
STATS_TOTAL_SESSIONS = 'total_sessions'
STATS_IN_BYTES = 'bytes_in'
STATS_OUT_BYTES = 'bytes_out'
STATS_CONNECTION_ERRORS = 'connection_errors'
STATS_RESPONSE_ERRORS = 'response_errors'
STATS_STATUS = 'status'
STATS_HEALTH = 'health'
STATS_FAILED_CHECKS = 'failed_checks'
KEEPALIVED_CONFIG_FILE_ABS_PATH = '/etc/keepalived/keepalived.conf'
|
{
"content_hash": "85530ce97f1507a5f302edae667f3fc1",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 33.22222222222222,
"alnum_prop": 0.7642140468227425,
"repo_name": "jiahaoliang/group-based-policy",
"id": "28cbe994fd320db6a3c34f05feda9eb8fbc90181",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/lbaasv2-mitaka-pull-request",
"path": "gbpservice/nfp/service_vendor_agents/haproxy/haproxy-agent/src/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1741199"
},
{
"name": "Shell",
"bytes": "27976"
}
],
"symlink_target": ""
}
|
'''Modules to calculate insolation and orbital variations.
These methods now accept and return ``xarray`` objects
for easier data manipulation and plotting.
'''
|
{
"content_hash": "1629b82b8689bb8deac79238fb782d85",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 58,
"avg_line_length": 32.4,
"alnum_prop": 0.7839506172839507,
"repo_name": "cjcardinale/climlab",
"id": "092928ed7ef42afa372c36c3e9729c3f79901aa8",
"size": "162",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "climlab/solar/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "289"
},
{
"name": "C",
"bytes": "1456"
},
{
"name": "Fortran",
"bytes": "10562651"
},
{
"name": "Jupyter Notebook",
"bytes": "4151741"
},
{
"name": "Python",
"bytes": "523605"
},
{
"name": "Shell",
"bytes": "3103"
},
{
"name": "TeX",
"bytes": "2253"
}
],
"symlink_target": ""
}
|
import rospy
from tf import TransformListener
class TFLatency:
def __init__(self, source_frame, target_frame):
self.source_frame = source_frame
self.target_frame = target_frame
self.listener = TransformListener(1, rospy.Duration(0.01))
def run(self):
self.listener.waitForTransform(self.source_frame, self.target_frame, rospy.Time(), rospy.Duration(5.0))
        total = 0.0
        count = 0
        while not rospy.is_shutdown():
            now = rospy.Time.now()
            if self.listener.canTransform(self.source_frame, self.target_frame, rospy.Time(0)):
                t = self.listener.getLatestCommonTime(self.source_frame, self.target_frame)
                delta = (now - t).to_sec() * 1000  # ms
                total += delta
                count += 1
                rospy.loginfo("Latency: %f ms (AVG: %f ms)", delta, total / count)
#if delta > 20:
# self.listener.clear()
rospy.sleep(0.01)
if __name__ == '__main__':
rospy.init_node('tf_latency', anonymous=True)
source_frame = rospy.get_param("~source_frame", "/world")
target_frame = rospy.get_param("~target_frame")
tflatency = TFLatency(source_frame, target_frame)
tflatency.run()
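# Example invocation (hypothetical package and frame names):
#   rosrun <your_package> tf_latency.py _source_frame:=/world _target_frame:=/base_link
# ~target_frame has no default, so it must be provided.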
|
{
"content_hash": "65c3a746f93dcfaae3fcd157efadad03",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 111,
"avg_line_length": 39.21875,
"alnum_prop": 0.5888446215139442,
"repo_name": "USC-ACTLab/vicon_bridge_test",
"id": "5adc3461affb2f20b0ef9039b5b14f14b91c5614",
"size": "1278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/tf_latency.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1056"
},
{
"name": "Python",
"bytes": "2319"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe, sys
from frappe import _
from frappe.utils import (cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta,
sanitize_html, sanitize_email)
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
from frappe.model.db_schema import type_map, varchar_len
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
from frappe.model.document import Document
	if doctype not in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, BaseDocument):
_class = getattr(module, classname)
else:
raise ImportError, doctype
else:
raise ImportError, doctype
_classes[doctype] = _class
return _classes[doctype]
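# Example: for a standard DocType "Sales Order", the classname computed above is
# "SalesOrder" (spaces and dashes stripped) and is looked up in the doctype's
# module; custom doctypes fall back to frappe.model.document.Document.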
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in d.iteritems():
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in d.iteritems():
# dont_update_if_missing is a list of fieldnames, for which, you don't want to set default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
		if value is None:
			value = {}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
raise ValueError, "Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError, key
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if value.docstatus is None:
value.docstatus = 0
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self, sanitize=True):
d = frappe._dict()
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
# if no need for sanitization and value is None, continue
if not sanitize and d[fieldname] is None:
continue
df = self.meta.get_field(fieldname)
if df:
if df.fieldtype=="Check" and (not isinstance(d[fieldname], int) or d[fieldname] > 1):
d[fieldname] = 1 if cint(d[fieldname]) else 0
elif df.fieldtype=="Int" and not isinstance(d[fieldname], int):
d[fieldname] = cint(d[fieldname])
elif df.fieldtype in ("Currency", "Float", "Percent") and not isinstance(d[fieldname], float):
d[fieldname] = flt(d[fieldname])
elif df.fieldtype in ("Datetime", "Date") and d[fieldname]=="":
d[fieldname] = None
elif df.get("unique") and cstr(d[fieldname]).strip()=="":
# unique empty field should be set to None
d[fieldname] = None
if isinstance(d[fieldname], list) and df.fieldtype != 'Table':
frappe.throw(_('Value for {0} cannot be a list').format(_(df.label)))
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
if key in ("idx", "docstatus") and self.__dict__[key] is None:
self.__dict__[key] = 0
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False):
doc = self.get_valid_dict()
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in doc.keys():
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in doc.keys():
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_liked_by", "__run_link_triggers"):
if self.get(key):
doc[key] = self.get(key)
return doc
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), d.values())
except Exception, e:
if e.args[0]==1062:
if "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
type, value, traceback = sys.exc_info()
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.DuplicateEntryError, (self.doctype, self.name, e), traceback
elif "Duplicate" in cstr(e.args[1]):
# unique constraint
self.show_unique_validation_message(e)
else:
raise
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict()
# don't update name, as case might've been changed
name = d['name']
del d['name']
columns = d.keys()
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), d.values() + [name])
except Exception, e:
if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]):
self.show_unique_validation_message(e)
else:
raise
def show_unique_validation_message(self, e):
type, value, traceback = sys.exc_info()
fieldname, label = str(e).split("'")[-2], None
# unique_first_fieldname_second_fieldname is the constraint name
# created using frappe.db.add_unique
if "unique_" in fieldname:
fieldname = fieldname.split("_", 1)[1]
df = self.meta.get_field(fieldname)
if df:
label = df.label
		frappe.msgprint(_("{0} must be unique").format(label or fieldname))
# this is used to preserve traceback
raise frappe.UniqueValidationError, (self.doctype, self.name, e), traceback
def db_set(self, fieldname, value, update_modified=True):
self.set(fieldname, value)
if update_modified:
self.set("modified", now())
self.set("modified_by", frappe.session.user)
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} #{}: {}: {}".format(_("Error"), _("Row"), self.idx,
_("Value missing for"), _(df.label))
else:
return "{}: {}: {}".format(_("Error"), _("Value missing for"), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": 1}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
# check for missing parent and parenttype
if self.meta.istable:
for fieldname in ("parent", "parenttype"):
if not self.get(fieldname):
missing.append((fieldname, get_msg(frappe._dict(label=fieldname))))
return missing
def get_invalid_links(self, is_submittable=False):
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in (self.meta.get_link_fields()
+ self.meta.get("fields", {"fieldtype":"Dynamic Link"})):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
value = frappe.db.get_value(doctype, docname, "name", cache=True)
if frappe.get_meta(doctype).issingle:
value = doctype
setattr(self, df.fieldname, value)
if not value:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
if not filter(None, options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new():
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": 1})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
if self.get(fieldname) != values.get(fieldname):
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_length(self):
if frappe.flags.in_install:
return
for fieldname, value in self.get_valid_dict().iteritems():
df = self.meta.get_field(fieldname)
if df and df.fieldtype in type_map and type_map[df.fieldtype][0]=="varchar":
max_length = cint(df.get("length")) or cint(varchar_len)
if len(cstr(value)) > max_length:
if self.parentfield and self.idx:
reference = _("{0}, Row {1}").format(_(self.doctype), self.idx)
else:
reference = "{0} {1}".format(_(self.doctype), self.name)
frappe.throw(_("{0}: '{1}' will get truncated, as max characters allowed is {2}")\
.format(reference, _(df.label), max_length), frappe.CharacterLengthExceededError)
def _validate_update_after_submit(self):
# get the full doc with children
db_values = frappe.get_doc(self.doctype, self.name).as_dict()
for key in self.as_dict():
df = self.meta.get_field(key)
db_value = db_values.get(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
if df.fieldtype=="Table":
# just check if the table size has changed
# individual fields will be checked in the loop for children
self_value = len(self.get(key))
db_value = len(db_value)
else:
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def _sanitize_content(self):
"""Sanitize HTML and Email in field values. Used to prevent XSS.
- Ignore if 'Ignore XSS Filter' is checked or fieldtype is 'Code'
"""
if frappe.flags.in_install:
return
for fieldname, value in self.get_valid_dict().items():
if not value or not isinstance(value, basestring):
continue
elif (u"<" not in value and u">" not in value):
# doesn't look like html so no need
continue
elif "<!-- markdown -->" in value and not ("<script" in value or "javascript:" in value):
# should be handled separately via the markdown converter function
continue
df = self.meta.get_field(fieldname)
sanitized_value = value
if df and df.get("fieldtype") in ("Data", "Code", "Small Text") and df.get("options")=="Email":
sanitized_value = sanitize_email(value)
elif df and (df.get("ignore_xss_filter")
or (df.get("fieldtype")=="Code" and df.get("options")!="Email")
or df.get("fieldtype") in ("Attach", "Attach Image")
# cancelled and submit but not update after submit should be ignored
or self.docstatus==2
or (self.docstatus==1 and not df.get("allow_on_submit"))):
continue
else:
sanitized_value = sanitize_html(value)
self.set(fieldname, sanitized_value)
def precision(self, fieldname, parentfield=None):
"""Returns float precision for a particular field (or get global default).
:param fieldname: Fieldname for which precision is required.
:param parentfield: If fieldname is in child table."""
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, basestring):
parentfield = parentfield.parentfield
cache_key = parentfield or "main"
if not hasattr(self, "_precision"):
self._precision = frappe._dict()
if cache_key not in self._precision:
self._precision[cache_key] = frappe._dict()
if fieldname not in self._precision[cache_key]:
self._precision[cache_key][fieldname] = None
doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
df = frappe.get_meta(doctype).get_field(fieldname)
if df.fieldtype in ("Currency", "Float", "Percent"):
self._precision[cache_key][fieldname] = get_field_precision(df, self)
return self._precision[cache_key][fieldname]
def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False, translated=False):
from frappe.utils.formatters import format_value
df = self.meta.get_field(fieldname)
if not df and fieldname in default_fields:
from frappe.model.meta import get_default_df
df = get_default_df(fieldname)
val = self.get(fieldname)
if translated:
val = _(val)
if absolute_value and isinstance(val, (int, float)):
val = abs(self.get(fieldname))
if not doc:
doc = getattr(self, "parent_doc", None) or self
return format_value(val, df=df, doc=doc, currency=currency)
def is_print_hide(self, fieldname, df=None, for_print=True):
"""Returns true if fieldname is to be hidden for print.
Print Hide can be set via the Print Format Builder or in the controller as a list
of hidden fields. Example
class MyDoc(Document):
def __setup__(self):
self.print_hide = ["field1", "field2"]
:param fieldname: Fieldname to be checked if hidden.
"""
meta_df = self.meta.get_field(fieldname)
if meta_df and meta_df.get("__print_hide"):
return True
print_hide = 0
if self.get(fieldname)==0 and not self.meta.istable:
print_hide = ( df and df.print_hide_if_no_value ) or ( meta_df and meta_df.print_hide_if_no_value )
if not print_hide:
if df and df.print_hide is not None:
print_hide = df.print_hide
elif meta_df:
print_hide = meta_df.print_hide
return print_hide
def in_format_data(self, fieldname):
"""Returns True if shown via Print Format::`format_data` property.
Called from within standard print format."""
doc = getattr(self, "parent_doc", self)
if hasattr(doc, "format_data_map"):
return fieldname in doc.format_data_map
else:
return True
def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
to_reset = []
for df in high_permlevel_fields:
if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
to_reset.append(df)
if to_reset:
if self.is_new():
# if new, set default value
ref_doc = frappe.new_doc(self.doctype)
else:
# get values from old doc
if self.parent:
self.parent_doc.get_latest()
ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
else:
ref_doc = self.get_latest()
for df in to_reset:
self.set(df.fieldname, ref_doc.get(df.fieldname))
def get_value(self, fieldname):
df = self.meta.get_field(fieldname)
val = self.get(fieldname)
return self.cast(val, df)
def cast(self, val, df):
if df.fieldtype in ("Currency", "Float", "Percent"):
val = flt(val)
elif df.fieldtype in ("Int", "Check"):
val = cint(val)
elif df.fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link", "Dynamic Link"):
val = cstr(val)
elif df.fieldtype == "Date":
val = getdate(val)
elif df.fieldtype == "Datetime":
val = get_datetime(val)
elif df.fieldtype == "Time":
val = to_timedelta(val)
return val
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_doc
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype":"Text Editor"}):
extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out = []
for d in data:
add = True
for f in filters:
fval = filters[f]
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif not isinstance(fval, (tuple, list)):
if isinstance(fval, basestring) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
			if limit and len(out) == limit:  # stop once `limit` matches are collected (was off by one)
				break
return out
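# _filter usage sketch (field names are illustrative):
#   rows = _filter(doc.get_all_children(), {"qty": [">", 0], "item_code": "^A"})
# keeps children whose qty compares true via frappe.compare and whose
# item_code starts with "A"; a plain value means equality.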
|
{
"content_hash": "cabd62ceadfebfdab004292015f3a9a9",
"timestamp": "",
"source": "github",
"line_count": 738,
"max_line_length": 117,
"avg_line_length": 30.2289972899729,
"alnum_prop": 0.6581648661974988,
"repo_name": "ShashaQin/frappe",
"id": "e343f946fbc36a55db272260a2aa04f436048bb8",
"size": "22410",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/model/base_document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "297220"
},
{
"name": "GCC Machine Description",
"bytes": "2436"
},
{
"name": "HTML",
"bytes": "165691"
},
{
"name": "JavaScript",
"bytes": "1201932"
},
{
"name": "Python",
"bytes": "1357186"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
import os
import shutil
import time
import torch
import torch.backends.cudnn as cudnn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchnet as tnt
import torchvision.transforms as transforms
from tqdm import tqdm
from wildcat.util import AveragePrecisionMeter, Warp
class Engine(object):
    def __init__(self, state=None):
        # use None instead of a mutable default dict shared across instances
        self.state = state if state is not None else {}
if self._state('use_gpu') is None:
self.state['use_gpu'] = torch.cuda.is_available()
if self._state('image_size') is None:
self.state['image_size'] = 224
if self._state('batch_size') is None:
self.state['batch_size'] = 64
if self._state('workers') is None:
self.state['workers'] = 4
if self._state('multi_gpu') is None:
self.state['multi_gpu'] = False
if self._state('device_ids') is None:
self.state['device_ids'] = [0, 1, 2, 3]
if self._state('evaluate') is None:
self.state['evaluate'] = False
if self._state('start_epoch') is None:
self.state['start_epoch'] = 0
if self._state('max_epochs') is None:
self.state['max_epochs'] = 90
if self._state('epoch_step') is None:
self.state['epoch_step'] = []
# meters
self.state['meter_loss'] = tnt.meter.AverageValueMeter()
# time measure
self.state['batch_time'] = tnt.meter.AverageValueMeter()
self.state['data_time'] = tnt.meter.AverageValueMeter()
# display parameters
if self._state('use_pb') is None:
self.state['use_pb'] = True
if self._state('print_freq') is None:
self.state['print_freq'] = 0
def _state(self, name):
if name in self.state:
return self.state[name]
def on_start_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
self.state['meter_loss'].reset()
self.state['batch_time'].reset()
self.state['data_time'].reset()
def on_end_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
loss = self.state['meter_loss'].value()[0]
if display:
if training:
print('Epoch: [{0}]\t'
'Loss {loss:.4f}'.format(self.state['epoch'], loss=loss))
else:
print('Test: \t Loss {loss:.4f}'.format(loss=loss))
return loss
def on_start_batch(self, training, model, criterion, data_loader, optimizer=None, display=True):
pass
def on_end_batch(self, training, model, criterion, data_loader, optimizer=None, display=True):
# record loss
        self.state['loss_batch'] = self.state['loss'].data[0]  # pre-0.4 PyTorch scalar access
self.state['meter_loss'].add(self.state['loss_batch'])
if display and self.state['print_freq'] != 0 and self.state['iteration'] % self.state['print_freq'] == 0:
loss = self.state['meter_loss'].value()[0]
batch_time = self.state['batch_time'].value()[0]
data_time = self.state['data_time'].value()[0]
if training:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})'.format(
self.state['epoch'], self.state['iteration'], len(data_loader),
batch_time_current=self.state['batch_time_current'],
batch_time=batch_time, data_time_current=self.state['data_time_batch'],
data_time=data_time, loss_current=self.state['loss_batch'], loss=loss))
else:
print('Test: [{0}/{1}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})'.format(
self.state['iteration'], len(data_loader), batch_time_current=self.state['batch_time_current'],
batch_time=batch_time, data_time_current=self.state['data_time_batch'],
data_time=data_time, loss_current=self.state['loss_batch'], loss=loss))
def on_forward(self, training, model, criterion, data_loader, optimizer=None, display=True):
input_var = torch.autograd.Variable(self.state['input'])
target_var = torch.autograd.Variable(self.state['target'])
if not training:
input_var.volatile = True
target_var.volatile = True
# compute output
self.state['output'] = model(input_var)
self.state['loss'] = criterion(self.state['output'], target_var)
if training:
optimizer.zero_grad()
self.state['loss'].backward()
optimizer.step()
def init_learning(self, model, criterion):
if self._state('train_transform') is None:
normalize = transforms.Normalize(mean=model.image_normalization_mean,
std=model.image_normalization_std)
self.state['train_transform'] = transforms.Compose([
Warp(self.state['image_size']),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
if self._state('val_transform') is None:
normalize = transforms.Normalize(mean=model.image_normalization_mean,
std=model.image_normalization_std)
self.state['val_transform'] = transforms.Compose([
Warp(self.state['image_size']),
transforms.ToTensor(),
normalize,
])
self.state['best_score'] = 0
def learning(self, model, criterion, train_dataset, val_dataset, optimizer=None):
self.init_learning(model, criterion)
# define train and val transform
train_dataset.transform = self.state['train_transform']
train_dataset.target_transform = self._state('train_target_transform')
val_dataset.transform = self.state['val_transform']
val_dataset.target_transform = self._state('val_target_transform')
# data loading code
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=self.state['batch_size'], shuffle=True,
num_workers=self.state['workers'])
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=self.state['batch_size'], shuffle=False,
num_workers=self.state['workers'])
# optionally resume from a checkpoint
if self._state('resume') is not None:
if os.path.isfile(self.state['resume']):
print("=> loading checkpoint '{}'".format(self.state['resume']))
checkpoint = torch.load(self.state['resume'])
self.state['start_epoch'] = checkpoint['epoch']
self.state['best_score'] = checkpoint['best_score']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(self.state['evaluate'], checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(self.state['resume']))
if self.state['use_gpu']:
train_loader.pin_memory = True
val_loader.pin_memory = True
cudnn.benchmark = True
if self.state['multi_gpu']:
model = torch.nn.DataParallel(model, device_ids=self.state['device_ids']).cuda()
else:
model = torch.nn.DataParallel(model).cuda()
criterion = criterion.cuda()
if self.state['evaluate']:
self.validate(val_loader, model, criterion)
return
# TODO define optimizer
for epoch in range(self.state['start_epoch'], self.state['max_epochs']):
self.state['epoch'] = epoch
self.adjust_learning_rate(optimizer)
# train for one epoch
self.train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = self.validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > self.state['best_score']
self.state['best_score'] = max(prec1, self.state['best_score'])
self.save_checkpoint({
'epoch': epoch + 1,
'arch': self._state('arch'),
'state_dict': model.module.state_dict() if self.state['use_gpu'] else model.state_dict(),
'best_score': self.state['best_score'],
}, is_best)
print(' *** best={best:.3f}'.format(best=self.state['best_score']))
def train(self, data_loader, model, criterion, optimizer, epoch):
# switch to train mode
model.train()
self.on_start_epoch(True, model, criterion, data_loader, optimizer)
if self.state['use_pb']:
data_loader = tqdm(data_loader, desc='Training')
end = time.time()
for i, (input, target) in enumerate(data_loader):
# measure data loading time
self.state['iteration'] = i
self.state['data_time_batch'] = time.time() - end
self.state['data_time'].add(self.state['data_time_batch'])
self.state['input'] = input
self.state['target'] = target
self.on_start_batch(True, model, criterion, data_loader, optimizer)
if self.state['use_gpu']:
                self.state['target'] = self.state['target'].cuda(async=True)  # 'async' was renamed non_blocking in torch>=0.4
self.on_forward(True, model, criterion, data_loader, optimizer)
# measure elapsed time
self.state['batch_time_current'] = time.time() - end
self.state['batch_time'].add(self.state['batch_time_current'])
end = time.time()
# measure accuracy
self.on_end_batch(True, model, criterion, data_loader, optimizer)
self.on_end_epoch(True, model, criterion, data_loader, optimizer)
def validate(self, data_loader, model, criterion):
# switch to evaluate mode
model.eval()
self.on_start_epoch(False, model, criterion, data_loader)
if self.state['use_pb']:
data_loader = tqdm(data_loader, desc='Test')
end = time.time()
for i, (input, target) in enumerate(data_loader):
# measure data loading time
self.state['iteration'] = i
self.state['data_time_batch'] = time.time() - end
self.state['data_time'].add(self.state['data_time_batch'])
self.state['input'] = input
self.state['target'] = target
self.on_start_batch(False, model, criterion, data_loader)
if self.state['use_gpu']:
self.state['target'] = self.state['target'].cuda(async=True)
self.on_forward(False, model, criterion, data_loader)
# measure elapsed time
self.state['batch_time_current'] = time.time() - end
self.state['batch_time'].add(self.state['batch_time_current'])
end = time.time()
# measure accuracy
self.on_end_batch(False, model, criterion, data_loader)
score = self.on_end_epoch(False, model, criterion, data_loader)
return score
def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):
if self._state('save_model_path') is not None:
filename_ = filename
filename = os.path.join(self.state['save_model_path'], filename_)
if not os.path.exists(self.state['save_model_path']):
os.makedirs(self.state['save_model_path'])
print('save model {filename}'.format(filename=filename))
torch.save(state, filename)
if is_best:
filename_best = 'model_best.pth.tar'
if self._state('save_model_path') is not None:
filename_best = os.path.join(self.state['save_model_path'], filename_best)
shutil.copyfile(filename, filename_best)
if self._state('save_model_path') is not None:
if self._state('filename_previous_best') is not None:
os.remove(self._state('filename_previous_best'))
filename_best = os.path.join(self.state['save_model_path'], 'model_best_{score:.4f}.pth.tar'.format(score=state['best_score']))
shutil.copyfile(filename, filename_best)
self.state['filename_previous_best'] = filename_best
def adjust_learning_rate(self, optimizer):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
# lr = args.lr * (0.1 ** (epoch // 30))
        if self.state['epoch'] != 0 and self.state['epoch'] in self.state['epoch_step']:
print('update learning rate')
for param_group in optimizer.state_dict()['param_groups']:
param_group['lr'] = param_group['lr'] * 0.1
print(param_group['lr'])
class MulticlassEngine(Engine):
def __init__(self, state):
Engine.__init__(self, state)
self.state['classacc'] = tnt.meter.ClassErrorMeter(accuracy=True)
def on_start_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
Engine.on_start_epoch(self, training, model, criterion, data_loader, optimizer)
self.state['classacc'].reset()
def on_end_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
top1 = self.state['classacc'].value()[0]
loss = self.state['meter_loss'].value()[0]
if display:
if training:
# print(model.module.spatial_pooling)
print('Epoch: [{0}]\t'
'Loss {loss:.4f}\t'
'Prec@1 {top1:.3f}'.format(self.state['epoch'], loss=loss, top1=top1))
else:
print('Test: \t Loss {loss:.4f}\t Prec@1 {top1:.3f}'.format(loss=loss, top1=top1))
return top1
def on_end_batch(self, training, model, criterion, data_loader, optimizer=None, display=True):
Engine.on_end_batch(self, training, model, criterion, data_loader, optimizer, display=False)
# measure accuracy
self.state['classacc'].add(self.state['output'].data, self.state['target'])
if display and self.state['print_freq'] != 0 and self.state['iteration'] % self.state['print_freq'] == 0:
top1 = self.state['classacc'].value()[0]
loss = self.state['meter_loss'].value()[0]
batch_time = self.state['batch_time'].value()[0]
data_time = self.state['data_time'].value()[0]
if training:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})\t'
'Prec@1 {top1:.3f}'.format(
self.state['epoch'], self.state['iteration'], len(data_loader),
batch_time_current=self.state['batch_time_current'], batch_time=batch_time,
data_time_current=self.state['data_time_batch'], data_time=data_time,
loss_current=self.state['loss_batch'], loss=loss, top1=top1))
else:
print('Test: [{0}/{1}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})\t'
'Prec@1 {top1:.3f}'.format(
self.state['iteration'], len(data_loader), batch_time_current=self.state['batch_time_current'],
batch_time=batch_time, data_time_current=self.state['data_time_batch'],
data_time=data_time, loss_current=self.state['loss_batch'], loss=loss, top1=top1))
class MulticlassTop5Engine(Engine):
def __init__(self, state):
Engine.__init__(self, state)
self.state['classacc'] = tnt.meter.ClassErrorMeter(topk=[1, 5], accuracy=True)
def on_start_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
Engine.on_start_epoch(self, training, model, criterion, data_loader, optimizer)
self.state['classacc'].reset()
def on_end_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
top1 = self.state['classacc'].value()[0]
top5 = self.state['classacc'].value()[1]
loss = self.state['meter_loss'].value()[0]
if display:
if training:
print('Epoch: [{0}]\t'
'Loss {loss:.4f}\t'
'Prec@1 {top1:.3f}\t'
'Prec@5 {top5:.3f}'.format(self.state['epoch'], loss=loss, top1=top1, top5=top5))
else:
print('Test: \t'
'Loss {loss:.4f}\t'
'Prec@1 {top1:.3f}\t'
'Prec@5 {top5:.3f}'.format(loss=loss, top1=top1, top5=top5))
return top1
def on_end_batch(self, training, model, criterion, data_loader, optimizer=None, display=True):
Engine.on_end_batch(self, training, model, criterion, data_loader, optimizer, display=False)
# measure accuracy
self.state['classacc'].add(self.state['output'].data, self.state['target'])
if display and self.state['print_freq'] != 0 and self.state['iteration'] % self.state['print_freq'] == 0:
top1 = self.state['classacc'].value()[0]
top5 = self.state['classacc'].value()[1]
loss = self.state['meter_loss'].value()[0]
batch_time = self.state['batch_time'].value()[0]
data_time = self.state['data_time'].value()[0]
if training:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})\t'
'Prec@1 {top1:.3f}\t'
'Prec@5 {top5:.3f}'.format(
self.state['epoch'], self.state['iteration'], len(data_loader),
batch_time_current=self.state['batch_time_current'], batch_time=batch_time,
data_time_current=self.state['data_time_batch'], data_time=data_time,
loss_current=self.state['loss_batch'], loss=loss, top1=top1, top5=top5))
else:
print('Test: [{0}/{1}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})\t'
'Prec@1 {top1:.3f}\t'
'Prec@5 {top5:.3f}'.format(
self.state['iteration'], len(data_loader), batch_time_current=self.state['batch_time_current'],
batch_time=batch_time, data_time_current=self.state['data_time_batch'],
data_time=data_time, loss_current=self.state['loss_batch'], loss=loss, top1=top1, top5=top5))
class MultiLabelMAPEngine(Engine):
def __init__(self, state):
Engine.__init__(self, state)
if self._state('difficult_examples') is None:
self.state['difficult_examples'] = False
self.state['ap_meter'] = AveragePrecisionMeter(self.state['difficult_examples'])
def on_start_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
Engine.on_start_epoch(self, training, model, criterion, data_loader, optimizer)
self.state['ap_meter'].reset()
def on_end_epoch(self, training, model, criterion, data_loader, optimizer=None, display=True):
map = 100 * self.state['ap_meter'].value().mean()
loss = self.state['meter_loss'].value()[0]
if display:
if training:
# print(model.module.spatial_pooling)
print('Epoch: [{0}]\t'
'Loss {loss:.4f}\t'
'mAP {map:.3f}'.format(self.state['epoch'], loss=loss, map=map))
else:
print('Test: \t Loss {loss:.4f}\t mAP {map:.3f}'.format(loss=loss, map=map))
return map
def on_start_batch(self, training, model, criterion, data_loader, optimizer=None, display=True):
self.state['target_gt'] = self.state['target'].clone()
self.state['target'][self.state['target'] == 0] = 1
self.state['target'][self.state['target'] == -1] = 0
input = self.state['input']
self.state['input'] = input[0]
self.state['name'] = input[1]
def on_end_batch(self, training, model, criterion, data_loader, optimizer=None, display=True):
Engine.on_end_batch(self, training, model, criterion, data_loader, optimizer, display=False)
# measure mAP
self.state['ap_meter'].add(self.state['output'].data, self.state['target_gt'])
if display and self.state['print_freq'] != 0 and self.state['iteration'] % self.state['print_freq'] == 0:
loss = self.state['meter_loss'].value()[0]
batch_time = self.state['batch_time'].value()[0]
data_time = self.state['data_time'].value()[0]
if training:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})'.format(
self.state['epoch'], self.state['iteration'], len(data_loader),
batch_time_current=self.state['batch_time_current'],
batch_time=batch_time, data_time_current=self.state['data_time_batch'],
data_time=data_time, loss_current=self.state['loss_batch'], loss=loss))
else:
print('Test: [{0}/{1}]\t'
'Time {batch_time_current:.3f} ({batch_time:.3f})\t'
'Data {data_time_current:.3f} ({data_time:.3f})\t'
'Loss {loss_current:.4f} ({loss:.4f})'.format(
self.state['iteration'], len(data_loader), batch_time_current=self.state['batch_time_current'],
batch_time=batch_time, data_time_current=self.state['data_time_batch'],
data_time=data_time, loss_current=self.state['loss_batch'], loss=loss))
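# Usage sketch (model/criterion/dataset objects are assumed, not defined here):
#   engine = MultiLabelMAPEngine({'batch_size': 32, 'max_epochs': 20})
#   engine.learning(model, criterion, train_dataset, val_dataset, optimizer)
# learning() wires up transforms and loaders, then alternates train/validate
# epochs and checkpoints the best mAP via save_checkpoint().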
|
{
"content_hash": "944e2f5b1bc9fa7f4c7d8cfd8cef4ca8",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 143,
"avg_line_length": 45.3235294117647,
"alnum_prop": 0.554488427428077,
"repo_name": "durandtibo/wildcat.pytorch",
"id": "f4ef91039801b0e939b445eccd572fe8112aa647",
"size": "23115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wildcat/engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57664"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="cmid", parent_name="isosurface", **kwargs):
super(CmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "5dc705a1de3bd9a369e618175b1e7158",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 38,
"alnum_prop": 0.597165991902834,
"repo_name": "plotly/python-api",
"id": "5902a12e81054471ea7ec97e8e5c7e58819db114",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/_cmid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class DhcpOptions(Model):
"""DhcpOptions contains an array of DNS servers available to VMs deployed in
the virtual network. Standard DHCP option for a subnet overrides VNET DHCP
options.
:param dns_servers: The list of DNS servers IP addresses.
:type dns_servers: list[str]
"""
_attribute_map = {
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
}
def __init__(self, *, dns_servers=None, **kwargs) -> None:
super(DhcpOptions, self).__init__(**kwargs)
self.dns_servers = dns_servers
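# Usage sketch (addresses are illustrative):
#   opts = DhcpOptions(dns_servers=['10.0.0.4', '10.0.0.5'])
# msrest serializes this to {'dnsServers': [...]} using _attribute_map.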
|
{
"content_hash": "02edc476ddec622a6e24016e8cfa52f4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 31.157894736842106,
"alnum_prop": 0.6469594594594594,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "7dc4651973a51a755be031a3e1b800aff876cf6e",
"size": "1066",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/dhcp_options_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Test the Vultr switch platform."""
import json
import unittest
from unittest.mock import patch
import requests_mock
import pytest
import voluptuous as vol
from homeassistant.components.switch import vultr
from homeassistant.components import vultr as base_vultr
from homeassistant.components.vultr import (
ATTR_ALLOWED_BANDWIDTH, ATTR_AUTO_BACKUPS, ATTR_IPV4_ADDRESS,
ATTR_COST_PER_MONTH, ATTR_CREATED_AT, ATTR_SUBSCRIPTION_ID,
CONF_SUBSCRIPTION)
from homeassistant.const import (
CONF_PLATFORM, CONF_NAME)
from tests.components.vultr.test_init import VALID_CONFIG
from tests.common import (
get_test_home_assistant, load_fixture)
class TestVultrSwitchSetup(unittest.TestCase):
"""Test the Vultr switch platform."""
DEVICES = []
def add_entities(self, devices, action):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Init values for this testcase class."""
self.hass = get_test_home_assistant()
self.configs = [
{
CONF_SUBSCRIPTION: '576965',
CONF_NAME: "A Server"
},
{
CONF_SUBSCRIPTION: '123456',
CONF_NAME: "Failed Server"
},
{
CONF_SUBSCRIPTION: '555555',
CONF_NAME: vultr.DEFAULT_NAME
}
]
def tearDown(self):
"""Stop our started services."""
self.hass.stop()
@requests_mock.Mocker()
def test_switch(self, mock):
"""Test successful instance."""
mock.get(
'https://api.vultr.com/v1/account/info?api_key=ABCDEFG1234567',
text=load_fixture('vultr_account_info.json'))
with patch(
'vultr.Vultr.server_list',
return_value=json.loads(
load_fixture('vultr_server_list.json'))):
# Setup hub
base_vultr.setup(self.hass, VALID_CONFIG)
# Setup each of our test configs
for config in self.configs:
vultr.setup_platform(self.hass,
config,
self.add_entities,
None)
assert len(self.DEVICES) == 3
tested = 0
for device in self.DEVICES:
if device.subscription == '555555':
assert 'Vultr {}' == device.name
tested += 1
device.update()
device_attrs = device.device_state_attributes
if device.subscription == '555555':
assert 'Vultr Another Server' == device.name
tested += 1
if device.name == 'A Server':
assert device.is_on is True
assert 'on' == device.state
assert 'mdi:server' == device.icon
assert '1000' == \
device_attrs[ATTR_ALLOWED_BANDWIDTH]
assert 'yes' == \
device_attrs[ATTR_AUTO_BACKUPS]
assert '123.123.123.123' == \
device_attrs[ATTR_IPV4_ADDRESS]
assert '10.05' == \
device_attrs[ATTR_COST_PER_MONTH]
assert '2013-12-19 14:45:41' == \
device_attrs[ATTR_CREATED_AT]
assert '576965' == \
device_attrs[ATTR_SUBSCRIPTION_ID]
tested += 1
elif device.name == 'Failed Server':
assert device.is_on is False
assert 'off' == device.state
assert 'mdi:server-off' == device.icon
assert '1000' == \
device_attrs[ATTR_ALLOWED_BANDWIDTH]
assert 'no' == \
device_attrs[ATTR_AUTO_BACKUPS]
assert '192.168.100.50' == \
device_attrs[ATTR_IPV4_ADDRESS]
assert '73.25' == \
device_attrs[ATTR_COST_PER_MONTH]
assert '2014-10-13 14:45:41' == \
device_attrs[ATTR_CREATED_AT]
assert '123456' == \
device_attrs[ATTR_SUBSCRIPTION_ID]
tested += 1
assert 4 == tested
@requests_mock.Mocker()
def test_turn_on(self, mock):
"""Test turning a subscription on."""
with patch(
'vultr.Vultr.server_list',
return_value=json.loads(load_fixture('vultr_server_list.json'))), \
patch('vultr.Vultr.server_start') as mock_start:
for device in self.DEVICES:
if device.name == 'Failed Server':
device.turn_on()
# Turn on
assert 1 == mock_start.call_count
@requests_mock.Mocker()
def test_turn_off(self, mock):
"""Test turning a subscription off."""
with patch(
'vultr.Vultr.server_list',
return_value=json.loads(load_fixture('vultr_server_list.json'))), \
patch('vultr.Vultr.server_halt') as mock_halt:
for device in self.DEVICES:
if device.name == 'A Server':
device.turn_off()
# Turn off
assert 1 == mock_halt.call_count
def test_invalid_switch_config(self):
"""Test config type failures."""
with pytest.raises(vol.Invalid): # No subscription
vultr.PLATFORM_SCHEMA({
CONF_PLATFORM: base_vultr.DOMAIN,
})
@requests_mock.Mocker()
def test_invalid_switches(self, mock):
"""Test the VultrSwitch fails."""
mock.get(
'https://api.vultr.com/v1/account/info?api_key=ABCDEFG1234567',
text=load_fixture('vultr_account_info.json'))
with patch(
'vultr.Vultr.server_list',
return_value=json.loads(
load_fixture('vultr_server_list.json'))):
# Setup hub
base_vultr.setup(self.hass, VALID_CONFIG)
bad_conf = {} # No subscription
no_subs_setup = vultr.setup_platform(self.hass,
bad_conf,
self.add_entities,
None)
assert no_subs_setup is not None
bad_conf = {
CONF_NAME: "Missing Server",
CONF_SUBSCRIPTION: '665544'
} # Sub not associated with API key (not in server_list)
wrong_subs_setup = vultr.setup_platform(self.hass,
bad_conf,
self.add_entities,
None)
assert wrong_subs_setup is not None
|
{
"content_hash": "eda5641ca73cf9d3483d5f09ee11f443",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 34.06532663316583,
"alnum_prop": 0.5118749078035109,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "f5e94e3e1b12369f9747d0049b0afbbf671857e6",
"size": "6779",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/switch/test_vultr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
"""Retriever script for Alwyn H. Gentry Forest Transect Dataset"""
from builtins import str
from builtins import range
import os
import sys
import zipfile
import xlrd
from retriever.lib.templates import Script
from retriever.lib.models import Table
from retriever.lib.excel import Excel
from pkg_resources import parse_version
try:
from retriever.lib.defaults import VERSION
except ImportError:
from retriever import VERSION
TAX_GROUPS = 9756 # 9819
class main(Script):
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
self.title = "Alwyn H. Gentry Forest Transect Dataset"
self.name = "gentry-forest-transects"
self.retriever_minimum_version = '2.0.dev'
self.version = '1.4.4'
self.urls = {"stems": "http://www.mobot.org/mobot/gentry/123/all_Excel.zip",
"sites": "https://ndownloader.figshare.com/files/5515373",
"species": "",
"counts": ""}
self.keywords = ["plants", "global-scale", "observational"]
self.ref = "http://www.mobot.org/mobot/research/gentry/welcome.shtml"
self.citation = "Phillips, O. and Miller, J.S., 2002. Global patterns of plant diversity: Alwyn H. Gentry's forest transect data set. Missouri Botanical Press."
self.addendum = """Researchers who make use of the data in publications are requested to acknowledge Alwyn H. Gentry, the Missouri Botanical Garden, and collectors who assisted Gentry or contributed data for specific sites. It is also requested that a reprint of any publication making use of the Gentry Forest Transect Data be sent to:
Bruce E. Ponman
Missouri Botanical Garden
P.O. Box 299
St. Louis, MO 63166-0299
U.S.A. """
if parse_version(VERSION) <= parse_version("2.0.0"):
self.shortname = self.name
self.name = self.title
self.tags = self.keywords
def download(self, engine=None, debug=False):
Script.download(self, engine, debug)
self.engine.auto_create_table(Table("sites"), url=self.urls["sites"], filename='gentry_sites.csv')
self.engine.insert_data_from_url(self.urls["sites"])
self.engine.download_file(self.urls["stems"], "all_Excel.zip")
local_zip = zipfile.ZipFile(self.engine.format_filename("all_Excel.zip"))
filelist = local_zip.namelist()
local_zip.close()
        if parse_version(VERSION) < parse_version("2.1.dev"):
filelist = [os.path.basename(filename) for filename in filelist]
self.engine.download_files_from_archive(self.urls["stems"], filelist)
# Currently all_Excel.zip is missing CURUYUQU.xls
# Download it separately and add it to the file list
if not self.engine.find_file('CURUYUQU.xls'):
self.engine.download_file("http://www.mobot.org/mobot/gentry/123/samerica/CURUYUQU.xls", "CURUYUQU.xls")
filelist.append('CURUYUQU.xls')
lines = []
tax = []
for filename in filelist:
book = xlrd.open_workbook(self.engine.format_filename(filename))
sh = book.sheet_by_index(0)
rows = sh.nrows
cn = {'stems': []}
n = 0
for colnum, c in enumerate(sh.row(0)):
if not Excel.empty_cell(c):
cid = c.value.lower().strip()
# line number column is sometimes named differently
if cid in ["sub", "number"]:
cid = "line"
# the "number of individuals" column is named in various
# different ways; they always at least contain "nd"
if "nd" in cid:
cid = "count"
# in QUIAPACA.xls the "number of individuals" column is
# misnamed "STEMDBH" just like the stems columns, so weep
# for the state of scientific data and then fix manually
if filename == "QUIAPACA.xls" and colnum == 13:
cid = "count"
# if column is a stem, add it to the list of stems;
# otherwise, make note of the column name/number
if "stem" in cid or "dbh" in cid:
cn["stems"].append(n)
else:
cn[cid] = n
n += 1
# sometimes, a data file does not contain a liana or count column
if not "liana" in list(cn.keys()):
cn["liana"] = -1
if not "count" in list(cn.keys()):
cn["count"] = -1
for i in range(1, rows):
row = sh.row(i)
cellcount = len(row)
# make sure the row is real, not just empty cells
if not all(Excel.empty_cell(cell) for cell in row):
try:
this_line = {}
# get the following information from the appropriate columns
for i in ["line", "family", "genus", "species",
"liana", "count"]:
if cn[i] > -1:
if row[cn[i]].ctype != 2:
# if the cell type(ctype) is not a number
this_line[i] = row[cn[i]].value.lower().strip().replace("\\", "/").replace('"', '')
else:
this_line[i] = row[cn[i]].value
if this_line[i] == '`':
this_line[i] = 1
this_line["stems"] = [row[c]
for c in cn["stems"]
if not Excel.empty_cell(row[c])]
site_code, _ = os.path.splitext(os.path.basename(filename))
this_line["site"] = site_code
# Manually correct CEDRAL data, which has a single line
# that is shifted by one to the left starting at Liana
if this_line["site"] == "CEDRAL" and type(this_line["liana"]) == float:
this_line["liana"] = ""
this_line["count"] = 3
this_line["stems"] = [2.5, 2.5, 30, 18, 25]
lines.append(this_line)
# Check how far the species is identified
full_id = 0
if len(this_line["species"]) < 3:
if len(this_line["genus"]) < 3:
id_level = "family"
else:
id_level = "genus"
else:
id_level = "species"
full_id = 1
tax.append((this_line["family"],
this_line["genus"],
this_line["species"],
id_level,
str(full_id)))
                    except:
                        raise
tax = sorted(tax, key=lambda group: group[0] + " " + group[1] + " " + group[2])
unique_tax = []
tax_dict = {}
tax_count = 0
# Get all unique families/genera/species
for group in tax:
if not (group in unique_tax):
unique_tax.append(group)
tax_count += 1
tax_dict[group[0:3]] = tax_count
# Create species table
table = Table("species", delimiter=",")
table.columns=[("species_id" , ("pk-int",) ),
("family" , ("char", ) ),
("genus" , ("char", ) ),
("species" , ("char", ) ),
("id_level" , ("char", 10) ),
("full_id" , ("int",) )]
data = [[str(tax_dict[group[:3]])] + ['"%s"' % g for g in group]
for group in unique_tax]
table.pk = 'species_id'
table.contains_pk = True
self.engine.table = table
self.engine.create_table()
self.engine.add_to_table(data)
# Create stems table
table = Table("stems", delimiter=",")
table.columns=[("stem_id" , ("pk-auto",) ),
("line" , ("int",) ),
("species_id" , ("int",) ),
("site_code" , ("char", 12) ),
("liana" , ("char", 10) ),
("stem" , ("double",) )]
stems = []
counts = []
for line in lines:
try:
liana = line["liana"]
except KeyError:
liana = ""
species_info = [line["line"],
tax_dict[(line["family"],
line["genus"],
line["species"])],
line["site"],
liana
]
try:
counts.append([value for value in species_info + [line["count"]]])
except KeyError:
pass
for i in line["stems"]:
stem = species_info + [str(i)]
stems.append(stem)
self.engine.table = table
self.engine.create_table()
self.engine.add_to_table(stems)
# Create counts table
table = Table("counts", delimiter=",", contains_pk=False)
table.columns=[("count_id" , ("pk-auto",) ),
("line" , ("int",) ),
("species_id" , ("int",) ),
("site_code" , ("char", 12) ),
("liana" , ("char", 10) ),
("count" , ("double",) )]
self.engine.table = table
self.engine.create_table()
self.engine.add_to_table(counts)
return self.engine
SCRIPT = main()
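# Usage sketch: the retriever CLI discovers SCRIPT and runs download() with a
# configured engine, e.g. (exact flags depend on the installed retriever version):
#   retriever install sqlite gentry-forest-transects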
|
{
"content_hash": "03e6dbc3160eea4478a56c6be99fb291",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 344,
"avg_line_length": 44.78389830508475,
"alnum_prop": 0.44952218752956763,
"repo_name": "henrykironde/deletedret",
"id": "7238b105ef45d49457285d9566cb7855b8854955",
"size": "10580",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/gentry_forest_transects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "8849"
},
{
"name": "Python",
"bytes": "243380"
},
{
"name": "Shell",
"bytes": "511"
}
],
"symlink_target": ""
}
|
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionWishlist = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty() # TODO: do we need for indexing like Java?
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) #DateTimeField()
month = messages.IntegerField(7, variant=messages.Variant.INT32)
maxAttendees = messages.IntegerField(8, variant=messages.Variant.INT32)
seatsAvailable = messages.IntegerField(9, variant=messages.Variant.INT32)
endDate = messages.StringField(10) #DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
############################################
############################################
##### New models
############################################
############################################
class ConfSession(ndb.Model):
"""Session -- Session object"""
name = ndb.StringProperty()
highlights = ndb.StringProperty()
speakerId = ndb.StringProperty()
duration = ndb.TimeProperty()
typeOfSession = ndb.StringProperty(default='NOT_SPECIFIED')
date = ndb.DateProperty()
start_time = ndb.TimeProperty()
class ConfSpeaker(ndb.Model):
"""Speaker -- speaker object"""
displayName = ndb.StringProperty()
confSessionKeysToAttend = ndb.StringProperty(repeated=True)
class ConfSessionType(messages.Enum):
"""SessionType -- Type of session"""
NOT_SPECIFIED = 1
LECTURE = 2
KEYNOTE = 3
WORKSHOP = 4
class ConfSessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
name = messages.StringField(1)
highlights = messages.StringField(2)
speakerId = messages.StringField(3)
duration = messages.StringField(4)
typeOfSession = messages.EnumField('ConfSessionType', 5)
date = messages.StringField(6)
start_time = messages.StringField(7)
speakerDisplayName = messages.StringField(8)
websafeKey = messages.StringField(9)
class ConfSessionForms(messages.Message):
"""ConfSessionForms -- multiple Session outbound form message"""
items = messages.MessageField(ConfSessionForm, 1, repeated=True)
class ConfSpeakerForm(messages.Message):
"""SpeakerForm -- Speaker outbound form message"""
displayName = messages.StringField(1)
confSessionKeysToAttend = messages.StringField(2, repeated=True)
class ProfileListForm(messages.Message):
"""ProfileListForm -- Profile list outbound form message"""
items = messages.MessageField(ProfileMiniForm, 1, repeated=True)
|
{
"content_hash": "0f7290670034069634b3779a8bb18805",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 86,
"avg_line_length": 36.72560975609756,
"alnum_prop": 0.6609662958658475,
"repo_name": "davcs86/fullstack-nanodegree-conference-cloud-app",
"id": "aaa10c38621ff1e4011304e6feafd7c47c28b472",
"size": "6046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32836"
},
{
"name": "Python",
"bytes": "51010"
}
],
"symlink_target": ""
}
|
from django.core.mail import EmailMessage
from django.template import Template, Context
from django.utils.safestring import mark_safe
from django.conf import settings
LOAD_EMAIL_FILTERS = '{% load emailfilters %}'
def template_with_email_filters(template_string):
"""
Create a template that will load our email filters
:param template_string: str
:return: Template
"""
return Template('{}{}'.format(LOAD_EMAIL_FILTERS, template_string))
def generate_message(reply_to_email, rcpt_email, cc_email, template_subject, template_body, context):
# Mark the fields in context as safe, since we're not outputting HTML
for k in context:
context[k] = mark_safe(context[k])
subject = template_with_email_filters(template_subject).render(Context(context))
body = template_with_email_filters(template_body).render(Context(context))
from_email = settings.EMAIL_FROM_ADDRESS
cc_email_list = [cc_email] if cc_email else []
return EmailMessage(subject, body, from_email, [rcpt_email], cc=cc_email_list, reply_to=[reply_to_email])
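# Illustrative usage sketch (not part of the original module); addresses,
# template strings, and context keys below are hypothetical:
#
#     msg = generate_message(
#         reply_to_email='curator@example.org',
#         rcpt_email='recipient@example.org',
#         cc_email=None,
#         template_subject='Delivery of {{ project_name }}',
#         template_body='Hello {{ recipient_name }}, your data is ready.',
#         context={'project_name': 'Demo', 'recipient_name': 'Ada'},
#     )
#     msg.send()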
|
{
"content_hash": "08fbb3fe3d0031a24112367cc5fa49cd",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 109,
"avg_line_length": 41.42307692307692,
"alnum_prop": 0.7270194986072424,
"repo_name": "Duke-GCB/DukeDSHandoverService",
"id": "e76ed43aa07bdb1f2ab61eb0fed4d18e18f9de80",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "switchboard/mailer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "330"
},
{
"name": "HTML",
"bytes": "5167"
},
{
"name": "Python",
"bytes": "113326"
},
{
"name": "Shell",
"bytes": "473"
}
],
"symlink_target": ""
}
|
import os.path
from setuptools import find_packages
from setuptools import setup
from okapi import __version__
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
setup(
name='okapi',
version=__version__,
author='RedBeacon (Gobind Ball)',
author_email='support@redbeacon.com',
description='Requests wrapper to instrument HTTP calls',
long_description=README + '\n\n' + CHANGES,
url='https://github.com/redbeacon/okapi',
packages=find_packages(),
data_files=[('config', ['setup.cfg'])],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python',
],
zip_safe=False,
install_requires=[
'pymongo',
'requests',
],
entry_points={
'console_scripts': [
'okapi=okapi:mongo',
],
},
)
|
{
"content_hash": "f1db5873ad952c00e1f8715a154eb2d4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 60,
"avg_line_length": 26.842105263157894,
"alnum_prop": 0.6147058823529412,
"repo_name": "scorphus/okapi",
"id": "5a4890b73ea8d1e379fa3b130fe39681275c25f0",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6121"
}
],
"symlink_target": ""
}
|
import json
from datetime import timedelta
from django.core.management import CommandError
from django.utils import timezone
import reversion
from test_app.models import TestModel
from test_app.tests.base import TestBase, TestModelMixin
class CreateInitialRevisionsTest(TestModelMixin, TestBase):
def testCreateInitialRevisions(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions")
self.assertSingleRevision((obj,), comment="Initial version.")
def testCreateInitialRevisionsAlreadyCreated(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions")
self.callCommand("createinitialrevisions")
self.assertSingleRevision((obj,), comment="Initial version.")
class CreateInitialRevisionsAppLabelTest(TestModelMixin, TestBase):
def testCreateInitialRevisionsAppLabel(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions", "test_app")
self.assertSingleRevision((obj,), comment="Initial version.")
def testCreateInitialRevisionsAppLabelMissing(self):
with self.assertRaises(CommandError):
self.callCommand("createinitialrevisions", "boom")
def testCreateInitialRevisionsModel(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions", "test_app.TestModel")
self.assertSingleRevision((obj,), comment="Initial version.")
def testCreateInitialRevisionsModelMissing(self):
with self.assertRaises(CommandError):
self.callCommand("createinitialrevisions", "test_app.boom")
def testCreateInitialRevisionsModelMissingApp(self):
with self.assertRaises(CommandError):
self.callCommand("createinitialrevisions", "boom.boom")
def testCreateInitialRevisionsModelNotRegistered(self):
TestModel.objects.create()
self.callCommand("createinitialrevisions", "auth.User")
self.assertNoRevision()
class CreateInitialRevisionsDbTest(TestModelMixin, TestBase):
databases = {"default", "mysql", "postgres"}
def testCreateInitialRevisionsDb(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions", using="postgres")
self.assertNoRevision()
self.assertSingleRevision((obj,), comment="Initial version.", using="postgres")
def testCreateInitialRevisionsDbMySql(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions", using="mysql")
self.assertNoRevision()
self.assertSingleRevision((obj,), comment="Initial version.", using="mysql")
class CreateInitialRevisionsModelDbTest(TestModelMixin, TestBase):
databases = {"default", "postgres"}
def testCreateInitialRevisionsModelDb(self):
obj = TestModel.objects.db_manager("postgres").create()
self.callCommand("createinitialrevisions", model_db="postgres")
self.assertSingleRevision((obj,), comment="Initial version.", model_db="postgres")
class CreateInitialRevisionsCommentTest(TestModelMixin, TestBase):
def testCreateInitialRevisionsComment(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions", comment="comment v1")
self.assertSingleRevision((obj,), comment="comment v1")
class CreateInitialRevisionsMetaTest(TestModelMixin, TestBase):
def testCreateInitialRevisionsComment(self):
obj = TestModel.objects.create()
meta_name = "meta name"
meta = json.dumps({"test_app.TestMeta": {"name": meta_name}})
self.callCommand("createinitialrevisions", "--meta", meta)
self.assertSingleRevision((obj,), meta_names=(meta_name, ), comment="Initial version.")
class DeleteRevisionsTest(TestModelMixin, TestBase):
def testDeleteRevisions(self):
with reversion.create_revision():
TestModel.objects.create()
self.callCommand("deleterevisions")
self.assertNoRevision()
class DeleteRevisionsAppLabelTest(TestModelMixin, TestBase):
def testDeleteRevisionsAppLabel(self):
with reversion.create_revision():
TestModel.objects.create()
self.callCommand("deleterevisions", "test_app")
self.assertNoRevision()
def testDeleteRevisionsAppLabelMissing(self):
with self.assertRaises(CommandError):
self.callCommand("deleterevisions", "boom")
def testDeleteRevisionsModel(self):
with reversion.create_revision():
TestModel.objects.create()
self.callCommand("deleterevisions", "test_app.TestModel")
self.assertNoRevision()
def testDeleteRevisionsModelMissing(self):
with self.assertRaises(CommandError):
self.callCommand("deleterevisions", "test_app.boom")
def testDeleteRevisionsModelMissingApp(self):
with self.assertRaises(CommandError):
self.callCommand("deleterevisions", "boom.boom")
def testDeleteRevisionsModelNotRegistered(self):
with reversion.create_revision():
obj = TestModel.objects.create()
self.callCommand("deleterevisions", "auth.User")
self.assertSingleRevision((obj,))
class DeleteRevisionsDbTest(TestModelMixin, TestBase):
databases = {"default", "mysql", "postgres"}
def testDeleteRevisionsDb(self):
with reversion.create_revision(using="postgres"):
TestModel.objects.create()
self.callCommand("deleterevisions", using="postgres")
self.assertNoRevision(using="postgres")
def testDeleteRevisionsDbMySql(self):
with reversion.create_revision(using="mysql"):
TestModel.objects.create()
self.callCommand("deleterevisions", using="mysql")
self.assertNoRevision(using="mysql")
def testDeleteRevisionsDbNoMatch(self):
with reversion.create_revision():
obj = TestModel.objects.create()
self.callCommand("deleterevisions", using="postgres")
self.assertSingleRevision((obj,))
class DeleteRevisionsModelDbTest(TestModelMixin, TestBase):
databases = {"default", "postgres"}
def testDeleteRevisionsModelDb(self):
with reversion.create_revision():
TestModel.objects.db_manager("postgres").create()
self.callCommand("deleterevisions", model_db="postgres")
self.assertNoRevision(using="postgres")
class DeleteRevisionsDaysTest(TestModelMixin, TestBase):
def testDeleteRevisionsDays(self):
date_created = timezone.now() - timedelta(days=20)
with reversion.create_revision():
TestModel.objects.create()
reversion.set_date_created(date_created)
self.callCommand("deleterevisions", days=19)
self.assertNoRevision()
def testDeleteRevisionsDaysNoMatch(self):
date_created = timezone.now() - timedelta(days=20)
with reversion.create_revision():
obj = TestModel.objects.create()
reversion.set_date_created(date_created)
self.callCommand("deleterevisions", days=21)
self.assertSingleRevision((obj,), date_created=date_created)
class DeleteRevisionsKeepTest(TestModelMixin, TestBase):
def testDeleteRevisionsKeep(self):
with reversion.create_revision():
obj_1 = TestModel.objects.create()
reversion.set_comment("obj_1 v1")
with reversion.create_revision():
obj_1.save()
reversion.set_comment("obj_1 v2")
with reversion.create_revision():
obj_2 = TestModel.objects.create()
reversion.set_comment("obj_2 v1")
with reversion.create_revision():
obj_2.save()
reversion.set_comment("obj_2 v2")
with reversion.create_revision():
obj_3 = TestModel.objects.create()
self.callCommand("deleterevisions", keep=1)
self.assertSingleRevision((obj_1,), comment="obj_1 v2")
self.assertSingleRevision((obj_2,), comment="obj_2 v2")
self.assertSingleRevision((obj_3,))
|
{
"content_hash": "c10f7b06cb556bec70cdaf4075738338",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 95,
"avg_line_length": 38.60287081339713,
"alnum_prop": 0.6955875061973228,
"repo_name": "etianen/django-reversion",
"id": "69edcc4bb53f5a3a7e22c904aaba961c88b8c3de",
"size": "8068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app/tests/test_commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6265"
},
{
"name": "Python",
"bytes": "130019"
}
],
"symlink_target": ""
}
|
"""
State S32 : Exportation
"""
import pickle
from ...util.funcutils import singleton
from .StateSCC import StateSCC
@singleton
class StateS32A(StateSCC):
"""State S32 : Exportation"""
def __init__(self):
"""Object initialization"""
self.buffer = None # Intern buffer
def do(self, handler, data):
"""Action of the state S32A: treat response of exportation request"""
with handler.lock:
try:
# Test if request is rejected
is_KO = data[:5] == b"ERROR"
if is_KO:
message = (data[6:]).decode()
raise Exception(message)
# Test if request is accepted
is_OK = data[:2] == b"OK"
if is_OK:
try:
tab_data = data[3:].split(b';', maxsplit=1)
nbblock = int(tab_data[0].decode())
                        # Are there SIBs to process?
if nbblock > 0:
handler.nbSIB = nbblock # Number of SIB to treat
handler.nbSIBDone = 0 # Number of SIB treated
# Check if the first SIB is already received
try:
if len(tab_data[1]) > 0:
data = b';' + tab_data[1]
except IndexError:
pass
else:
handler.core.taskInProgress = False
handler.loop.run_in_executor(
None, handler.notify, "application.state",
"No information found")
                    except Exception:
raise Exception("S32A protocol error")
# Test if a sib has been truncated
if self.buffer is not None:
if self.buffer[5:] == b";SIB;":
data = self.buffer + data
else:
data = data + self.buffer
self.buffer = None
# Test if a sib has been received
is_aSIB = data[:5] == b";SIB;"
if is_aSIB:
try:
tab_data = data[5:].split(b';', maxsplit=2)
index_sib = int(tab_data[0].decode())
len_sib = int(tab_data[1].decode())
psib = tab_data[2]
# Check if two SIB are received in the same packet
if len_sib < len(psib):
psib = tab_data[2][:len_sib]
handler.loop.run_in_executor(
None, handler.data_received,
tab_data[2][len_sib:])
# Treat one sib
if len_sib == len(psib):
sib = pickle.loads(psib)
sib.control_integrity(handler.keyH)
handler.core.assign_result_search_block(index_sib, sib)
handler.nbSIBDone += 1
# Notify the UI layer
if handler.core.notify:
handler.loop.run_in_executor(
None, handler.notify,
"application.state.loadbar",
(handler.nbSIBDone, handler.nbSIB))
# Indicate the task is done
if handler.nbSIBDone == handler.nbSIB:
handler.core.taskInProgress = False
self.buffer = None
# Not enough data received, wait for new data
else:
self.buffer = data # Push data for a next treatment
                    except Exception:
message = "S32A protocol error " + \
str(handler.nbSIBDone) + \
"/" + str(handler.nbSIB) + " blocks"
raise Exception(message)
# Save data not treated
if is_OK is False and is_aSIB is False:
self.buffer = data
except Exception as exc:
# Schedule a call to the exception handler
handler.loop.call_soon_threadsafe(handler.exception_handler, exc)
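# Wire-format summary inferred from the parsing above (informal, for reference):
#   rejection:   b"ERROR <message>"
#   acceptance:  b"OK <nbblock>" optionally followed by ";" and the first block
#   block:       b";SIB;<index>;<length>;<pickled SIB bytes>"
# Blocks may arrive truncated across packets (buffered in self.buffer) or
# concatenated in one packet (the surplus is re-dispatched to handler.data_received).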
|
{
"content_hash": "5b9ccab4c68001c6c909153e9726cd0e",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 83,
"avg_line_length": 39.58119658119658,
"alnum_prop": 0.4063917080544159,
"repo_name": "thethythy/Mnemopwd",
"id": "7b596833d5db8e6dba745dab3167434964e6ef78",
"size": "6033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnemopwd/client/corelayer/protocol/StateS32A.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "580678"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from symposion.markdown_parser import parse
from symposion.proposals.models import ProposalBase
from symposion.conference.models import Section
from symposion.speakers.models import SpeakerBase as Speaker
@python_2_unicode_compatible
class Schedule(models.Model):
section = models.OneToOneField(Section, verbose_name=_("Section"))
published = models.BooleanField(default=True, verbose_name=_("Published"))
hidden = models.BooleanField(_("Hide schedule from overall conference view"), default=False)
def __str__(self):
return "%s Schedule" % self.section
class Meta:
ordering = ["section"]
verbose_name = _('Schedule')
verbose_name_plural = _('Schedules')
@python_2_unicode_compatible
class Day(models.Model):
schedule = models.ForeignKey(Schedule, verbose_name=_("Schedule"))
date = models.DateField(verbose_name=_("Date"))
def __str__(self):
return "%s (%s)" % (self.date, self.schedule)
class Meta:
unique_together = [("schedule", "date")]
ordering = ["date"]
verbose_name = _("date")
verbose_name_plural = _("dates")
@python_2_unicode_compatible
class Room(models.Model):
schedule = models.ForeignKey(Schedule, verbose_name=_("Schedule"))
name = models.CharField(max_length=65, verbose_name=_("Name"))
order = models.PositiveIntegerField(verbose_name=_("Order"))
def __str__(self):
return self.name
class Meta:
verbose_name = _("Room")
verbose_name_plural = _("Rooms")
@python_2_unicode_compatible
class SlotKind(models.Model):
"""
A slot kind represents what kind a slot is. For example, a slot can be a
break, lunch, or X-minute talk.
"""
schedule = models.ForeignKey(Schedule, verbose_name=_("schedule"))
label = models.CharField(max_length=50, verbose_name=_("Label"))
def __str__(self):
return self.label
class Meta:
verbose_name = _("Slot kind")
verbose_name_plural = _("Slot kinds")
@python_2_unicode_compatible
class Slot(models.Model):
name = models.CharField(max_length=200, editable=False)
day = models.ForeignKey(Day, verbose_name=_("Day"))
kind = models.ForeignKey(SlotKind, verbose_name=_("Kind"))
start = models.TimeField(verbose_name=_("Start"))
end = models.TimeField(verbose_name=_("End"))
content_override = models.TextField(blank=True, verbose_name=_("Content override"))
content_override_html = models.TextField(blank=True)
title_override = models.CharField(max_length=100, blank=True)
override_rowspan = models.IntegerField(blank=True, null=True)
def assign(self, content):
"""
        Assign the given content to this slot; if the slot already had content,
        unlink it first to avoid integrity errors.
"""
self.unassign()
content.slot = self
content.save()
def unassign(self):
"""
        Unassign the content associated with this slot.
"""
content = self.content
if content and content.slot_id:
content.slot = None
content.save()
@property
def content(self):
"""
Return the content this slot represents.
@@@ hard-coded for presentation for now
"""
try:
return self.content_ptr
except ObjectDoesNotExist:
return None
@property
def start_datetime(self):
return datetime.datetime(
self.day.date.year,
self.day.date.month,
self.day.date.day,
self.start.hour,
self.start.minute)
@property
def end_datetime(self):
return datetime.datetime(
self.day.date.year,
self.day.date.month,
self.day.date.day,
self.end.hour,
self.end.minute)
@property
def length_in_minutes(self):
return int(
(self.end_datetime - self.start_datetime).total_seconds() / 60)
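    # e.g. a slot running 09:00-09:45 on one day gives length_in_minutes == 45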
@property
def rooms(self):
return Room.objects.filter(pk__in=self.slotroom_set.values("room"))
@property
def room_names(self):
return ", ".join([str(r) for r in self.rooms])
def save(self, *args, **kwargs):
        roomlist = ' '.join('%s' % r for r in self.rooms)  # '%s' uses __unicode__ on py2, __str__ on py3
self.name = "%s %s (%s - %s) %s" % (self.day, self.kind, self.start, self.end, roomlist)
self.content_override_html = parse(self.content_override)
super(Slot, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Meta:
ordering = ["day", "start", "end"]
verbose_name = _("slot")
verbose_name_plural = _("slots")
@python_2_unicode_compatible
class SlotRoom(models.Model):
"""
Links a slot with a room.
"""
slot = models.ForeignKey(Slot, verbose_name=_("Slot"))
room = models.ForeignKey(Room, verbose_name=_("Room"))
def __str__(self):
return "%s %s" % (self.room, self.slot)
class Meta:
unique_together = [("slot", "room")]
ordering = ["slot", "room__order"]
verbose_name = _("Slot room")
verbose_name_plural = _("Slot rooms")
@python_2_unicode_compatible
class Presentation(models.Model):
slot = models.OneToOneField(Slot, null=True, blank=True, related_name="content_ptr", verbose_name=_("Slot"))
title = models.CharField(max_length=100, verbose_name=_("Title"))
description = models.TextField(verbose_name=_("Description"))
description_html = models.TextField(blank=True)
abstract = models.TextField(verbose_name=_("Abstract"))
abstract_html = models.TextField(blank=True)
speaker = models.ForeignKey(Speaker, related_name="presentations", verbose_name=_("Speaker"))
# Removing broken relation and using speakers from proposal in speakers property below
#additional_speakers = models.ManyToManyField(Speaker, related_name="copresentations",
# blank=True, verbose_name=_("Additional speakers"))
cancelled = models.BooleanField(default=False, verbose_name=_("Cancelled"))
proposal_base = models.OneToOneField(ProposalBase, related_name="presentation", verbose_name=_("Proposal base"))
section = models.ForeignKey(Section, related_name="presentations", verbose_name=_("Section"))
feedback_url = models.URLField(blank=True, null=True)
youtube_url = models.URLField(blank=True, null=True)
def save(self, *args, **kwargs):
self.description_html = parse(self.description)
self.abstract_html = parse(self.abstract)
return super(Presentation, self).save(*args, **kwargs)
@property
def number(self):
return self.proposal.number
@property
def proposal(self):
if self.proposal_base_id is None:
return None
return ProposalBase.objects.get_subclass(pk=self.proposal_base_id)
def speakers(self):
yield self.speaker
for speaker in self.proposal.additional_speakers.all():
if speaker.user:
yield speaker
def __str__(self):
return "#%s %s (%s)" % (self.number, self.title, self.speaker)
class Meta:
ordering = ["slot"]
verbose_name = _("presentation")
verbose_name_plural = _("presentations")
@python_2_unicode_compatible
class Session(models.Model):
day = models.ForeignKey(Day, related_name="sessions", verbose_name=_("Day"))
slots = models.ManyToManyField(Slot, related_name="sessions", verbose_name=_("Slots"))
def sorted_slots(self):
return self.slots.order_by("start")
def start(self):
slots = self.sorted_slots()
if slots:
return list(slots)[0].start
else:
return None
def end(self):
slots = self.sorted_slots()
if slots:
return list(slots)[-1].end
else:
return None
def __str__(self):
start = self.start()
end = self.end()
if start and end:
return "%s: %s - %s" % (
self.day.date.strftime("%a"),
start.strftime("%X"),
end.strftime("%X")
)
return ""
class Meta:
verbose_name = _("Session")
verbose_name_plural = _("Sessions")
@python_2_unicode_compatible
class SessionRole(models.Model):
SESSION_ROLE_CHAIR = 1
SESSION_ROLE_RUNNER = 2
SESSION_ROLE_TYPES = [
(SESSION_ROLE_CHAIR, _("Session Chair")),
(SESSION_ROLE_RUNNER, _("Session Runner")),
]
session = models.ForeignKey(Session, verbose_name=_("Session"))
user = models.ForeignKey(User, verbose_name=_("User"))
role = models.IntegerField(choices=SESSION_ROLE_TYPES, verbose_name=_("Role"))
status = models.NullBooleanField(verbose_name=_("Status"))
submitted = models.DateTimeField(default=datetime.datetime.now)
class Meta:
unique_together = [("session", "user", "role")]
verbose_name = _("Session role")
verbose_name_plural = _("Session roles")
def __str__(self):
return "%s %s: %s" % (self.user, self.session,
self.SESSION_ROLE_TYPES[self.role - 1][1])
|
{
"content_hash": "ec65950b80d2ddb22ec6ff59722c70f9",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 116,
"avg_line_length": 31.695364238410598,
"alnum_prop": 0.6212912661930631,
"repo_name": "pyohio/symposion",
"id": "1e6855dfd82f57f5d032fdbe8bf2fb9726e8d2df",
"size": "9572",
"binary": false,
"copies": "1",
"ref": "refs/heads/pyohio-2019",
"path": "symposion/schedule/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "287626"
}
],
"symlink_target": ""
}
|
import socket
UDP_IP="127.0.0.1"
UDP_PORT=5005
MESSAGE="Hello, World 5005!"
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
sock.sendto( MESSAGE, (UDP_IP, UDP_PORT) )
|
{
"content_hash": "1f5d3710d7760000d8b57c57c49ab384",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 23.23076923076923,
"alnum_prop": 0.6688741721854304,
"repo_name": "afronski/LogsLiteViewerCLI",
"id": "fb91f418d974fe379648eeb3960676f4149c3266",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/server2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "475"
},
{
"name": "C++",
"bytes": "60509"
},
{
"name": "Python",
"bytes": "605"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
}
|
from helper import CompatTestCase
from validator.compat import FX48_DEFINITION
class TestFX48Compat(CompatTestCase):
"""Test that compatibility tests for Gecko 48 are properly executed."""
VERSION = FX48_DEFINITION
def test_nsIIOService_newChannel_deprecated(self):
"""https://github.com/mozilla/addons-server/issues/2679"""
expected = (
'The "newChannel" functions have been deprecated in favor of '
'their new versions (ending with 2).')
script = '''
var iOService = Components.classes["@mozilla.org/network/io-service;1"]
.getService(Components.interfaces.nsIIOService);
let channel = iOService.newChannel();
'''
self.run_script_for_compat(script)
assert not self.compat_err.notices
assert not self.compat_err.errors
warning = [msg for msg in self.compat_err.warnings if msg['message'] == expected]
assert warning
assert warning[0]['compatibility_type'] == 'warning'
def test_nsIIOService_newChannelFromURI_deprecated(self):
"""https://github.com/mozilla/addons-server/issues/2679"""
expected = (
'The "newChannel" functions have been deprecated in favor of '
'their new versions (ending with 2).')
script = '''
var io = Components.classes["@mozilla.org/network/io-service;1"]
.getService(Components.interfaces["nsIIOService"]);
reqObj = io.newChannelFromURI(uri);
'''
self.run_script_for_compat(script)
assert not self.compat_err.notices
assert not self.compat_err.errors
warning = [msg for msg in self.compat_err.warnings if msg['message'] == expected]
assert warning
assert warning[0]['compatibility_type'] == 'warning'
def test_nsIIOService_newChannelFromURIWithProxyFlags(self):
"""https://github.com/mozilla/addons-server/issues/2679"""
expected = (
'The "newChannel" functions have been deprecated in favor of '
'their new versions (ending with 2).')
script = '''
var iOService = Components.classes["@mozilla.org/network/io-service;1"]
.getService(Components.interfaces.nsIIOService);
var channel = iOService.newChannelFromURIWithProxyFlags(aboutURI);
'''
self.run_script_for_compat(script)
assert not self.compat_err.notices
assert not self.compat_err.errors
warning = [msg for msg in self.compat_err.warnings if msg['message'] == expected]
assert warning
assert warning[0]['compatibility_type'] == 'warning'
def test_Proxy_createFunction_removed(self):
"""https://github.com/mozilla/addons-server/issues/2678"""
expected = 'Proxy.create and Proxy.createFunction are no longer supported.'
script = '''
function f() { "use strict"; return this; }
var p = Proxy.createFunction(f, f);
'''
self.run_script_for_compat(script)
assert not self.compat_err.notices
assert not self.compat_err.errors
warning = [msg for msg in self.compat_err.warnings if msg['message'] == expected]
assert warning
assert warning[0]['compatibility_type'] == 'error'
def test_Proxy_create_removed(self):
"""https://github.com/mozilla/addons-server/issues/2678"""
expected = 'Proxy.create and Proxy.createFunction are no longer supported.'
script = '''
var handler = {};
var proxiedFunctionPrototype = Proxy.create(handler, Function.prototype, undefined);
'''
self.run_script_for_compat(script)
assert not self.compat_err.notices
assert not self.compat_err.errors
warning = [msg for msg in self.compat_err.warnings if msg['message'] == expected]
assert warning
assert warning[0]['compatibility_type'] == 'error'
def test_throbber_icon_moved(self):
"""https://github.com/mozilla/addons-server/issues/2678"""
expected = 'The throbber icon your add-on points to has been moved'
paths = (
'chrome://browser/skin/tabbrowser/loading.png',
'chrome://global/skin/icons/loading_16.png'
)
script = '''
#foo {
list-style-image: url("%s");
}
'''
for path in paths:
self.run_regex_for_compat(script % path, is_js=False)
assert not self.compat_err.notices
assert not self.compat_err.errors
warning = [
msg for msg in self.compat_err.warnings
if msg['message'].startswith(expected)]
assert warning
assert warning[0]['compatibility_type'] == 'warning'
|
{
"content_hash": "7fa8a30f4490c4b9329d6242480044bf",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 96,
"avg_line_length": 35.669117647058826,
"alnum_prop": 0.6128633271490415,
"repo_name": "diox/amo-validator",
"id": "2389f985bb668f6e1f5f6180aed5348327f2159d",
"size": "4851",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/compat/test_gecko48.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "502"
},
{
"name": "HTML",
"bytes": "2802"
},
{
"name": "JavaScript",
"bytes": "602"
},
{
"name": "Python",
"bytes": "789304"
},
{
"name": "Shell",
"bytes": "1039"
}
],
"symlink_target": ""
}
|
__file__ = 'QC_v1'
__date__ = '6/16/2015'
__author__ = 'ABREZNIC'
import arcpy, datetime, os
output = "C:\\TxDOT\\Scripts\\javascript\\Guardrail\\Snake\\QC"
GET = "C:\\TxDOT\\Scripts\\javascript\\Guardrail\\Snake\\QC\\Analysis\\GET_20150629_QC.gdb\\GuardrailEndTreatments"
roadways = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
offices = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Facility\\TPP_GIS.APP_TPP_GIS_ADMIN.Office"
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
arcpy.CreateFileGDB_management(output, "QC_GET" + curYear + curMonth + curDay)
werkspace = output + os.sep + "QC_GET" + curYear + curMonth + curDay + ".gdb"
print "Workspace created."
###############bad speed and accuracy##########
query = "(Speed = '0' OR Speed IS NULL) AND AccuracyFeet > 250"
badGET = werkspace + os.sep + "GET_BadSpdAcc"
arcpy.Select_analysis(GET, badGET, query)
print "Bad accuracy and speed records found."
#########cursor checks############
##### same lat, long check
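# Two-pass duplicate check: Frequency_analysis collects lat/long pairs that
# occur more than once; a copy of GET is then pruned down to just those pairs.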
dupe_ids = []  # identifiers of duplicated lat/long pairs (avoid shadowing built-in list)
latlongFreq = werkspace + os.sep + "LatLongFreq"
arcpy.Frequency_analysis(GET, latlongFreq, ["Latitude", "Longitude"])
cursor = arcpy.da.UpdateCursor(latlongFreq, ["FREQUENCY", "Latitude", "Longitude"])
for row in cursor:
if row[0] == 1:
cursor.deleteRow()
else:
identifier = str(row[1]) + str(row[2])
        dupe_ids.append(identifier)
del cursor
print "Lat/Long Duplicates identified."
dupes = werkspace + os.sep + "DupeLatLong"
arcpy.Copy_management(GET, dupes)
cursor = arcpy.da.UpdateCursor(dupes, ["Latitude", "Longitude"])
for row in cursor:
identifier = str(row[0]) + str(row[1])
    if identifier not in dupe_ids:
cursor.deleteRow()
del cursor
print "DupeLatLong feature class complete."
############txdot offices buffer############
officeBuff = werkspace + os.sep + "TxdotOfficeBuffer"
arcpy.Buffer_analysis(offices, officeBuff, ".1 Miles")
print "Office buffer created."
officeGET = werkspace + os.sep + "GET_officeTests"
arcpy.Clip_analysis(GET, officeBuff, officeGET)
print "Office GETs found."
############onsystem buffer############
query2 = "RTE_CLASS = '1'"
onsystem = werkspace + os.sep + "OnSystemRoadways"
arcpy.Select_analysis(roadways, onsystem, query2)
print "OnSystem roads isolated."
onsysBuff = werkspace + os.sep + "OnSystemBuffer"
arcpy.Buffer_analysis(onsystem, onsysBuff, "125 Feet")
print "Roads buffer created."
onsysGET = werkspace + os.sep + "GET_offRoadways"
arcpy.Erase_analysis(badGET, onsysBuff, onsysGET)
print "Road GETs found."
print "that's all folks!!"
|
{
"content_hash": "0729f00f636202dd240c3662c8128db3",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 139,
"avg_line_length": 35.09090909090909,
"alnum_prop": 0.6935603256846781,
"repo_name": "adambreznicky/javascript",
"id": "d8970cda938efb442b5da9eff4c0851a9e562fb7",
"size": "2702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Guardrail/Snake/QC_v1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5612"
},
{
"name": "HTML",
"bytes": "2588233"
},
{
"name": "JavaScript",
"bytes": "81649"
},
{
"name": "PHP",
"bytes": "15796"
},
{
"name": "Python",
"bytes": "126852"
}
],
"symlink_target": ""
}
|
import os
from dateutil.parser import parse
import requests
from dal.oracle.ae2.ae2_transaction import retrieve_ena_accession
from models.sra_xml import submission_api
import settings
__author__ = 'Ahmed G. Ali'
def update_release_date(accession, release_date):
release_datetime = parse(release_date)
ena_acc = retrieve_ena_accession(accession)
if ena_acc:
ena_acc = ena_acc[0].text
print ena_acc
save_dir = os.path.join(settings.TEMP_FOLDER, accession)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '%s_%s_change_date_submission.xml' % (accession, release_date))
action_lst = [submission_api.ACTIONType(HOLD=submission_api.HOLDType(HoldUntilDate=release_date, target=ena_acc))]
submission = submission_api.SubmissionType(broker_name='ArrayExpress',
ACTIONS=submission_api.ACTIONSType(ACTION=action_lst),
)
print save_file
submission.export(open(save_file, 'w'), 0, name_='SUBMISSION')
files = {'SUBMISSION': open(save_file, 'rb')}
r = requests.post(settings.ENA_SRA_URL, files=files, verify=False)
content = r.content
f = open(os.path.join(save_dir, '%s_%s_date_change_receipt.xml' % (accession, release_date)), 'w')
f.write(content)
f.close()
print content
if __name__ == '__main__':
accession = 'E-MTAB-3594'
release_date = '2015-12-21'
update_release_date(accession=accession, release_date=release_date)
|
{
"content_hash": "6d826ba5924a79293a78ac2f6117eea4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 118,
"avg_line_length": 38.8,
"alnum_prop": 0.6559278350515464,
"repo_name": "arrayexpress/ae_auto",
"id": "01644c72ae52798b5db3f81890498f74e7b4f427",
"size": "1552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automation/release_date/studies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "CSS",
"bytes": "596162"
},
{
"name": "HTML",
"bytes": "62396"
},
{
"name": "JavaScript",
"bytes": "605485"
},
{
"name": "Makefile",
"bytes": "1574"
},
{
"name": "PowerShell",
"bytes": "939"
},
{
"name": "Python",
"bytes": "2169498"
},
{
"name": "Ruby",
"bytes": "1030"
}
],
"symlink_target": ""
}
|
from slick import app
app.run(host="0.0.0.0", debug=True)
|
{
"content_hash": "1c7607a76c60a52f19eca34a082ee9a9",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 37,
"avg_line_length": 30,
"alnum_prop": 0.6833333333333333,
"repo_name": "iblis17/slick",
"id": "bacde9508681b45f4d0c523f6a9603c32ca9cd95",
"size": "77",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7930"
},
{
"name": "HTML",
"bytes": "78502"
},
{
"name": "JavaScript",
"bytes": "28929"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1059"
},
{
"name": "Python",
"bytes": "116400"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import multiprocessing
import utool
from vtool.tests import grabdata
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_DELETE_ANNOTATION_CHIPS]')
def TEST_DELETE_ANNOTATION_CHIPS(ibs, back):
gpath_list = grabdata.get_test_gpaths(ndata=None)[0:4]
gid_list = ibs.add_images(gpath_list)
bbox_list = [(0, 0, 100, 100)] * len(gid_list)
name_list = ['a', 'b', 'a', 'd']
aid_list = ibs.add_annots(gid_list, bbox_list=bbox_list, name_list=name_list)
assert len(aid_list) != 0, "No annotations"
aid = aid_list[0]
gid = ibs.get_annot_gids(aid)
assert gid is not None, "gid for aid=%r is None" % (aid,)
gthumbpath = ibs.get_image_thumbpath(gid)
annotation_thumbpath = ibs.get_annot_chip_thumbpath(aid)
ibs.delete_annot_chips(aid)
aid_list = ibs.get_valid_aids()
assert aid in aid_list, "Error: Annotation deleted"
assert not utool.checkpath(gthumbpath), "Image Thumbnail not deleted"
assert not utool.checkpath(annotation_thumbpath), "Roi Thumbnail not deleted"
return locals()
if __name__ == '__main__':
multiprocessing.freeze_support() # For windows
import ibeis
main_locals = ibeis.main(defaultdb='testdb_empty', gui=False,
allow_newdir=True, delete_ibsdir=True)
ibs = main_locals['ibs'] # IBEIS Control
back = main_locals['back'] # IBEIS GUI backend
test_locals = utool.run_test(TEST_DELETE_ANNOTATION_CHIPS, ibs, back)
exec(utool.execstr_dict(test_locals, 'test_locals'))
exec(utool.ipython_execstr())
|
{
"content_hash": "7b121d31fc2eb8826ed6c8de9bd37ff5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 96,
"avg_line_length": 43.75675675675676,
"alnum_prop": 0.6726374305126621,
"repo_name": "SU-ECE-17-7/ibeis",
"id": "8330e7e19c121869bf135b57e7ddd910a8c73788",
"size": "1694",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "_broken/test_delete_annotation_chips.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "331"
},
{
"name": "CSS",
"bytes": "26792"
},
{
"name": "HTML",
"bytes": "33762203"
},
{
"name": "Inno Setup",
"bytes": "1585"
},
{
"name": "JavaScript",
"bytes": "227454"
},
{
"name": "Jupyter Notebook",
"bytes": "66346367"
},
{
"name": "Python",
"bytes": "6112508"
},
{
"name": "Shell",
"bytes": "58211"
}
],
"symlink_target": ""
}
|
import hashlib
import importlib
import inspect
import logging
import math
import os
import uuid
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import fields
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from multiselectfield import MultiSelectField
from ordered_model.models import OrderedModel
import shortuuid
from slugger import AutoSlugField
from bridge_lti.models import BridgeUser, LtiContentSource, LtiUser, OutcomeService
from common.mixins.models import HasLinkedSequenceMixin, ModelFieldIsDefaultMixin
from module import tasks
log = logging.getLogger(__name__)
def _discover_applicable_modules(folder_name='engines', file_startswith='engine_'):
    """
    Scan `folder_name` for `<file_startswith>*.py` files and return a list of
    (module_name, short_name) tuples, e.g. ('engine_mock', 'mock').
    """
    modules = []
for name in os.listdir(os.path.join(
os.path.dirname(os.path.abspath(__file__)), folder_name
)):
if name.startswith(file_startswith) and name.endswith('.py'):
modules.append((name[:-len('.py')], name[len(file_startswith):-len('.py')]))
return modules
def _load_cls_from_applicable_module(module_path, mod_name, class_startswith=None, class_endswith=None):
"""
Load class from module.
"""
module = None
try:
cls_module = importlib.import_module('{}.{}'.format(module_path, mod_name))
except ImportError:
log.error("Could not load module_path={}, mod_name={}".format(module_path, mod_name))
raise
for attr in inspect.getmembers(cls_module):
if class_endswith and attr[0].endswith(class_endswith):
module = attr[1]
elif class_startswith and attr[0].startswith(class_startswith):
module = attr[1]
return module
ENGINES = _discover_applicable_modules(folder_name='engines', file_startswith='engine_')
GRADING_POLICY_MODULES = _discover_applicable_modules(folder_name='policies', file_startswith='policy_')
GRADING_POLICY_NAME_TO_CLS = {
name: _load_cls_from_applicable_module("module.policies", file_name, class_endswith="GradingPolicy")
for file_name, name in GRADING_POLICY_MODULES
}
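# e.g. a file policies/policy_points_earned.py would contribute an entry like
# {'points_earned': <its GradingPolicy class>} (file and key names illustrative).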
"""
Policy choices. Key is a policy name and value is a GradingPolicyClass.
GradingPolicyClass has __str__ method and it's string representation is GradingPolicy.public_name.
This hack is done to pass policy.summary_text and policy.detail_text to select policy widget's template, where these
variables are used to show popover message with description about each policy (bootstrap3 JS popover function).
"""
GRADING_POLICY_CHOICES = tuple((k, v) for k, v in GRADING_POLICY_NAME_TO_CLS.items())  # a tuple, not a one-shot generator
class Sequence(models.Model):
"""
Represents User's problem solving track.
"""
lti_user = models.ForeignKey(LtiUser, on_delete=models.CASCADE)
collection_order = models.ForeignKey('CollectionOrder', null=True, on_delete=models.CASCADE)
completed = fields.BooleanField(default=False)
lis_result_sourcedid = models.CharField(max_length=255, null=True)
outcome_service = models.ForeignKey(OutcomeService, null=True, on_delete=models.CASCADE)
    metadata = JSONField(default=dict, blank=True)  # callable default avoids a shared mutable dict
    # NOTE(yura.braiko) suffix is a hash that makes user_id unique for the collection repetition feature.
suffix = models.CharField(max_length=15, default='')
class Meta:
unique_together = ('lti_user', 'collection_order', 'suffix')
def __str__(self):
return '<Sequence[{}]: {}>'.format(self.id, self.lti_user)
def fulfil_sequence_metadata(self, lti_params, launch_params):
"""
        Populate the sequence metadata field with the launch parameters named in lti_params.
        :param lti_params: iterable of required LTI parameter names
        :param launch_params: dict of LTI parameters received in the LTI launch request
"""
meta_dict = {}
for param in lti_params:
if param in launch_params:
meta_dict[param] = launch_params[param]
if meta_dict:
self.metadata = meta_dict
self.save()
def sequence_ui_details(self):
"""
        Create the context for the optional label on the student view.
        The context depends on the CollectionOrder's OPTIONS value.
        :return: list of strings to inject into the label.
"""
ui_options = self.collection_order.ui_option
details_list = []
for ui_option in ui_options:
            # NOTE(idegtiarov) conditions depend on CollectionOrder.OPTIONS
if ui_option == CollectionOrder.OPTIONS[0][0]:
details = (
f"{CollectionOrder.OPTIONS[0][1]}: {self.items.count()}/"
f"{self.collection_order.collection.activities.count()}"
)
elif ui_option == CollectionOrder.OPTIONS[1][0]:
grade = self.collection_order.grading_policy.calculate_grade(self)
                # NOTE(andrey.lykhoman): floating-point arithmetic can introduce
                # spurious extra digits after the decimal point, so round the grade.
grade = round(grade * 100, 1)
details = f"{CollectionOrder.OPTIONS[1][1]}: {grade}%"
else:
details = (
f"{CollectionOrder.OPTIONS[2][1]}: "
f"{self.items.filter(score__gt=0).count()}/{self.items.filter(score=0).count()}"
)
details_list.append(details)
return details_list
class SequenceItem(models.Model):
"""
Represents one User's step in problem solving track.
"""
sequence = models.ForeignKey('Sequence', related_name='items', null=True, on_delete=models.CASCADE)
activity = models.ForeignKey('Activity', null=True, on_delete=models.CASCADE)
position = models.PositiveIntegerField(default=1)
score = models.FloatField(null=True, blank=True, help_text="Grade policy: 'p' (problem's current score).")
    # NOTE(idegtiarov) suffix is a hash that makes user_id unique for the Activity repetition feature.
suffix = models.CharField(max_length=10, default='')
is_problem = models.BooleanField(default=True)
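    # Snapshot of the score as loaded from the DB; save() compares against it
    # so the adaptive engine is only notified on a real score change.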
__origin_score = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__origin_score = self.score
class Meta:
verbose_name = "Sequence Item"
verbose_name_plural = "Sequence Items"
ordering = ['sequence', 'position']
def __str__(self):
return '<SequenceItem: {}={}>'.format(self.sequence, self.activity.name)
def _add_suffix(self):
"""
Add suffix to the SequenceItem if activity repetition is allowed.
"""
if self.suffix:
return
self.suffix = hashlib.sha1(shortuuid.uuid().encode('utf-8')).hexdigest()[::4] # Return 10 character uuid suffix
def save(self, *args, **kwargs):
"""
        Extended save() that notifies the Adaptive engine when the score changes.
"""
if self.score != self.__origin_score:
engine = self.sequence.collection_order.engine.engine_driver
engine.submit_activity_answer(self)
log.debug("Adaptive engine is updated with the grade for the {} activity in the SequenceItem {}".format(
self.activity.name, self.id
))
if self.activity.repetition > 1:
self._add_suffix()
self.is_problem = self.activity.is_problem
super().save(*args, **kwargs)
@property
def user_id_for_consumer(self):
return f'{self.sequence.lti_user.user_id}{self.sequence.suffix}{self.suffix}'
class GradingPolicy(ModelFieldIsDefaultMixin, models.Model):
"""
    Predefined set of grading policy objects. Defines how collections are graded.
    The `name` field is filled from the corresponding choices in GradingPolicyForm.
"""
name = models.CharField(max_length=20) # Field name is not editable in admin UI.
public_name = models.CharField(max_length=255)
    params = JSONField(default=dict, blank=True, help_text="Policy parameters in json format.")
engine = models.ForeignKey('Engine', blank=True, null=True, on_delete=models.CASCADE)
is_default = models.BooleanField(default=False)
@property
def policy_cls(self):
return GRADING_POLICY_NAME_TO_CLS[self.name]
def policy_instance(self, **kwargs):
return self.policy_cls(policy=self, **kwargs)
def calculate_grade(self, sequence):
policy = self.policy_cls(policy=self, sequence=sequence)
return math.floor(policy.grade * 1000) / 1000
def __str__(self):
return "{}, public_name: {} params: {}{}".format(
self.name, self.public_name, self.params,
", IS DEFAULT POLICY" if self.is_default else ""
)
class Collection(models.Model):
"""Set of Activities (problems) for a module."""
name = fields.CharField(max_length=255)
slug = AutoSlugField(
populate_from='name',
unique=True,
db_index=True,
help_text="Add the slug for the collection. If field empty slug will be created automatically.",
verbose_name='slug id'
)
owner = models.ForeignKey(BridgeUser, on_delete=models.CASCADE)
metadata = fields.CharField(max_length=255, blank=True, null=True)
updated_at = fields.DateTimeField(auto_now=True)
class Meta:
unique_together = ('owner', 'name')
def __str__(self):
return '<Collection: {}>'.format(self.name)
def save(self, *args, **kwargs):
"""Extension cover method with logging."""
initial_id = self.id
super().save(*args, **kwargs)
tasks.sync_collection_engines.apply_async(
kwargs={'collection_slug': self.slug, 'created_at': self.updated_at},
countdown=settings.CELERY_DELAY_SYNC_TASK,
)
if initial_id:
Log.objects.create(
log_type=Log.ADMIN, action=Log.COLLECTION_UPDATED,
data={'collection_slug': self.slug}
)
else:
Log.objects.create(
log_type=Log.ADMIN, action=Log.COLLECTION_CREATED,
data={'collection_slug': self.slug}
)
def get_absolute_url(self):
return reverse('module:collection-list')
class Engine(ModelFieldIsDefaultMixin, models.Model):
"""Defines engine settings."""
DEFAULT_ENGINE = 'engine_mock'
DRIVER = None
engine = models.CharField(choices=ENGINES, default=DEFAULT_ENGINE, max_length=100)
engine_name = models.CharField(max_length=255, blank=True, null=True, unique=True)
host = models.URLField(blank=True, null=True)
token = models.CharField(max_length=255, blank=True, null=True)
lti_parameters = models.TextField(
default='',
blank=True,
help_text=_("LTI parameters to sent to the engine, use comma separated string")
)
is_default = fields.BooleanField(default=False, help_text=_("If checked Engine will be used as the default!"))
class Meta:
unique_together = ('host', 'token')
def __str__(self):
return "Engine: {}".format(self.engine_name)
@classmethod
def create_default(cls):
return cls.objects.create(
engine=cls.DEFAULT_ENGINE,
engine_name='Mock',
is_default=True
)
@property
def engine_driver(self):
if not self.DRIVER:
driver = _load_cls_from_applicable_module('module.engines', self.engine, class_startswith='Engine')
            # NOTE(idegtiarov) Currently, this statement covers the existing engine
            # modules. Improve it if a new engine is added to the engines package.
if self.engine.endswith('mock'):
engine_driver = driver()
else:
engine_driver = driver(**{'HOST': self.host, 'TOKEN': self.token})
self.DRIVER = engine_driver
return self.DRIVER
@property
def lti_params(self):
return (param.strip() for param in self.lti_parameters.split(','))
class CollectionOrder(HasLinkedSequenceMixin, OrderedModel):
OPTIONS = (
('AT', _('Questions viewed/total')),
('EP', _('Earned grade')),
('RW', _('Answers right/wrong'))
)
slug = models.SlugField(unique=True, default=uuid.uuid4, editable=True, db_index=True)
group = models.ForeignKey('ModuleGroup', on_delete=models.CASCADE)
collection = models.ForeignKey('Collection', on_delete=models.CASCADE)
grading_policy = models.OneToOneField('GradingPolicy', blank=True, null=True, on_delete=models.CASCADE)
engine = models.ForeignKey(Engine, blank=True, null=True, on_delete=models.CASCADE)
strict_forward = fields.BooleanField(default=True)
ui_option = MultiSelectField(
choices=OPTIONS, blank=True, help_text="Add an optional UI block to the student view"
)
ui_next = models.BooleanField(
default=False, help_text="Add an optional NEXT button under the embedded unit."
)
congratulation_message = fields.BooleanField(default=False)
order_with_respect_to = 'group'
class Meta:
ordering = ('group', 'order')
@property
def get_selected_ui_options(self):
res_list = self.get_ui_option_list()
if self.ui_next:
res_list.append(_('Additional NEXT Button'))
return res_list
def get_launch_url(self):
return "{}{}".format(settings.BRIDGE_HOST, reverse("lti:launch", kwargs={'collection_order_slug': self.slug}))
class ModuleGroup(models.Model):
"""
Represents Module Group.
"""
name = models.CharField(max_length=255)
description = models.TextField(blank=True, null=True)
atime = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(BridgeUser, on_delete=models.CASCADE)
slug = models.UUIDField(unique=True, default=uuid.uuid4, editable=False, db_index=True)
collections = models.ManyToManyField(
Collection, related_name='collection_groups', blank=True, through='CollectionOrder'
)
contributors = models.ManyToManyField(
BridgeUser, related_name='module_groups', blank=True, through='ContributorPermission'
)
@property
def ordered_collections(self):
"""
        Yield (CollectionOrder, sequence_set.exists()) pairs, ordered by 'order'.
"""
return (
(col_order, col_order.sequence_set.exists())
for col_order in CollectionOrder.objects.filter(group=self).order_by('order')
)
def __str__(self):
return "<Group of Collections: {}>".format(self.name)
def get_absolute_url(self):
return reverse('module:group-detail', kwargs={'group_slug': self.slug})
def get_collection_order_by_order(self, order):
"""
Return CollectionOrder object filtered by order and group.
"""
return CollectionOrder.objects.filter(group=self, order=order).first()
def has_linked_active_sequences(self):
return CollectionOrder.objects.filter(group=self, sequence__completed=False).exists()
def has_linked_sequences(self):
return CollectionOrder.objects.filter(group=self, sequence__isnull=False).exists()
class ContributorPermission(models.Model):
user = models.ForeignKey(BridgeUser, on_delete=models.CASCADE)
group = models.ForeignKey(ModuleGroup, on_delete=models.CASCADE)
    # Note(AndreyLykhoman): Change this field to one that allows selecting more than one option.
full_permission = models.BooleanField(default=True)
class Activity(OrderedModel):
"""General entity which represents problem/text/video material."""
TYPES = (
('G', _('generic')),
('A', _('pre-assessment')),
('Z', _('post-assessment')),
)
order_with_respect_to = 'atype', 'collection'
name = models.CharField(max_length=255)
collection = models.ForeignKey('Collection', related_name='activities', null=True, on_delete=models.CASCADE)
tags = fields.CharField(
max_length=255,
help_text="Provide your tags separated by a comma.",
blank=True,
null=True,
)
atype = fields.CharField(
verbose_name="type", choices=TYPES, default='G', max_length=1,
help_text="Choose 'pre/post-assessment' activity type to pin Activity to the start or the end of "
"the Collection."
)
difficulty = fields.FloatField(
default='0.5',
help_text="Provide float number in the range 0.0 - 1.0",
validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
)
points = models.FloatField(blank=True, default=1)
lti_content_source = models.ForeignKey(LtiContentSource, null=True, on_delete=models.CASCADE)
source_launch_url = models.URLField(max_length=255, null=True)
source_name = fields.CharField(max_length=255, blank=True, null=True)
# NOTE(wowkalucky): extra field 'order' is available (inherited from OrderedModel)
# `stype` - means source_type or string_type.
stype = models.CharField(
"Type of the activity", help_text="(problem, video, html, etc.)", max_length=25, blank=True, null=True
)
    # Number of possible repetitions of the activity in the sequence
    repetition = models.PositiveIntegerField(
        default=1, help_text="The number of possible repetitions of the Activity in the sequence."
)
@property
def is_problem(self):
return self.stype in settings.PROBLEM_ACTIVITY_TYPES
class Meta:
verbose_name_plural = 'Activities'
unique_together = ("source_launch_url", "collection")
ordering = 'atype', 'order'
def __str__(self):
return '<Activity: {}>'.format(self.name)
def get_absolute_url(self):
return reverse('module:collection-detail', kwargs={'slug': self.collection.slug})
def save(self, *args, **kwargs):
"""Extension which sends notification to the Adaptive engine that Activity is created/updated."""
initial_id = self.id
if initial_id:
Log.objects.create(
log_type=Log.ADMIN, action=Log.ACTIVITY_UPDATED,
data=self.get_research_data()
)
else:
Log.objects.create(
log_type=Log.ADMIN, action=Log.ACTIVITY_CREATED,
data=self.get_research_data()
)
super().save(*args, **kwargs)
self.collection.save()
def delete(self, *args, **kwargs):
"""Extension which sends notification to the Adaptive engine that Activity is deleted."""
Log.objects.create(
log_type=Log.ADMIN, action=Log.ACTIVITY_DELETED,
data=self.get_research_data()
)
super().delete(*args, **kwargs)
self.collection.save()
@property
def last_pre(self):
"""
        Whether this Activity is the last pre-assessment ('A') activity in its collection.
:return: (bool)
"""
last_pre = Activity.objects.filter(collection=self.collection, atype='A').last()
return self.id == last_pre.id
def get_research_data(self):
return {'collection_id': self.collection_id, 'activity_id': self.id}
class Log(models.Model):
"""
Student actions log.
    Every time a student opens or submits an LTI problem, a new Log is created.
"""
OPENED = 'O'
SUBMITTED = 'S'
ADMIN = 'A'
LOG_TYPES = (
(OPENED, 'Opened'),
(SUBMITTED, 'Submitted'),
(ADMIN, 'Admin'),
)
ACTIVITY_CREATED = 'AC'
ACTIVITY_UPDATED = 'AU'
ACTIVITY_DELETED = 'AD'
COLLECTION_CREATED = 'CC'
COLLECTION_UPDATED = 'CU'
ACTIONS = (
(ACTIVITY_CREATED, 'Activity created'),
(ACTIVITY_UPDATED, 'Activity updated'),
(ACTIVITY_DELETED, 'Activity deleted'),
(COLLECTION_CREATED, 'Collection created'),
(COLLECTION_UPDATED, 'Collection updated'),
)
sequence_item = models.ForeignKey('SequenceItem', null=True, blank=True, on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True, blank=True, null=True)
log_type = fields.CharField(choices=LOG_TYPES, max_length=32)
answer = models.BooleanField(verbose_name='Is answer correct?', default=False)
attempt = models.PositiveIntegerField(default=0)
action = fields.CharField(choices=ACTIONS, max_length=2, null=True, blank=True)
    data = JSONField(default=dict, blank=True)
def __str__(self):
if self.log_type == self.OPENED:
return '<Log[{}]: {}>'.format(self.get_log_type_display(), self.sequence_item)
elif self.log_type == self.ADMIN:
return '<Log[{}]: {} ({})>'.format(
self.get_log_type_display(),
self.get_action_display(),
self.data
)
else:
return '<Log[{}]: {}-{}[{}]>'.format(
self.get_log_type_display(),
self.sequence_item,
self.answer,
self.attempt
)
|
{
"content_hash": "1801123a3bb286bbf96246ce07691740",
"timestamp": "",
"source": "github",
"line_count": 567,
"max_line_length": 120,
"avg_line_length": 37.410934744268076,
"alnum_prop": 0.6424665283801622,
"repo_name": "harvard-vpal/bridge-adaptivity",
"id": "edf5bbd882c97ed8ebc259c032286c0fc2d4bcec",
"size": "21212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bridge_adaptivity/module/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "Dockerfile",
"bytes": "1586"
},
{
"name": "HTML",
"bytes": "70921"
},
{
"name": "JavaScript",
"bytes": "29636"
},
{
"name": "Makefile",
"bytes": "1614"
},
{
"name": "Python",
"bytes": "315506"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
}
|
"""Test HomeKit util module."""
import pytest
import voluptuous as vol
from homeassistant.components.homekit.const import (
CONF_FEATURE,
CONF_FEATURE_LIST,
CONF_LINKED_BATTERY_SENSOR,
CONF_LOW_BATTERY_THRESHOLD,
DEFAULT_CONFIG_FLOW_PORT,
DOMAIN,
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
HOMEKIT_PAIRING_QR,
HOMEKIT_PAIRING_QR_SECRET,
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
)
from homeassistant.components.homekit.util import (
HomeKitSpeedMapping,
SpeedRange,
cleanup_name_for_homekit,
convert_to_float,
density_to_air_quality,
dismiss_setup_message,
find_next_available_port,
format_sw_version,
port_is_available,
show_setup_message,
temperature_to_homekit,
temperature_to_states,
validate_entity_config as vec,
validate_media_player_features,
)
from homeassistant.components.persistent_notification import (
ATTR_MESSAGE,
ATTR_NOTIFICATION_ID,
DOMAIN as PERSISTENT_NOTIFICATION_DOMAIN,
)
from homeassistant.const import (
ATTR_CODE,
ATTR_SUPPORTED_FEATURES,
CONF_NAME,
CONF_TYPE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import State
from .util import async_init_integration
from tests.common import async_mock_service


def test_validate_entity_config():
    """Test validate entities."""
    configs = [
        None,
        [],
        "string",
        12345,
        {"invalid_entity_id": {}},
        {"demo.test": 1},
        {"binary_sensor.demo": {CONF_LINKED_BATTERY_SENSOR: None}},
        {"binary_sensor.demo": {CONF_LINKED_BATTERY_SENSOR: "switch.demo"}},
        {"binary_sensor.demo": {CONF_LOW_BATTERY_THRESHOLD: "switch.demo"}},
        {"binary_sensor.demo": {CONF_LOW_BATTERY_THRESHOLD: -10}},
        {"demo.test": "test"},
        {"demo.test": [1, 2]},
        {"demo.test": None},
        {"demo.test": {CONF_NAME: None}},
        {"media_player.test": {CONF_FEATURE_LIST: [{CONF_FEATURE: "invalid_feature"}]}},
        {
            "media_player.test": {
                CONF_FEATURE_LIST: [
                    {CONF_FEATURE: FEATURE_ON_OFF},
                    {CONF_FEATURE: FEATURE_ON_OFF},
                ]
            }
        },
        {"switch.test": {CONF_TYPE: "invalid_type"}},
    ]

    for conf in configs:
        with pytest.raises(vol.Invalid):
            vec(conf)

    assert vec({}) == {}
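
    # Valid configs below are returned normalized: CONF_LOW_BATTERY_THRESHOLD
    # is filled in with its default of 20 wherever it is not set explicitly.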
    assert vec({"demo.test": {CONF_NAME: "Name"}}) == {
        "demo.test": {CONF_NAME: "Name", CONF_LOW_BATTERY_THRESHOLD: 20}
    }

    assert vec(
        {"binary_sensor.demo": {CONF_LINKED_BATTERY_SENSOR: "sensor.demo_battery"}}
    ) == {
        "binary_sensor.demo": {
            CONF_LINKED_BATTERY_SENSOR: "sensor.demo_battery",
            CONF_LOW_BATTERY_THRESHOLD: 20,
        }
    }
    assert vec({"binary_sensor.demo": {CONF_LOW_BATTERY_THRESHOLD: 50}}) == {
        "binary_sensor.demo": {CONF_LOW_BATTERY_THRESHOLD: 50}
    }

    assert vec({"alarm_control_panel.demo": {}}) == {
        "alarm_control_panel.demo": {ATTR_CODE: None, CONF_LOW_BATTERY_THRESHOLD: 20}
    }
    assert vec({"alarm_control_panel.demo": {ATTR_CODE: "1234"}}) == {
        "alarm_control_panel.demo": {ATTR_CODE: "1234", CONF_LOW_BATTERY_THRESHOLD: 20}
    }

    assert vec({"lock.demo": {}}) == {
        "lock.demo": {ATTR_CODE: None, CONF_LOW_BATTERY_THRESHOLD: 20}
    }
    assert vec({"lock.demo": {ATTR_CODE: "1234"}}) == {
        "lock.demo": {ATTR_CODE: "1234", CONF_LOW_BATTERY_THRESHOLD: 20}
    }

    assert vec({"media_player.demo": {}}) == {
        "media_player.demo": {CONF_FEATURE_LIST: {}, CONF_LOW_BATTERY_THRESHOLD: 20}
    }

    config = {
        CONF_FEATURE_LIST: [
            {CONF_FEATURE: FEATURE_ON_OFF},
            {CONF_FEATURE: FEATURE_PLAY_PAUSE},
        ]
    }
    assert vec({"media_player.demo": config}) == {
        "media_player.demo": {
            CONF_FEATURE_LIST: {FEATURE_ON_OFF: {}, FEATURE_PLAY_PAUSE: {}},
            CONF_LOW_BATTERY_THRESHOLD: 20,
        }
    }

    assert vec({"switch.demo": {CONF_TYPE: TYPE_FAUCET}}) == {
        "switch.demo": {CONF_TYPE: TYPE_FAUCET, CONF_LOW_BATTERY_THRESHOLD: 20}
    }
    assert vec({"switch.demo": {CONF_TYPE: TYPE_OUTLET}}) == {
        "switch.demo": {CONF_TYPE: TYPE_OUTLET, CONF_LOW_BATTERY_THRESHOLD: 20}
    }
    assert vec({"switch.demo": {CONF_TYPE: TYPE_SHOWER}}) == {
        "switch.demo": {CONF_TYPE: TYPE_SHOWER, CONF_LOW_BATTERY_THRESHOLD: 20}
    }
    assert vec({"switch.demo": {CONF_TYPE: TYPE_SPRINKLER}}) == {
        "switch.demo": {CONF_TYPE: TYPE_SPRINKLER, CONF_LOW_BATTERY_THRESHOLD: 20}
    }
    assert vec({"switch.demo": {CONF_TYPE: TYPE_SWITCH}}) == {
        "switch.demo": {CONF_TYPE: TYPE_SWITCH, CONF_LOW_BATTERY_THRESHOLD: 20}
    }
    assert vec({"switch.demo": {CONF_TYPE: TYPE_VALVE}}) == {
        "switch.demo": {CONF_TYPE: TYPE_VALVE, CONF_LOW_BATTERY_THRESHOLD: 20}
    }


def test_validate_media_player_features():
    """Test validate modes for media players."""
    config = {}
    attrs = {ATTR_SUPPORTED_FEATURES: 20873}
    entity_state = State("media_player.demo", "on", attrs)
    assert validate_media_player_features(entity_state, config) is True

    config = {FEATURE_ON_OFF: None}
    assert validate_media_player_features(entity_state, config) is True

    entity_state = State("media_player.demo", "on")
    assert validate_media_player_features(entity_state, config) is False
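
# Editor's note: the assertions above imply that validation succeeds only when
# the entity's ATTR_SUPPORTED_FEATURES bitmask covers every configured feature;
# a state created without that attribute fails once a feature is requested.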


def test_convert_to_float():
    """Test convert_to_float method."""
    assert convert_to_float(12) == 12
    assert convert_to_float(12.4) == 12.4
    assert convert_to_float(STATE_UNKNOWN) is None
    assert convert_to_float(None) is None


def test_cleanup_name_for_homekit():
    """Ensure name sanitization works as expected."""
    assert cleanup_name_for_homekit("abc") == "abc"
    assert cleanup_name_for_homekit("a b c") == "a b c"
    assert cleanup_name_for_homekit("ab_c") == "ab c"
    assert (
        cleanup_name_for_homekit('ab!@#$%^&*()-=":.,><?//\\ frog')
        == "ab--#---&----- -.,------ frog"
    )
    assert cleanup_name_for_homekit("の日本_語文字セット") == "の日本 語文字セット"


def test_temperature_to_homekit():
    """Test temperature conversion from HA to HomeKit."""
    assert temperature_to_homekit(20.46, TEMP_CELSIUS) == 20.5
    assert temperature_to_homekit(92.1, TEMP_FAHRENHEIT) == 33.4
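
# Editor's note: consistent with the assertions above, HomeKit always takes
# Celsius; Fahrenheit inputs are converted via (F - 32) / 1.8 and the result
# is rounded to one decimal place (92.1 F -> 33.3889 C -> 33.4).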


def test_temperature_to_states():
    """Test temperature conversion from HomeKit to HA."""
    assert temperature_to_states(20, TEMP_CELSIUS) == 20.0
    assert temperature_to_states(20.2, TEMP_FAHRENHEIT) == 68.5
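
# Editor's note: the reverse direction appears to round to the nearest 0.5 in
# the HA unit (20.2 C -> 68.36 F -> 68.5), per the assertion above.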


def test_density_to_air_quality():
    """Test mapping PM2.5 density to the HomeKit AirQuality level."""
    assert density_to_air_quality(0) == 1
    assert density_to_air_quality(35) == 1
    assert density_to_air_quality(35.1) == 2
    assert density_to_air_quality(75) == 2
    assert density_to_air_quality(115) == 3
    assert density_to_air_quality(150) == 4
    assert density_to_air_quality(300) == 5
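
# Editor's sketch (hypothetical, inferred from the assertions above; not the
# verified library source): a threshold ladder consistent with these tests.
def _density_to_air_quality_sketch(density):
    """Map PM2.5 density (ug/m3) to a HomeKit AirQuality level (1-5)."""
    if density <= 35:
        return 1  # Excellent
    if density <= 75:
        return 2  # Good
    if density <= 115:
        return 3  # Fair
    if density <= 150:
        return 4  # Inferior
    return 5  # Poor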


async def test_show_setup_msg(hass, hk_driver):
    """Test showing the setup message as a persistent notification."""
    pincode = b"123-45-678"

    entry = await async_init_integration(hass)
    assert entry

    call_create_notification = async_mock_service(
        hass, PERSISTENT_NOTIFICATION_DOMAIN, "create"
    )

    await hass.async_add_executor_job(
        show_setup_message, hass, entry.entry_id, "bridge_name", pincode, "X-HM://0"
    )
    await hass.async_block_till_done()

    assert hass.data[DOMAIN][entry.entry_id][HOMEKIT_PAIRING_QR_SECRET]
    assert hass.data[DOMAIN][entry.entry_id][HOMEKIT_PAIRING_QR]
    assert call_create_notification
    assert call_create_notification[0].data[ATTR_NOTIFICATION_ID] == entry.entry_id
    assert pincode.decode() in call_create_notification[0].data[ATTR_MESSAGE]


async def test_dismiss_setup_msg(hass):
    """Test dismissing the setup message."""
    call_dismiss_notification = async_mock_service(
        hass, PERSISTENT_NOTIFICATION_DOMAIN, "dismiss"
    )

    await hass.async_add_executor_job(dismiss_setup_message, hass, "entry_id")
    await hass.async_block_till_done()

    assert call_dismiss_notification
    assert call_dismiss_notification[0].data[ATTR_NOTIFICATION_ID] == "entry_id"


def test_homekit_speed_mapping():
    """Test that the SpeedRanges built from a speed_list are as expected."""
    # A standard two-speed fan ("off" plus two speeds).
    speed_mapping = HomeKitSpeedMapping(["off", "low", "high"])
    assert speed_mapping.speed_ranges == {
        "off": SpeedRange(0, 0),
        "low": SpeedRange(100 / 3, 50),
        "high": SpeedRange(200 / 3, 100),
    }

    # A standard three-speed fan.
    speed_mapping = HomeKitSpeedMapping(["off", "low", "medium", "high"])
    assert speed_mapping.speed_ranges == {
        "off": SpeedRange(0, 0),
        "low": SpeedRange(100 / 4, 100 / 3),
        "medium": SpeedRange(200 / 4, 200 / 3),
        "high": SpeedRange(300 / 4, 100),
    }

    # A Dyson-like fan with 10 speeds.
    speed_mapping = HomeKitSpeedMapping([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert speed_mapping.speed_ranges == {
        0: SpeedRange(0, 0),
        1: SpeedRange(10, 100 / 9),
        2: SpeedRange(20, 200 / 9),
        3: SpeedRange(30, 300 / 9),
        4: SpeedRange(40, 400 / 9),
        5: SpeedRange(50, 500 / 9),
        6: SpeedRange(60, 600 / 9),
        7: SpeedRange(70, 700 / 9),
        8: SpeedRange(80, 800 / 9),
        9: SpeedRange(90, 100),
    }
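
# Editor's note: inferring from the expected values above, for a speed list of
# length n the i-th entry maps to SpeedRange(i * 100 / n, i * 100 / (n - 1)),
# which pins the first entry ("off") to SpeedRange(0, 0) and gives the last
# entry a target of 100.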


def test_speed_to_homekit():
    """Test speed conversion from HA to HomeKit."""
    speed_mapping = HomeKitSpeedMapping(["off", "low", "high"])
    assert speed_mapping.speed_to_homekit(None) is None
    assert speed_mapping.speed_to_homekit("off") == 0
    assert speed_mapping.speed_to_homekit("low") == 50
    assert speed_mapping.speed_to_homekit("high") == 100


def test_speed_to_states():
    """Test speed conversion from HomeKit to HA."""
    speed_mapping = HomeKitSpeedMapping(["off", "low", "high"])
    assert speed_mapping.speed_to_states(-1) == "off"
    assert speed_mapping.speed_to_states(0) == "off"
    assert speed_mapping.speed_to_states(33) == "off"
    assert speed_mapping.speed_to_states(34) == "low"
    assert speed_mapping.speed_to_states(50) == "low"
    assert speed_mapping.speed_to_states(66) == "low"
    assert speed_mapping.speed_to_states(67) == "high"
    assert speed_mapping.speed_to_states(100) == "high"
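
# Editor's note: consistent with the assertions above, a HomeKit percentage
# maps back to the speed with the largest range start not exceeding it
# (33 -> "off" because "low" starts at 100 / 3 ~ 33.33); out-of-range values
# below 0 fall back to the first speed.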


async def test_port_is_available(hass):
    """Test we can get an available port and it is actually available."""
    next_port = await hass.async_add_executor_job(
        find_next_available_port, DEFAULT_CONFIG_FLOW_PORT
    )
    assert next_port
    assert await hass.async_add_executor_job(port_is_available, next_port)


async def test_format_sw_version():
    """Test format_sw_version method."""
    assert format_sw_version("soho+3.6.8+soho-release-rt120+10") == "3.6.8"
    assert format_sw_version("undefined-undefined-1.6.8") == "1.6.8"
    assert format_sw_version("56.0-76060") == "56.0.76060"
    assert format_sw_version(3.6) == "3.6"
    assert format_sw_version("unknown") is None
|
{
"content_hash": "35478ec6b85f7b454ee4e4c209e0a891",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 88,
"avg_line_length": 34.10703363914373,
"alnum_prop": 0.6223437640096835,
"repo_name": "tchellomello/home-assistant",
"id": "c6845779313cb78ba606f6c6084f4729e4987864",
"size": "11189",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "tests/components/homekit/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
}
|