hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a15d698b672da57e1bd866189e6b75785fbad8a
| 893
|
py
|
Python
|
configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
|
JustWeZero/mmdetection
|
6de523b5b1e71b9c989760faff0462e807827515
|
[
"Apache-2.0"
] | 20,190
|
2018-09-10T01:11:53.000Z
|
2022-03-31T22:31:33.000Z
|
configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
|
Joker-co/mmdet_pro
|
96abfd90cf0e38c5ce398795f949e9328eb85c1b
|
[
"Apache-2.0"
] | 6,736
|
2018-09-17T09:45:51.000Z
|
2022-03-31T22:54:10.000Z
|
configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
|
Joker-co/mmdet_pro
|
96abfd90cf0e38c5ce398795f949e9328eb85c1b
|
[
"Apache-2.0"
] | 7,837
|
2018-09-11T02:58:23.000Z
|
2022-03-31T22:31:38.000Z
|
# Strong-baseline recipe: Mask R-CNN R50-FPN trained from scratch with
# SyncBN and large-scale jitter (LSJ) for 100 epochs on COCO.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../common/lsj_100e_coco_instance.py'
]
# SyncBN for backbone/neck, where empty tensors do not occur.
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
    # the model is trained from scratch, so init_cfg is None
    backbone=dict(
        frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
    neck=dict(norm_cfg=norm_cfg),
    rpn_head=dict(num_convs=2),  # leads to 0.1+ mAP
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=head_norm_cfg),
        mask_head=dict(norm_cfg=head_norm_cfg)))
| 38.826087
| 77
| 0.699888
|
4a15d6de9f39bb9a0d7510f74eb8a65279f95584
| 4,669
|
py
|
Python
|
src/dashboard/views.py
|
franzcruspero/dj_class_views
|
a1b10c22658f3fc2a61bb29f3967e632e5571509
|
[
"MIT"
] | null | null | null |
src/dashboard/views.py
|
franzcruspero/dj_class_views
|
a1b10c22658f3fc2a61bb29f3967e632e5571509
|
[
"MIT"
] | 4
|
2020-06-06T00:45:44.000Z
|
2021-06-10T22:41:35.000Z
|
src/dashboard/views.py
|
franzcruspero/dj_class_views
|
a1b10c22658f3fc2a61bb29f3967e632e5571509
|
[
"MIT"
] | null | null | null |
from django.contrib import messages
from django.views.generic.edit import CreateView, UpdateView, DeleteView, ModelFormMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, Http404
from django.shortcuts import render
from django.views.generic.base import (
TemplateView, TemplateResponseMixin, ContextMixin
)
from django.views.generic import View
from django.utils.decorators import method_decorator
from .models import Book
from .forms import BookForm
# Create your views here.
class MultipleObjectMixin(object):
    """View mixin that resolves the object from a ``slug`` URL kwarg.

    When several rows share the slug, the first object of the queryset is
    used; when the lookup fails, ``Http404`` is raised.
    """

    def get_object(self, queryset=None, *args, **kwargs):
        slug = self.kwargs.get("slug")
        if not slug:
            # Bug fix: the original *returned* the Http404 class here,
            # which Django would try to use as the view's object. Raise.
            raise Http404
        try:
            return self.model.objects.get(slug=slug)
        except self.model.MultipleObjectsReturned:
            # Duplicate slugs: fall back to a deterministic first match.
            return self.get_queryset().first()
        except self.model.DoesNotExist:
            # Narrowed from a bare except, which hid unrelated errors.
            raise Http404
class BookCreateView(SuccessMessageMixin, CreateView):
    """Create a ``Book`` via ``BookForm``, flashing a success message."""

    template_name = "forms.html"
    form_class = BookForm
    success_message = "%(title)s has been created at %(created_at)s"

    def form_valid(self, form):
        # Stamp the creating user on the instance before saving.
        form.instance.added_by = self.request.user
        return super(BookCreateView, self).form_valid(form)

    def get_success_url(self):
        return reverse("book_list")

    def get_success_message(self, cleaned_data):
        # The creation timestamp is not in the form data, so merge it in.
        message_vals = dict(cleaned_data, created_at=self.object.timestamp)
        return self.success_message % message_vals
class BookUpdateView(MultipleObjectMixin, UpdateView):
    """Edit an existing ``Book``; the object is resolved by slug."""

    model = Book
    form_class = BookForm
    template_name = "forms.html"
class BookDeleteView(DeleteView):
    """Confirm-and-delete view for ``Book`` records."""

    model = Book

    def get_success_url(self):
        # Return to the listing once the book is gone.
        return reverse("book_list")
class BookDetail(SuccessMessageMixin, ModelFormMixin, MultipleObjectMixin, DetailView):
    """Detail page for a ``Book`` that also hosts an inline update form.

    GET renders the detail template with a bound form; POST validates the
    form and saves, mimicking ``UpdateView`` behaviour.
    """

    model = Book
    form_class = BookForm
    success_message = "%(title)s has been updated"

    def get_context_data(self, *args, **kwargs):
        context = super(BookDetail, self).get_context_data(*args, **kwargs)
        context['form'] = self.get_form()
        context["btn_title"] = "Update Book"
        return context

    def post(self, request, *args, **kwargs):
        # Bug fix: the original returned None for anonymous users, which
        # makes Django raise "view didn't return an HttpResponse". Refuse
        # the request explicitly instead. (Debug prints also removed.)
        if not request.user.is_authenticated():
            raise Http404
        self.object = self.get_object()
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)

    def get_success_url(self):
        return reverse("book_list")
class BookListView(ListView):
    """Plain listing of every ``Book`` using the default queryset."""

    model = Book
class LoginRequiredMixin(object):
    """CBV mixin gating ``dispatch`` behind the ``login_required`` decorator."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(
            request, *args, **kwargs)
class DashboardTemplateView(TemplateView):
    """Static "about" page with a custom title injected into the context."""

    template_name = "about.html"

    def get_context_data(self, *args, **kwargs):
        ctx = super(DashboardTemplateView, self).get_context_data(*args, **kwargs)
        ctx["title"] = "This is about us."
        return ctx
class MyView(LoginRequiredMixin, TemplateResponseMixin, ContextMixin, View):
    """Login-protected view built from the raw mixin building blocks."""

    def get(self, request, *args, **kwargs):
        ctx = self.get_context_data(**kwargs)
        ctx["title"] = "Some other title"
        return self.render_to_response(ctx)
| 35.105263
| 92
| 0.669951
|
4a15d85bf4e881221b45c743d8e72330e4cfe2ca
| 11,129
|
py
|
Python
|
guild/plugins/skopt_util.py
|
jukiewiczm/guildai
|
478cc29cb102a8bd0bed693ce9626fe4949257a2
|
[
"Apache-2.0"
] | null | null | null |
guild/plugins/skopt_util.py
|
jukiewiczm/guildai
|
478cc29cb102a8bd0bed693ce9626fe4949257a2
|
[
"Apache-2.0"
] | null | null | null |
guild/plugins/skopt_util.py
|
jukiewiczm/guildai
|
478cc29cb102a8bd0bed693ce9626fe4949257a2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import warnings
import six
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
import numpy.core.umath_tests # pylint: disable=unused-import
import skopt
from guild import batch_util
from guild import flag_util
from guild import op_util
from guild import query as qparse
# Module-level logger shared with the rest of guild.
log = logging.getLogger("guild")
# Fallbacks used when the batch run does not specify these settings.
DEFAULT_MAX_TRIALS = 20
DEFAULT_OBJECTIVE = "loss"
###################################################################
# Exceptions
###################################################################
class MissingSearchDimension(Exception):
    """Raised when no flag value defines a searchable dimension."""

    def __init__(self, flag_vals):
        # Keep the offending flags so error handlers can describe them.
        self.flag_vals = flag_vals
        super(MissingSearchDimension, self).__init__(flag_vals)
class InvalidSearchDimension(Exception):
    """Raised when a flag's search-dimension spec cannot be interpreted."""
class InvalidObjective(Exception):
    """Raised when the batch run's objective spec cannot be parsed."""
###################################################################
# Random trials
###################################################################
def random_trials_for_flags(flag_vals, count, random_seed=None):
    """Return ``count`` randomly sampled trial flag dicts for ``flag_vals``.

    Raises ``MissingSearchDimension`` when no flag defines a search space.
    """
    names, dims, initial_x = flag_dims(flag_vals)
    if not names:
        raise MissingSearchDimension(flag_vals)
    sampled = _trials_for_dims(names, dims, initial_x, count, random_seed)
    # Non-searchable flags still need to appear in every trial.
    _apply_missing_flag_vals(flag_vals, sampled)
    return sampled
def _trials_for_dims(names, dims, initial_x, num_trials, random_seed):
    """Sample ``num_trials`` points from ``dims`` and map them onto ``names``."""
    # dummy_minimize samples uniformly; the constant objective is ignored.
    res = skopt.dummy_minimize(
        lambda *args: 0,
        dims,
        n_calls=num_trials,
        random_state=random_seed)
    sampled_xs = res.x_iters
    if sampled_xs:
        # Pin any explicitly requested initial values into the first trial.
        _apply_initial_x(initial_x, sampled_xs[0])
    return [dict(zip(names, _native_python_xs(xs))) for xs in sampled_xs]
def _native_python_xs(xs):
def pyval(x):
try:
return x.item()
except AttributeError:
return x
return [pyval(x) for x in xs]
def _apply_initial_x(initial_x, target_x):
assert len(initial_x) == len(target_x)
for i, x in enumerate(initial_x):
if x is not None:
target_x[i] = x
def _apply_missing_flag_vals(flag_vals, trials):
for trial in trials:
trial.update({
name: flag_vals[name] for name in flag_vals
if name not in trial
})
###################################################################
# Flag dims
###################################################################
def flag_dims(flags):
    """Return (names, dims, initials) for the searchable flags in ``flags``.

    Only flag values that correspond to searchable dimensions are
    included; scalars and non-function string values are skipped.
    Names are returned sorted, with dims/initials in matching order.
    """
    dims = {}
    initials = {}
    for name, val in flags.items():
        try:
            dims[name], initials[name] = _flag_dim(val, name)
        except ValueError:
            # Not a searchable value - skip it.
            continue
    names = sorted(dims)
    return (
        names,
        [dims[n] for n in names],
        [initials[n] for n in names])
def _flag_dim(val, flag_name):
    """Map one flag value to a (dimension, initial) pair.

    Raises ValueError for values that do not describe a search space.
    """
    if isinstance(val, list):
        # A list of choices is a categorical dimension with no initial.
        return _categorical_dim(val, None)
    if isinstance(val, six.string_types):
        return _try_function_dim(val, flag_name)
    raise ValueError(val, flag_name)
def _categorical_dim(vals, initial):
    """Wrap ``vals`` in a skopt ``Categorical`` dimension."""
    # Imported lazily to keep skopt off the module import path.
    from skopt.space import space
    return space.Categorical(vals), initial
def _try_function_dim(val, flag_name):
    """Decode ``val`` as a flag function spec and build its dimension.

    Re-raises ValueError with (val, flag_name) context on decode failure.
    """
    assert isinstance(val, six.string_types), val
    try:
        func_name, func_args = flag_util.decode_flag_function(val)
    except ValueError:
        raise ValueError(val, flag_name)
    return _function_dim(func_name, func_args, flag_name)
def _function_dim(func_name, args, flag_name):
    """Dispatch a named search function to its dimension builder.

    A missing function name defaults to "uniform".
    """
    if func_name is None:
        func_name = "uniform"
    if func_name == "uniform":
        return _uniform_dim(args, func_name, flag_name)
    if func_name == "loguniform":
        return _real_dim(args, "log-uniform", func_name, flag_name)
    raise InvalidSearchDimension(
        "unknown function '%s' used for flag %s"
        % (func_name, flag_name))
def _uniform_dim(args, func_name, flag_name):
    """Build a uniform skopt dimension from decoded function args."""
    from skopt.space import space
    dim_args, initial = _dim_args_and_initial(args, func_name, flag_name)
    # check_dimension infers Integer/Real/Categorical from the arg types.
    return space.check_dimension(dim_args), initial
def _real_dim(args, prior, func_name, flag_name):
    """Build a ``Real`` dimension with the given ``prior`` (e.g. log-uniform)."""
    from skopt.space import space
    dim_args, initial = _dim_args_and_initial(args, func_name, flag_name)
    return space.Real(*(list(dim_args) + [prior])), initial
def _dim_args_and_initial(args, func_name, flag_name):
if len(args) == 2:
return args, None
elif len(args) == 3:
return args[:2], args[2]
else:
raise InvalidSearchDimension(
"%s requires 2 or 3 args, got %s for flag %s"
% (func_name, args, flag_name))
###################################################################
# Sequential trials support
###################################################################
def handle_seq_trials(batch_run, suggest_x_cb):
    """Entry point for sequential optimization of ``batch_run`` trials.

    Unsupported batch operations (printing/saving trials, requested via
    environment variables) are reported as errors; otherwise trials are
    run one at a time using ``suggest_x_cb`` for each suggestion.
    """
    if os.getenv("PRINT_TRIALS_CMD") == "1":
        _print_trials_cmd_not_supported_error()
        return
    if os.getenv("PRINT_TRIALS") == "1":
        _print_trials_not_supported_error()
        return
    if os.getenv("SAVE_TRIALS"):
        _save_trials_not_supported_error()
        return
    try:
        _run_seq_trials(batch_run, suggest_x_cb)
    except MissingSearchDimension as e:
        missing_search_dim_error(e.flag_vals)
    except InvalidObjective as e:
        _handle_general_error(e)
def _run_seq_trials(batch_run, suggest_x_cb):
    """Run up to ``max_trials`` sequential trials, re-fitting on past results.

    Each iteration collects prior trial results, asks ``suggest_x_cb`` for
    the next point in flag space, and runs one trial with those flags.
    """
    proto_flag_vals = batch_run.batch_proto.get("flags")
    # NOTE(review): the batch flags double as the suggestion options -
    # both names intentionally alias the same dict.
    batch_flag_vals = suggest_opts = batch_run.get("flags")
    max_trials = batch_run.get("max_trials") or DEFAULT_MAX_TRIALS
    names, dims, initial_x = _flag_dims_for_search(proto_flag_vals)
    random_state = batch_run.get("random_seed")
    # Never request more random starts than total trials.
    random_starts = min(
        batch_flag_vals.get("random-starts") or 0,
        max_trials)
    objective_scalar, objective_negate = _objective_y_info(batch_run)
    runs = 0
    for _ in range(max_trials):
        # Results of all completed trials so far, as (flags, scalars) pairs.
        prev_trials = batch_util.trial_results(batch_run, [objective_scalar])
        x0, y0 = _trials_xy_for_prev_trials(
            prev_trials,
            names,
            objective_negate)
        suggest_random_start = _suggest_random_start(x0, runs, random_starts)
        _log_seq_trial(suggest_random_start, random_starts, runs, prev_trials)
        # random_state is threaded through calls so suggestions stay
        # reproducible across iterations.
        suggested_x, random_state = _suggest_x(
            suggest_x_cb,
            dims, x0, y0,
            suggest_random_start,
            random_state,
            suggest_opts)
        if runs == 0 and suggested_x:
            # Apply any user-specified initial values to the first trial only.
            _apply_initial_x(initial_x, suggested_x)
        trial_flag_vals = _trial_flags_for_x(
            suggested_x,
            names,
            proto_flag_vals)
        batch_util.run_trial(batch_run, trial_flag_vals)
        runs += 1
def _flag_dims_for_search(proto_flag_vals):
    """Like ``flag_dims`` but requires at least one search dimension."""
    names, dims, initial_x = flag_dims(proto_flag_vals)
    if not names:
        raise MissingSearchDimension(proto_flag_vals)
    return names, dims, initial_x
def _objective_y_info(batch_run):
    """Return ((prefix, key, qualifier), negate) for the run objective.

    A leading "-" means maximize; since skopt always minimizes, this is
    implemented by negating the scalar (negate = -1).
    """
    objective = batch_run.get("objective") or DEFAULT_OBJECTIVE
    y_negate = 1
    if objective.startswith("-"):
        objective = objective[1:]
        y_negate = -1
    try:
        colspec = qparse.parse_colspec(objective)
    except qparse.ParseError as e:
        raise InvalidObjective(
            "invalid objective %r: %s" % (objective, e))
    if len(colspec.cols) > 1:
        raise InvalidObjective(
            "invalid objective %r: too many columns"
            % objective)
    col = colspec.cols[0]
    prefix, key = col.split_key()
    return (prefix, key, col.qualifier), y_negate
def _trials_xy_for_prev_trials(prev_trials, names, objective_negate):
assert names
x0 = []
y0 = []
for flags, y_scalars in prev_trials:
assert len(y_scalars) == 1
y = y_scalars[0]
if y is None:
continue
x0.append([flags.get(name) for name in names])
y0.append(objective_negate * y)
if not x0:
return None, None
return x0, y0
def _suggest_random_start(x0, runs_count, wanted_random_starts):
return x0 is None or runs_count < wanted_random_starts
def _log_seq_trial(suggest_random_start, random_starts, runs, prev_trials):
    """Log whether the next suggestion is a random start or model-based."""
    if suggest_random_start:
        # NOTE(review): this assert appears to fire when there are no
        # previous trials but random-starts is 0 - confirm callers
        # guarantee random_starts > 0 on the random-start path.
        assert random_starts != 0
        if runs < random_starts:
            log.info(
                "Random start for optimization (%s of %s)",
                runs + 1, random_starts)
        else:
            log.info(
                "Random start for optimization (missing previous trials)")
    else:
        log.info(
            "Found %i previous trial(s) for use in optimization",
            len(prev_trials))
def _suggest_x(suggest_x_cb, dims, x0, y0, suggest_random_start,
               random_state, suggest_opts):
    """Delegate to ``suggest_x_cb`` after debug-logging its inputs."""
    log.debug(
        "suggestion inputs: dims=%s x0=%s y0=%s "
        "random_start=%s random_state=%s opts=%s",
        dims, x0, y0, suggest_random_start, random_state,
        suggest_opts)
    return suggest_x_cb(
        dims, x0, y0, suggest_random_start, random_state, suggest_opts)
def _trial_flags_for_x(x, names, proto_flag_vals):
    """Merge a suggested point ``x`` over the prototype flag values."""
    flags = dict(proto_flag_vals)
    for name, value in zip(names, _native_python_xs(x)):
        flags[name] = value
    return flags
###################################################################
# Error handlers
###################################################################
def missing_search_dim_error(flag_vals):
    """Report flags lacking search dimensions and exit with status 1."""
    log.error(
        "flags for batch (%s) do not contain any search dimensions\n"
        "Try specifying a range for one or more flags as NAME=[MIN:MAX].",
        op_util.flags_desc(flag_vals))
    raise SystemExit(1)
def _print_trials_cmd_not_supported_error():
    """Report that PRINT_TRIALS_CMD is unsupported and exit with status 1."""
    log.error("optimizer does not support printing trials command")
    raise SystemExit(1)
def _print_trials_not_supported_error():
    """Report that PRINT_TRIALS is unsupported and exit with status 1."""
    log.error("optimizer does not support printing trials")
    raise SystemExit(1)
def _save_trials_not_supported_error():
    """Report that SAVE_TRIALS is unsupported and exit with status 1."""
    log.error("optimizer does not support saving trials")
    raise SystemExit(1)
def _handle_general_error(e):
    """Log ``e`` as an error and exit with status 1."""
    log.error(e)
    raise SystemExit(1)
| 32.926036
| 78
| 0.630874
|
4a15d90dbe5a9faf44f06f06dd0f71ebb9e263b9
| 5,658
|
py
|
Python
|
tests/inventory/pipelines/test_data/fake_buckets.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | 1
|
2018-03-26T08:15:21.000Z
|
2018-03-26T08:15:21.000Z
|
tests/inventory/pipelines/test_data/fake_buckets.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
tests/inventory/pipelines/test_data/fake_buckets.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake buckets data."""
# Raw bucket listing as returned by the GCS storage API, grouped by project.
FAKE_BUCKETS_MAP = [{
    'project_number': 11111,
    'buckets': [{
        'kind': 'storage#bucket',
        'name': 'fakebucket1',
        'timeCreated': '2016-07-21T12:57:04.604Z',
        'updated': '2016-07-21T12:57:04.604Z',
        'projectNumber': '11111',
        'metageneration': '2',
        'location': 'EU',
        'etag': 'CAE=',
        'id': 'fakebucket1',
        'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1',
        'storageClass': 'STANDARD',
        'lifecycle': {}
    }]
}]
# Flattened rows expected after transforming FAKE_BUCKETS_MAP for loading;
# timestamps are normalized and the original record is kept as raw JSON.
EXPECTED_LOADABLE_BUCKETS = [{
    'project_number': 11111,
    'bucket_id': 'fakebucket1',
    'bucket_name': 'fakebucket1',
    'bucket_kind': 'storage#bucket',
    'bucket_storage_class': 'STANDARD',
    'bucket_location': 'EU',
    'bucket_create_time': '2016-07-21 12:57:04',
    'bucket_update_time': '2016-07-21 12:57:04',
    'bucket_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1',
    'bucket_lifecycle_raw': '{}',
    'raw_bucket': '{"updated": "2016-07-21T12:57:04.604Z", "timeCreated": "2016-07-21T12:57:04.604Z", "metageneration": "2", "id": "fakebucket1", "kind": "storage#bucket", "name": "fakebucket1", "projectNumber": "11111", "etag": "CAE=", "storageClass": "STANDARD", "lifecycle": {}, "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1", "location": "EU"}'
}
]
# Raw bucket ACL entries as returned by the storage API, keyed by bucket.
FAKE_BUCKET_ACL_MAP = [{
    'bucket_name': 'fakebucket1',
    'acl': [
        {
            'kind': 'storage#bucketAccessControl',
            'bucket': 'fakebucket1',
            'entity': 'project-owners-11111',
            'etag': 'CAE=',
            'role': 'OWNER',
            'projectTeam': {
                'projectNumber': '11111',
                'team': 'owners'
            },
            'id': 'fakebucket1/project-owners-11111',
            'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111'
        },
        {
            'kind': 'storage#bucketAccessControl',
            'bucket': 'fakebucket1',
            'entity': 'project-readers-11111',
            'etag': 'CAE=',
            'role': 'READER',
            'projectTeam': {
                'projectNumber': '11111',
                'team': 'readers'},
            'id': 'fakebucket1/project-readers-11111',
            'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111'
        }
    ]
}]
# Flattened ACL rows expected after transforming FAKE_BUCKET_ACL_MAP;
# absent optional fields become None and nested dicts become JSON strings.
EXPECTED_LOADABLE_BUCKET_ACLS = [{
    'acl_id': 'fakebucket1/project-owners-11111',
    'bucket': 'fakebucket1',
    'bucket_acl_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111',
    'domain': None,
    'email': None,
    'entity': 'project-owners-11111',
    'entity_id': None,
    'kind': 'storage#bucketAccessControl',
    'project_team': '{"projectNumber": "11111", "team": "owners"}',
    'raw_bucket_acl': '{"kind": "storage#bucketAccessControl", "etag": "CAE=", "role": "OWNER", "projectTeam": {"projectNumber": "11111", "team": "owners"}, "bucket": "fakebucket1", "id": "fakebucket1/project-owners-11111", "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111", "entity": "project-owners-11111"}',
    'role': 'OWNER'
},
    {
    'acl_id': 'fakebucket1/project-readers-11111',
    'bucket': 'fakebucket1',
    'bucket_acl_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111',
    'domain': None,
    'email': None,
    'entity': 'project-readers-11111',
    'entity_id': None,
    'kind': 'storage#bucketAccessControl',
    'project_team': '{"projectNumber": "11111", "team": "readers"}',
    'raw_bucket_acl': '{"kind": "storage#bucketAccessControl", "etag": "CAE=", "role": "READER", "projectTeam": {"projectNumber": "11111", "team": "readers"}, "bucket": "fakebucket1", "id": "fakebucket1/project-readers-11111", "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111", "entity": "project-readers-11111"}',
    'role': 'READER'
}]
FAKE_RAW_BUCKET_ROW = [
{
'bucket_id': 'bucket1',
'raw_bucket': """{
"acl": [
{"id": "bucket1/project-readers-1",
"role": "READER",
"bucket": "bucket1",
"domain": "",
"email": "",
"entity": "",
"entityId": "",
"kind": "",
"projectTeam": []
}
],
"id": "bucket1"
}"""
}
]
# Parsed form expected after decoding FAKE_RAW_BUCKET_ROW's JSON blob.
EXPECTED_RAW_BUCKET_JSON = [
    {
        'bucket_name': 'bucket1',
        'acl': [
            {'id': 'bucket1/project-readers-1',
             'role': 'READER',
             'bucket': 'bucket1',
             'domain': '',
             'email': '',
             'entity': '',
             'entityId': '',
             'kind': '',
             'projectTeam': [],
             }
        ]
    }
]
| 38.753425
| 367
| 0.554259
|
4a15da5318cf55007f5bcc67bd1205f2afe4ca11
| 400
|
py
|
Python
|
molsysmt/tools/string_pdb_id/to_openmm_Modeller.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/string_pdb_id/to_openmm_Modeller.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/string_pdb_id/to_openmm_Modeller.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
def to_openmm_Modeller(item, selection='all', model_indices='all', syntaxis='MolSysMT'):
    """Convert a PDB id string into an ``openmm.Modeller`` object.

    Raises ValueError when ``item`` is not a PDB id string.
    """
    from molsysmt.tools.string_pdb_id import is_string_pdb_id
    from molsysmt.basic import convert

    if not is_string_pdb_id(item):
        raise ValueError
    # model_indices maps onto convert's frame_indices argument.
    return convert(item, to_form='openmm.Modeller', selection=selection,
                   frame_indices=model_indices, syntaxis=syntaxis)
| 30.769231
| 124
| 0.7625
|
4a15db1dc87ce151b5570098dce99c2487d5f2c4
| 1,882
|
py
|
Python
|
test/integration/test_soft_argmax2d.py
|
Manza12/kornia
|
580bbbffc771470445de27a7957d970b5a606172
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-03-24T12:43:02.000Z
|
2021-03-24T12:43:08.000Z
|
test/integration/test_soft_argmax2d.py
|
Manza12/kornia
|
580bbbffc771470445de27a7957d970b5a606172
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/test_soft_argmax2d.py
|
Manza12/kornia
|
580bbbffc771470445de27a7957d970b5a606172
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import logging
import torch
import torch.nn as nn
import torch.optim as optim
from torch.testing import assert_allclose
import kornia
logger = logging.getLogger(__name__)
class TestIntegrationSoftArgmax2d:
    """Fits a heatmap by gradient descent so that its spatial soft-argmax
    converges to a fixed target coordinate (the image center), validating
    the operator's differentiability end to end.
    """
    # optimization
    lr = 1e-3
    num_iterations = 500
    # data params
    height = 240
    width = 320

    def generate_sample(self, base_target, std_val=1.0):
        """Generates a random sample around the given point.

        The standard deviation is in pixel.
        """
        # NOTE(review): torch.rand_like draws uniform [0, 1) noise, not
        # Gaussian - confirm the "std" naming matches the intended model.
        noise = std_val * torch.rand_like(base_target)
        return base_target + noise

    def test_regression_2d(self, device):
        # create the parameters to estimate: the heatmap
        params = nn.Parameter(torch.rand(1, 1, self.height, self.width).to(device))
        # generate base sample
        target = torch.zeros(1, 1, 2).to(device)
        target[..., 0] = self.width / 2
        target[..., 1] = self.height / 2
        # create the optimizer and pass the heatmap
        optimizer = optim.Adam([params], lr=self.lr)
        # loss criterion
        criterion = nn.MSELoss()
        # spatial soft-argmax2d module
        soft_argmax2d = kornia.geometry.SpatialSoftArgmax2d(normalized_coordinates=False)
        # NOTE: check where this comes from
        temperature = (self.height * self.width) ** (0.5)
        for iter_id in range(self.num_iterations):
            x = params
            # A fresh noisy sample each step keeps the fit centered on target.
            sample = self.generate_sample(target).to(device)
            pred = soft_argmax2d(temperature * x)
            loss = criterion(pred, sample)
            logger.debug("Loss: {0:.3f} Pred: {1}".format(loss.item(), pred))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # After training, the soft-argmax should land within a couple of
        # percent of the target pixel coordinates.
        assert_allclose(pred[..., 0], target[..., 0], rtol=1e-2, atol=1e-2)
        assert_allclose(pred[..., 1], target[..., 1], rtol=1e-2, atol=1e-2)
| 29.873016
| 89
| 0.619554
|
4a15dcf88a36573dd6f48087bdfd8496255b8fc0
| 413
|
py
|
Python
|
gquant/plugin_nodes/strategy/__init__.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | 1
|
2021-07-09T14:49:08.000Z
|
2021-07-09T14:49:08.000Z
|
gquant/plugin_nodes/strategy/__init__.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | null | null | null |
gquant/plugin_nodes/strategy/__init__.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | 1
|
2021-03-22T19:54:38.000Z
|
2021-03-22T19:54:38.000Z
|
from .movingAverageStrategyNode import MovingAverageStrategyNode
from .portExpMovingAverageStrategyNode import (
PortExpMovingAverageStrategyNode, CpuPortExpMovingAverageStrategyNode)
from .xgboostStrategyNode import XGBoostStrategyNode
# Public strategy-node API re-exported at package level.
__all__ = ["MovingAverageStrategyNode",
           "PortExpMovingAverageStrategyNode",
           "CpuPortExpMovingAverageStrategyNode",
           "XGBoostStrategyNode"]
| 41.3
| 74
| 0.811138
|
4a15dd3b12735f3199cc4475ce804b098fabc96f
| 836
|
py
|
Python
|
BlackDesert/CraftingCalcPrototype1.py
|
SystemNinja/MyPythonPrograms
|
6bdebb5017994c3431aea769319f702075fff9b9
|
[
"MIT"
] | null | null | null |
BlackDesert/CraftingCalcPrototype1.py
|
SystemNinja/MyPythonPrograms
|
6bdebb5017994c3431aea769319f702075fff9b9
|
[
"MIT"
] | null | null | null |
BlackDesert/CraftingCalcPrototype1.py
|
SystemNinja/MyPythonPrograms
|
6bdebb5017994c3431aea769319f702075fff9b9
|
[
"MIT"
] | null | null | null |
#****PROTOTYPE 1 - For option 1 from main program****
#Program that calculates how many resources are needed to craft items.
def nextHighest(n):
    """Return the smallest multiple of 5 that is >= n.

    Replaces the original increment-until-divisible loop with O(1)
    arithmetic; results are identical for any integer input.
    """
    remainder = n % 5
    if remainder == 0:
        return n
    return n + (5 - remainder)
material = int(input("Enter the ammount of beer that you want to produce:"))

#Use following code if the one with names as keys won't work or is being buggy
#beer_num = {1 : 6, 2 : 5, 3 : 2, 4 : 1 }
# Ingredients needed per unit of beer, keyed by material name.
# Bug fix: the original wrote `beer_num = dict = {...}`, rebinding the
# built-in `dict`; the chained assignment is removed.
beer_num = {'water': 6, 'grain': 5, 'agent': 2, 'sugar': 1}
# Display names, in the same order as beer_num's keys (1-based index).
beer_name = {1: 'Water', 2: 'Grain', 3: 'Agent', 4: 'Sugar'}

mat_iterator = 1
for ammount in beer_num:
    # Bug fix: the original referenced undefined names `beer` and
    # `mat_name`; they are `beer_num` and `beer_name` respectively.
    total = beer_num[ammount] * material
    print("The ammount of", beer_name[mat_iterator], "you need is:", total)
    mat_iterator += 1

#input("Press enter to close.\n") #disabled during testing phase
| 38
| 79
| 0.636364
|
4a15dea87bfd58de369db36d01e63e9caf5fd002
| 1,767
|
py
|
Python
|
tests/time_res/test_ozone_Minutely.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/time_res/test_ozone_Minutely.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/time_res/test_ozone_Minutely.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')

# Load the ozone benchmark dataset; mPastData is the historical frame.
b1 = tsds.load_ozone()
df = b1.mPastData

# Add synthetic timestamp columns at 1-minute and 5-minute resolutions so
# the same signal can be trained against minutely time axes.
for k in [1 , 5]:
    df[b1.mTimeVar + "_" + str(k) + '_PerMinute'] = pd.date_range('2000-1-1', periods=df.shape[0], freq=str(k) + 'min')

#df.to_csv("outputs/ozone_WDHMS.csv");
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()

# Train one forecast engine per synthetic time column and print results.
for k in [1 , 5]:
    for timevar in [b1.mTimeVar + "_" + str(k) + '_PerMinute']:
        lEngine = autof.cForecastEngine()
        lEngine
        H = b1.mHorizon;
        # lEngine.mOptions.enable_slow_mode();
        lEngine.mOptions.mDebugPerformance = True;
        # Autoregressions disabled to keep this test fast.
        lEngine.mOptions.set_active_autoregressions([]);
        lEngine.train(df , timevar , b1.mSignalVar, H);
        lEngine.getModelInfo();
        print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
        lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
        dfapp_in = df.copy();
        dfapp_in.tail()
        # H = 12
        # Forecast H steps past the end of the training data.
        dfapp_out = lEngine.forecast(dfapp_in, H);
        #dfapp_out.to_csv("outputs/ozone_" + timevar + "apply_out.csv")
        dfapp_out.tail(2 * H)
        print("Forecast Columns " , dfapp_out.columns);
        Forecast_DF = dfapp_out[[timevar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
        print(Forecast_DF.info())
        print("Forecasts\n" , Forecast_DF.tail(H));
        print("\n\n<ModelInfo>")
        print(lEngine.to_json());
        print("</ModelInfo>\n\n")
        print("\n\n<Forecast>")
        print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
        print("</Forecast>\n\n")
        # lEngine.standardPlots(name = "outputs/ozone_" + timevar)
| 29.949153
| 119
| 0.617431
|
4a15dec29b3a5dcbb71e128687a6bef75dc33605
| 9,854
|
py
|
Python
|
docs/source/conf.py
|
dabeaz/llvmlite
|
2521d7afb52c59f7121e2010b63dda9b70f96165
|
[
"BSD-2-Clause"
] | 2
|
2018-12-17T14:00:22.000Z
|
2020-01-11T05:49:28.000Z
|
docs/source/conf.py
|
dabeaz/llvmlite
|
2521d7afb52c59f7121e2010b63dda9b70f96165
|
[
"BSD-2-Clause"
] | null | null | null |
docs/source/conf.py
|
dabeaz/llvmlite
|
2521d7afb52c59f7121e2010b63dda9b70f96165
|
[
"BSD-2-Clause"
] | 2
|
2018-05-05T11:31:14.000Z
|
2021-12-21T22:23:21.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# llvmlite documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 29 14:18:42 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'llvmlite'
copyright = '2015, Continuum Analytics'
author = 'Continuum Analytics'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: keep both strings in sync with the llvmlite package version when
# cutting a release.
# The short X.Y version.
version = '0.19.0'
# The full version, including alpha/beta/rc tags.
release = '0.19.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Default to Sphinx's built-in theme; replaced below for local builds.
html_theme = 'default'
# on_rtd is True when the build is running on readthedocs.org.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    # only import and set the theme if we're building docs locally;
    # readthedocs.org applies its own theme by default, so there is no
    # need (and no way) to configure it from here.
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'llvmlitedoc'
# -- Options for LaTeX output ---------------------------------------------
# LaTeX page/preamble customization; all values left at their defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'llvmlite.tex', 'llvmlite Documentation',
   'Continuum Analytics', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'llvmlite', 'llvmlite Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'llvmlite', 'llvmlite Documentation',
author, 'llvmlite', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    # NOTE: pinned to the LLVM 3.8.0 docs; bump together with the LLVM
    # version that llvmlite targets.
    'llvm': ('http://llvm.org/releases/3.8.0/docs', None),
}
| 32.202614
| 86
| 0.71778
|
4a15dfc5bdf1997d91bbece3de9093bc71cb8385
| 744
|
py
|
Python
|
floodsystem/risk.py
|
emilydarnell/Flood-warning-group-123
|
000ea2995588624269b3800fdfae194ebc109e99
|
[
"MIT"
] | 1
|
2022-01-22T15:19:18.000Z
|
2022-01-22T15:19:18.000Z
|
floodsystem/risk.py
|
emilydarnell/Flood-warning-group-123
|
000ea2995588624269b3800fdfae194ebc109e99
|
[
"MIT"
] | null | null | null |
floodsystem/risk.py
|
emilydarnell/Flood-warning-group-123
|
000ea2995588624269b3800fdfae194ebc109e99
|
[
"MIT"
] | null | null | null |
from floodsystem.datafetcher import fetch_measure_levels
import statistics
import datetime
def mean_level(station, days_back):
    """Return the mean *relative* water level at a station over recent days.

    Fetches the station's level readings for the last ``days_back`` days,
    averages them, then rescales against the station's typical range so
    that 0.0 corresponds to the typical low and 1.0 to the typical high.

    Args:
        station: a MonitoringStation-like object exposing ``measure_id``
            and a ``typical_range`` (low, high) tuple.
        days_back: number of days of history to fetch.

    Returns:
        The mean relative level as a float, or None when no level data
        was returned for the period.
    """
    dates, levels = fetch_measure_levels(
        station.measure_id, dt=datetime.timedelta(days=days_back))
    # No readings means no meaningful average; return None explicitly
    # (the original fell off the end and returned None implicitly).
    if not levels:
        return None
    mean = statistics.mean(levels)
    # NOTE(review): assumes typical_range is a valid (low, high) pair;
    # stations with missing range data raise here, as in the original.
    low, high = station.typical_range
    return (mean - low) / (high - low)
| 49.6
| 115
| 0.74328
|
4a15dfd944e35f879b6736bf6e566a18944cd9a6
| 1,616
|
py
|
Python
|
XSum-Topic-ConvS2S/fairseq/optim/lr_scheduler/fixed_schedule.py
|
zsquaredz/XSum
|
10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
[
"MIT"
] | 235
|
2018-11-26T16:53:27.000Z
|
2022-03-24T13:04:48.000Z
|
XSum-Topic-ConvS2S/fairseq/optim/lr_scheduler/fixed_schedule.py
|
zsquaredz/XSum
|
10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
[
"MIT"
] | 24
|
2018-12-19T01:02:27.000Z
|
2022-01-16T07:47:36.000Z
|
XSum-Topic-ConvS2S/fairseq/optim/lr_scheduler/fixed_schedule.py
|
zsquaredz/XSum
|
10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
[
"MIT"
] | 59
|
2018-12-07T18:57:05.000Z
|
2022-03-24T13:34:09.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim.lr_scheduler
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
    """Decay the LR on a fixed schedule.

    Steps through the list ``args.lr`` epoch by epoch; after the optional
    ``--force-anneal`` epoch, the last listed LR is shrunk by
    ``args.lr_shrink`` every epoch.
    """
    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        # LambdaLR multiplies the optimizer's base LR by the factor
        # returned from self.anneal(epoch). Note self.optimizer is the
        # fairseq wrapper; .optimizer is the underlying torch optimizer.
        self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer.optimizer, self.anneal)
    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
                            help='force annealing at specified epoch')
    def anneal(self, epoch):
        """Return the LR multiplier for *epoch*, relative to the first LR."""
        lrs = self.args.lr
        if self.args.force_anneal is None or epoch < self.args.force_anneal:
            # use fixed LR schedule; clamp to the last listed LR
            next_lr = lrs[min(epoch, len(lrs) - 1)]
        else:
            # anneal based on lr_shrink after the forced epoch
            next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal)
        return next_lr / lrs[0] # correct for scaling from LambdaLR
    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        self.lr_scheduler.step(epoch)
        # Return the LR actually in effect for the next epoch.
        return self.optimizer.get_lr()
| 37.581395
| 91
| 0.659035
|
4a15dfde7b84f4af969fddbd9fab00bd01cfb33d
| 10,584
|
py
|
Python
|
src/streetview/core.py
|
juliantrue/Streetview-Segmenting
|
337740e6ebd2284c880ace09a11032c5914b39a4
|
[
"MIT"
] | 1
|
2021-02-27T07:39:05.000Z
|
2021-02-27T07:39:05.000Z
|
src/streetview/core.py
|
juliantrue/Streetview-Segmenting
|
337740e6ebd2284c880ace09a11032c5914b39a4
|
[
"MIT"
] | null | null | null |
src/streetview/core.py
|
juliantrue/Streetview-Segmenting
|
337740e6ebd2284c880ace09a11032c5914b39a4
|
[
"MIT"
] | 1
|
2021-12-06T23:35:34.000Z
|
2021-12-06T23:35:34.000Z
|
import os, shutil, logging, math
from collections import OrderedDict
import requests
import cv2
from .logging_facility import LoggingWrapper
"""
Usage:
location1: type tuple: (lat1, lon1)
location2: type tuple: (lat2, lon2)
Based on Haversine formula found here:
https://en.wikipedia.org/wiki/Haversine_formula
returns: result: type float: distance in meters
"""
def delta_lat_lon_to_meters(location1, location2):
E_radius = 6378.137 # ~Earth's radius in kilometers
d_lat = (location2[0]*math.pi/180) - (location1[0]*math.pi/180)
d_lon = (location2[1]*math.pi/180) - (location1[1]*math.pi/180)
a = math.sin(d_lat/2)*math.sin(d_lat/2) + \
math.cos(location1[0]*math.pi / 180)*math.cos(location2[0]*math.pi / 180) * \
math.sin(d_lon/2)*math.sin(d_lon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R*c
return d * 1000
"""
Usage:
curr_location: type tuple (lat, lon
dx: type float: change in x in meters
dy: type float: change in y in meters
returns: type tuple: location(lat, lon)
"""
def meters_to_lat_lon(curr_location, dx, dy):
E_radius = 6378.137 # ~Earth's radius in kilometers
delta_lat = curr_location[0] + (dy / E_radius) * (180 / math.pi)
delta_lon = curr_location[1] + (dx / E_radius) * (180 / math.pi) / \
cos(curr_location[0] * math.pi/180)
return (new_lat, new_lon)
"""
Saving helper function for streamed data from requests
"""
def stream_save(r, directory, save_to_file):
try:
os.mkdir(directory)
except FileExistsError as e:
pass
with open('{}.png'.format(save_to_file), 'wb') as out_file:
shutil.copyfileobj(r.raw, out_file)
"""
Core functionality of the module
"""
class Core(object):
    """Core Street View download client.

    Builds Street View Static API requests (image + metadata), validates
    availability via the metadata endpoint (metadata requests are free),
    then downloads one image per cardinal heading (0, 90, 180, 270) for a
    geographic location or a free-text search string.
    """
    def __init__(self, logs_folder=None):
        # LoggingWrapper configures the handlers; keep a module-named logger.
        self.L = LoggingWrapper(log_folder_path=logs_folder)
        self.logger = logging.getLogger('Streetview_Module')
        self.logger.info("Streetview Module Initialized")
    """
    Usage:
        Pass in the base url on which to build the request on, followed by the API_KEY
        and the signature if needed. The request builder then takes as many kwargs as
        needed.
        Returns Request string
    """
    def request_builder(self, BASE_URL, API_KEY, kwargs, signature=None):
        # kwargs is an OrderedDict so the query-parameter order is stable.
        request = BASE_URL
        for key in kwargs:
            request += "{}={}&".format(key, kwargs[key])
        request += "key={}".format(API_KEY)
        if not (signature is None):
            request += "&signature={}".format(signature)
        return request
    """
    Usage:
        See request builder. Builds request for metadata.
        Run this prior to sending image request to google servers. Confirms image
        availability as well as request validation.
    """
    def metadata_request_builder(self, BASE_URL, API_KEY, kwargs, signature=None):
        # NOTE(review): assumes BASE_URL ends with '?', which is replaced
        # by '/metadata?' -- confirm against the callers' BASE_URL format.
        request = BASE_URL
        request = request[:-1] + "/" + "metadata?"
        for key in kwargs:
            request += "{}={}&".format(key, kwargs[key])
        request += "key={}".format(API_KEY)
        if not (signature is None):
            request += "&signature={}".format(signature)
        return request
    def _fetch_headings(self, BASE_URL, API_KEY, loc_s, save_to, size_s,
                        source_s, signature):
        """Shared download loop for get_by_location / get_by_search.

        For each cardinal heading: validate via the metadata endpoint,
        then either save the image under ``save_to`` (skipping files that
        already exist) or collect it as an in-memory OpenCV image.

        Returns (imgs, directory).
        """
        headings = [0, 90, 180, 270]  # N, E, S, W
        # Memory for images (only used when save_to is None)
        imgs = []
        user_repsonse = input("Are you sure you want to download {} images?(yes/no): ".format(len(headings)))
        if not (user_repsonse == "yes"):
            raise Exception("User did not confirm image download.")
        directory = ""  # placeholder for returned path to data
        for heading in headings:
            kwargs = OrderedDict([('size', size_s), ('location', loc_s),
                                  ('heading', str(heading)), ('source', source_s)])
            self.logger.info("Kwargs: {}".format(kwargs))
            # Request image metadata first; make noise if response is not OK.
            # BUGFIX: the signature is now forwarded to both builders; it
            # was previously accepted by the public methods but ignored.
            meta_req = self.metadata_request_builder(BASE_URL, API_KEY, kwargs,
                                                     signature=signature)
            self.logger.info("Sending image metadata request: {}".format(meta_req))
            meta_r = requests.get(meta_req)
            response = meta_r.json()
            if self.L.debug_mode:
                self.logger.debug("Response: {}".format(meta_r.text))
            if not (str(response['status']) == "OK"):
                raise Exception("Request status: {}".format(response['status']))
            # Build the actual image request for this heading.
            req = self.request_builder(BASE_URL, API_KEY, kwargs,
                                       signature=signature)
            to_file = req.split("&")[1] + req.split("&")[2]
            if not save_to == None:
                directory = os.path.join(save_to, req.split("&")[1])
                save_to_file = os.path.join(directory, to_file)
                # BUGFIX: skip the download entirely when the file already
                # exists (previously an existing file was re-downloaded to
                # a temp file whose contents were then discarded).
                if not os.path.isfile(save_to_file + ".png"):
                    self.logger.info("Sending image request: {}".format(req))
                    r = requests.get(req, stream=True)
                    stream_save(r, directory, save_to_file)
                    del r
            else:
                # BUGFIX: the original raised NameError here (save_to_file
                # was undefined when save_to was None); stream via a temp
                # file into an in-memory OpenCV image instead.
                self.logger.info("Sending image request: {}".format(req))
                r = requests.get(req, stream=True)
                with open('./temp.png', 'wb') as out_file:
                    shutil.copyfileobj(r.raw, out_file)
                del r
                imgs.append(cv2.imread('./temp.png'))
                os.remove('./temp.png')
        return imgs, directory
    """
    Usage:
        Requires tuple of geographic coordinates in the format (lat,lon)
    Returns:
        List of images associated with that location unless save_to parameter is
        defined, in which case the directory the images were saved to is returned.
    Example
        location = (43.656009, -79.380354)
    """
    def get_by_location(self, BASE_URL, API_KEY, location, save_to=None,
                        size=(600,400), outdoor_only=True, signature=None):
        if not (type(location) is tuple):
            raise Exception("\'location\' must be of type tuple.")
        if not (type(size) is tuple):
            raise Exception("\'size\' must be of type tuple.")
        # Remove brackets from tuple input and convert to strings
        size_s = str(size[0]) + "x" + str(size[1])
        loc_s = str(location)[1:][:-1]
        source_s = "outdoor" if outdoor_only else "default"
        imgs, directory = self._fetch_headings(BASE_URL, API_KEY, loc_s,
                                               save_to, size_s, source_s,
                                               signature)
        return imgs if save_to == None else directory
    """
    Usage:
        Requires address in string format. Address may resemble a google maps query
        or just the actual address.
    Returns:
        List of images associated with that address unless save_to parameter is
        defined, in which case the directory the images were saved to is returned.
    Example
        search_string = "245 Church St, Toronto, ON M5B 2K3"
        imgs = get_by_search(search_string)
    """
    def get_by_search(self, BASE_URL, API_KEY, search_string, save_to=None,
                      size=(600,400), outdoor_only=True, signature=None):
        if not (type(search_string) is type("string")):
            # BUGFIX: the error message previously named 'location'.
            raise Exception("\'search_string\' must be of type string.")
        if not (type(size) is tuple):
            raise Exception("\'size\' must be of type tuple.")
        # Convert to strings; URL-encode spaces in the query.
        size_s = str(size[0]) + "x" + str(size[1])
        loc_s = search_string.replace(" ", "%20")
        source_s = "outdoor" if outdoor_only else "default"
        imgs, directory = self._fetch_headings(BASE_URL, API_KEY, loc_s,
                                               save_to, size_s, source_s,
                                               signature)
        return imgs if save_to == None else directory
    """
    Usage:
        Requires:
            base_url
            API_KEY
            location: tuple of geographic coordinates in the format (lat,lon)
            radius: radius in metres around location center to get images from
    Returns:
        ALL available images in the given radius
    Example
        location = (43.656009, -79.380354), radius = 10
    """
    def get_all_in_area(self, BASE_URL, API_KEY, location, radius, save_to=None,
                        size=(600,400), outdoor_only=True, signature=None):
        # Not yet implemented.
        pass
| 37.136842
| 109
| 0.583239
|
4a15e0f62a3d1a02d55fc88642279f23cf0bbeb1
| 3,402
|
py
|
Python
|
S4/S4 Library/simulation/visualization/portal_visualizer.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/visualization/portal_visualizer.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/visualization/portal_visualizer.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
from build_buy import register_build_buy_exit_callback, unregister_build_buy_exit_callback
from debugvis import Context
from sims4 import commands
from sims4.color import Color
import services
import sims4.log
logger = sims4.log.Logger('Debugvis')
class PortalVisualizer:
    """Debug visualizer that draws arches for object portals on a layer.

    When portal_obj_id is non-zero, only that object's portals (optionally
    narrowed to portal_id) are drawn and redrawn as the object moves;
    otherwise every portal-caching object is drawn.
    """

    def __init__(self, layer, portal_obj_id=0, portal_id=0):
        self.layer = layer
        self.portal_obj_id = portal_obj_id
        self.portal_id = portal_id
        self._start()

    def _start(self):
        """Register add/remove/build-buy callbacks and draw the initial state."""
        object_manager = services.object_manager()
        object_manager.register_portal_added_callback(self._draw_portal_obj)
        object_manager.register_portal_removed_callback(self._on_portal_removed)
        register_build_buy_exit_callback(self._draw_all_portals)
        if self.portal_obj_id:
            obj = services.object_manager().get(self.portal_obj_id)
            if obj is not None:
                # Redraw whenever the tracked object moves.
                obj.register_on_location_changed(self._draw_portal_obj)
        self._draw_all_portals()

    def stop(self):
        """Unregister every callback registered by _start()."""
        object_manager = services.object_manager()
        object_manager.unregister_portal_added_callback(self._draw_portal_obj)
        object_manager.unregister_portal_removed_callback(self._on_portal_removed)
        unregister_build_buy_exit_callback(self._draw_all_portals)
        if self.portal_obj_id:
            obj = services.object_manager().get(self.portal_obj_id)
            if obj is not None:
                obj.unregister_on_location_changed(self._draw_portal_obj)

    def _draw_portal_pair(self, portal_instance, portal_id, layer, color_entry, color_exit, height, detail):
        """Draw one arch from a portal's entry location to its exit location."""
        (p_entry, p_exit) = portal_instance.get_portal_locations(portal_id)
        layer.add_arch(p_entry, p_exit, height=height, detail=detail, color_a=color_entry, color_b=color_exit)

    def _draw_portal_obj(self, portal_obj, *args, portal_id=0, **kwargs):
        """Draw the 'there' and 'back' arches for every portal on portal_obj."""
        with Context(self.layer, preserve=True) as layer:
            for portal_instance in portal_obj.get_portal_instances():
                if portal_id and not portal_id == portal_instance.there and not portal_id == portal_instance.back:
                    continue
                if portal_instance.there is not None:
                    self._draw_portal_pair(portal_instance, portal_instance.there, layer, Color.CYAN, Color.MAGENTA, 6.0, 6)
                if portal_instance.back is not None:
                    self._draw_portal_pair(portal_instance, portal_instance.back, layer, Color.GREEN, Color.ORANGE, 4.0, 6)

    def _on_portal_removed(self, portal_obj):
        """Stop the visualizer when the tracked object goes away; otherwise redraw."""
        # BUGFIX: the removed object's id was compared against
        # self.portal_id (a portal id, not an object id), so the
        # tracked-object stop path could never trigger; compare against
        # self.portal_obj_id instead.
        if self.portal_obj_id and portal_obj.id == self.portal_obj_id:
            full_command = 'debugvis.portals.stop' + ' {}'.format(self.portal_obj_id)
            client_id = services.client_manager().get_first_client_id()
            commands.execute(full_command, client_id)
        else:
            self._draw_all_portals()

    def _draw_all_portals(self, *_, **__):
        """Clear the layer, then draw the tracked object's portals or all portals."""
        object_manager = services.object_manager()
        with Context(self.layer, preserve=True) as context:
            context.layer.clear()
            if self.portal_obj_id:
                portal_obj = object_manager.get(self.portal_obj_id)
                if portal_obj is not None:
                    self._draw_portal_obj(portal_obj, portal_id=self.portal_id)
                return
            for obj in object_manager.portal_cache_gen():
                self._draw_portal_obj(obj, portal_id=0)
| 47.915493
| 124
| 0.698707
|
4a15e30e8a5545c3cb4de655f8e75e59fe938833
| 772
|
py
|
Python
|
tests/api_tests/conftest.py
|
JobtechSwe/castaway
|
e0917511b20152f0bd7e2802b73a0beae30a96f5
|
[
"Apache-2.0"
] | null | null | null |
tests/api_tests/conftest.py
|
JobtechSwe/castaway
|
e0917511b20152f0bd7e2802b73a0beae30a96f5
|
[
"Apache-2.0"
] | null | null | null |
tests/api_tests/conftest.py
|
JobtechSwe/castaway
|
e0917511b20152f0bd7e2802b73a0beae30a96f5
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
import requests
import tests.test_resources.settings as settings
@pytest.fixture
def session():
    """
    Create a requests Session with the search-API headers applied.

    Reusing a Session allows HTTP connections to be pooled between the
    requests made within one test.
    NOTE(review): despite the original comment, this fixture uses pytest's
    default *function* scope, so a fresh Session is created for every
    test; pass scope="session" to @pytest.fixture if one Session per test
    run is actually intended.
    Returns a Session object
    """
    s = requests.sessions.Session()
    s.headers.update(settings.headers_search)
    return s
@pytest.fixture
def session_stream():
    """
    Create a requests Session with the stream-API headers applied.

    Reusing a Session allows HTTP connections to be pooled between the
    requests made within one test.
    NOTE(review): like `session`, this is function-scoped despite the
    original comment; add scope="session" if run-wide reuse is intended.
    Returns a Session object
    """
    s = requests.sessions.Session()
    s.headers.update(settings.headers_stream)
    return s
| 24.903226
| 85
| 0.720207
|
4a15e4a20037ccf4f202fc1ecb1502f292b26744
| 13,665
|
py
|
Python
|
darknet.py
|
TAMU-VITA/3D_Adversarial_Logo
|
c96b6e769fb2f4a5dd7bf06eb9f2b9d82ede3990
|
[
"MIT"
] | 11
|
2020-06-25T00:14:08.000Z
|
2020-08-06T18:23:29.000Z
|
darknet.py
|
bit-twidd1er/adversarial-yolo-snapshot
|
8f77b313489d05e1f1a5a28d311c6e4b05d06bd5
|
[
"MIT"
] | 1
|
2021-04-16T15:20:07.000Z
|
2022-03-11T02:23:06.000Z
|
darknet.py
|
bit-twidd1er/adversarial-yolo-snapshot
|
8f77b313489d05e1f1a5a28d311c6e4b05d06bd5
|
[
"MIT"
] | 7
|
2019-11-27T09:13:05.000Z
|
2022-02-22T12:34:17.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from region_loss import RegionLoss
from cfg import *
class MaxPoolStride1(nn.Module):
    """2x2 max-pool with stride 1 that preserves spatial size.

    The input is padded by one row/column on the bottom/right edge in
    'replicate' mode, so the pooled output has the same H and W.
    """
    def __init__(self):
        super(MaxPoolStride1, self).__init__()

    def forward(self, x):
        padded = F.pad(x, (0, 1, 0, 1), mode='replicate')
        return F.max_pool2d(padded, 2, stride=1)
class Reorg(nn.Module):
    """Space-to-depth layer: moves each stride x stride spatial patch into
    the channel dimension, mapping (B, C, H, W) to
    (B, C*stride*stride, H//stride, W//stride). Used by YOLOv2 to merge a
    high-resolution feature map with a lower-resolution one."""
    def __init__(self, stride=2):
        super(Reorg, self).__init__()
        self.stride = stride
    def forward(self, x):
        stride = self.stride
        # Only 4-D (B, C, H, W) inputs, evenly divisible by the stride.
        assert(x.data.dim() == 4)
        B = x.data.size(0)
        C = x.data.size(1)
        H = x.data.size(2)
        W = x.data.size(3)
        assert(H % stride == 0)
        assert(W % stride == 0)
        ws = stride
        hs = stride
        #Simen: edited as suggested here: https://github.com/marvis/pytorch-yolo2/issues/129#issue-350726531
        # (Python 3 port: view() needs int sizes, hence // not /.)
        #x = x.view(B, C, H/hs, hs, W/ws, ws).transpose(3,4).contiguous()
        #x = x.view(B, C, H/hs*W/ws, hs*ws).transpose(2,3).contiguous()
        #x = x.view(B, C, hs*ws, H/hs, W/ws).transpose(1,2).contiguous()
        #x = x.view(B, hs*ws*C, H/hs, W/ws)
        x = x.view(B, C, H//hs, hs, W//ws, ws).transpose(3,4).contiguous()
        x = x.view(B, C, H//hs*W//ws, hs*ws).transpose(2,3).contiguous()
        x = x.view(B, C, hs*ws, H//hs, W//ws).transpose(1,2).contiguous()
        x = x.view(B, hs*ws*C, H//hs, W//ws)
        return x
class GlobalAvgPool2d(nn.Module):
    """Average over the full spatial extent: (N, C, H, W) -> (N, C)."""
    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        n, c = x.data.size(0), x.data.size(1)
        h, w = x.data.size(2), x.data.size(3)
        # Pooling with a kernel covering the whole feature map equals a
        # global spatial average; flatten the trailing 1x1 dims.
        pooled = F.avg_pool2d(x, (h, w))
        return pooled.view(n, c)
# for route and shortcut
class EmptyModule(nn.Module):
    """Identity placeholder used for 'route' and 'shortcut' layers; the
    actual concatenation/addition is performed in Darknet.forward()."""
    def __init__(self):
        super(EmptyModule, self).__init__()
    def forward(self, x):
        return x
# support route shortcut and reorg
class Darknet(nn.Module):
    def __init__(self, cfgfile):
        """Parse *cfgfile* and build the layer modules.

        self.blocks holds the parsed cfg sections (list of dicts) and
        self.models the matching nn.ModuleList from create_network().
        """
        super(Darknet, self).__init__()
        self.blocks = parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks) # merge conv, bn,leaky
        # Last module -- presumably the region (detection/loss) layer.
        self.loss = self.models[len(self.models)-1]
        # Network input size from the leading [net] section.
        self.width = int(self.blocks[0]['width'])
        self.height = int(self.blocks[0]['height'])
        if self.blocks[(len(self.blocks)-1)]['type'] == 'region':
            # Mirror the region layer's detection parameters on the net.
            self.anchors = self.loss.anchors
            self.num_anchors = self.loss.num_anchors
            self.anchor_step = self.loss.anchor_step
            self.num_classes = self.loss.num_classes
        # Darknet weight-file header (major, minor, revision, seen);
        # overwritten by load_weights() and written back by save_weights().
        self.header = torch.IntTensor([0,0,0,0])
        self.seen = 0
def forward(self, x):
ind = -2
self.loss = None
outputs = dict()
for block in self.blocks:
ind = ind + 1
#if ind > 0:
# return x
if block['type'] == 'net':
continue
elif block['type'] == 'convolutional' or block['type'] == 'maxpool' or block['type'] == 'reorg' or block['type'] == 'avgpool' or block['type'] == 'softmax' or block['type'] == 'connected':
x = self.models[ind](x)
outputs[ind] = x
elif block['type'] == 'route':
layers = block['layers'].split(',')
layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
if len(layers) == 1:
x = outputs[layers[0]]
outputs[ind] = x
elif len(layers) == 2:
x1 = outputs[layers[0]]
x2 = outputs[layers[1]]
x = torch.cat((x1,x2),1)
outputs[ind] = x
elif block['type'] == 'shortcut':
from_layer = int(block['from'])
activation = block['activation']
from_layer = from_layer if from_layer > 0 else from_layer + ind
x1 = outputs[from_layer]
x2 = outputs[ind-1]
x = x1 + x2
if activation == 'leaky':
x = F.leaky_relu(x, 0.1, inplace=True)
elif activation == 'relu':
x = F.relu(x, inplace=True)
outputs[ind] = x
elif block['type'] == 'region':
continue
if self.loss:
self.loss = self.loss + self.models[ind](x)
else:
self.loss = self.models[ind](x)
outputs[ind] = None
elif block['type'] == 'cost':
continue
else:
print('unknown type %s' % (block['type']))
return x
def print_network(self):
print_cfg(self.blocks)
def create_network(self, blocks):
models = nn.ModuleList()
prev_filters = 3
out_filters =[]
conv_id = 0
for block in blocks:
if block['type'] == 'net':
prev_filters = int(block['channels'])
continue
elif block['type'] == 'convolutional':
conv_id = conv_id + 1
batch_normalize = int(block['batch_normalize'])
filters = int(block['filters'])
kernel_size = int(block['size'])
stride = int(block['stride'])
is_pad = int(block['pad'])
#Simen: edit as sugessted here: https://github.com/marvis/pytorch-yolo2/issues/129#issue-350726531
#pad = (kernel_size-1)/2 if is_pad else 0
pad = (kernel_size-1)//2 if is_pad else 0
activation = block['activation']
model = nn.Sequential()
if batch_normalize:
model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
#model.add_module('bn{0}'.format(conv_id), BN2d(filters))
else:
model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
if activation == 'leaky':
model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
elif activation == 'relu':
model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))
prev_filters = filters
out_filters.append(prev_filters)
models.append(model)
elif block['type'] == 'maxpool':
pool_size = int(block['size'])
stride = int(block['stride'])
if stride > 1:
model = nn.MaxPool2d(pool_size, stride)
else:
model = MaxPoolStride1()
out_filters.append(prev_filters)
models.append(model)
elif block['type'] == 'avgpool':
model = GlobalAvgPool2d()
out_filters.append(prev_filters)
models.append(model)
elif block['type'] == 'softmax':
model = nn.Softmax()
out_filters.append(prev_filters)
models.append(model)
elif block['type'] == 'cost':
if block['_type'] == 'sse':
model = nn.MSELoss(size_average=True)
elif block['_type'] == 'L1':
model = nn.L1Loss(size_average=True)
elif block['_type'] == 'smooth':
model = nn.SmoothL1Loss(size_average=True)
out_filters.append(1)
models.append(model)
elif block['type'] == 'reorg':
stride = int(block['stride'])
prev_filters = stride * stride * prev_filters
out_filters.append(prev_filters)
models.append(Reorg(stride))
elif block['type'] == 'route':
layers = block['layers'].split(',')
ind = len(models)
layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
if len(layers) == 1:
prev_filters = out_filters[layers[0]]
elif len(layers) == 2:
assert(layers[0] == ind - 1)
prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
out_filters.append(prev_filters)
models.append(EmptyModule())
elif block['type'] == 'shortcut':
ind = len(models)
prev_filters = out_filters[ind-1]
out_filters.append(prev_filters)
models.append(EmptyModule())
elif block['type'] == 'connected':
filters = int(block['output'])
if block['activation'] == 'linear':
model = nn.Linear(prev_filters, filters)
elif block['activation'] == 'leaky':
model = nn.Sequential(
nn.Linear(prev_filters, filters),
nn.LeakyReLU(0.1, inplace=True))
elif block['activation'] == 'relu':
model = nn.Sequential(
nn.Linear(prev_filters, filters),
nn.ReLU(inplace=True))
prev_filters = filters
out_filters.append(prev_filters)
models.append(model)
elif block['type'] == 'region':
loss = RegionLoss()
anchors = block['anchors'].split(',')
loss.anchors = [float(i) for i in anchors]
loss.num_classes = int(block['classes'])
loss.num_anchors = int(block['num'])
loss.anchor_step = len(loss.anchors)/loss.num_anchors
loss.object_scale = float(block['object_scale'])
loss.noobject_scale = float(block['noobject_scale'])
loss.class_scale = float(block['class_scale'])
loss.coord_scale = float(block['coord_scale'])
out_filters.append(prev_filters)
models.append(loss)
else:
print('unknown type %s' % (block['type']))
return models
def load_weights(self, weightfile):
    """Load darknet-format binary weights from *weightfile* into self.models.

    The file begins with a 4-int32 header (stored in self.header; its last
    entry is the 'seen' sample counter), followed by one flat float32 buffer
    that is consumed layer by layer in network order.
    """
    with open(weightfile, 'rb') as fh:
        header = np.fromfile(fh, count=4, dtype=np.int32)
        self.header = torch.from_numpy(header)
        self.seen = self.header[3]
        weights = np.fromfile(fh, dtype=np.float32)

    offset = 0
    layer_idx = -2  # the leading 'net' section occupies no model slot
    # Sections that carry no trainable parameters in the weight file.
    no_weight_types = ('maxpool', 'reorg', 'route', 'shortcut',
                       'region', 'avgpool', 'softmax', 'cost')
    for section in self.blocks:
        if offset >= weights.size:
            break
        layer_idx += 1
        kind = section['type']
        if kind == 'net':
            continue
        if kind == 'convolutional':
            model = self.models[layer_idx]
            if int(section['batch_normalize']):
                offset = load_conv_bn(weights, offset, model[0], model[1])
            else:
                offset = load_conv(weights, offset, model[0])
        elif kind == 'connected':
            model = self.models[layer_idx]
            # Non-linear activations wrap the Linear in a Sequential (index 0);
            # a 'linear' connected layer is a bare nn.Linear.
            if section['activation'] != 'linear':
                offset = load_fc(weights, offset, model[0])
            else:
                offset = load_fc(weights, offset, model)
        elif kind in no_weight_types:
            pass
        else:
            print('unknown type %s' % (kind,))
def save_weights(self, outfile, cutoff=0):
    """Write the network weights to *outfile* in darknet binary format.

    cutoff: index of the last block to save; <= 0 means save every block.
    Mirrors load_weights(): convolutional layers write conv(+bn) params and
    connected layers write the underlying nn.Linear parameters.
    """
    if cutoff <= 0:
        cutoff = len(self.blocks) - 1
    fp = open(outfile, 'wb')
    self.header[3] = self.seen  # keep the 'seen' counter current in the header
    header = self.header
    header.numpy().tofile(fp)
    ind = -1
    for blockId in range(1, cutoff + 1):
        ind = ind + 1
        block = self.blocks[blockId]
        if block['type'] == 'convolutional':
            model = self.models[ind]
            batch_normalize = int(block['batch_normalize'])
            if batch_normalize:
                save_conv_bn(fp, model[0], model[1])
            else:
                save_conv(fp, model[0])
        elif block['type'] == 'connected':
            model = self.models[ind]
            # Fix: the original called save_fc(fc, ...) with the undefined
            # name `fc` (NameError at runtime), and had the model/model[0]
            # branches inverted relative to load_weights(). Non-linear
            # activations wrap the Linear in a Sequential at index 0.
            if block['activation'] != 'linear':
                save_fc(fp, model[0])
            else:
                save_fc(fp, model)
        elif block['type'] in ('maxpool', 'reorg', 'route', 'shortcut',
                               'region', 'avgpool', 'softmax', 'cost'):
            pass  # no trainable parameters for these section types
        else:
            print('unknown type %s' % (block['type']))
    fp.close()
| 39.83965
| 200
| 0.484376
|
4a15e4c15324eaf081f48978013234f10610c5cb
| 1,764
|
py
|
Python
|
app.py
|
ARNAV-GHATE/TrashNet
|
abef8d1e1dcef06d1e8ed8eb6c2f3af7384323b0
|
[
"MIT"
] | null | null | null |
app.py
|
ARNAV-GHATE/TrashNet
|
abef8d1e1dcef06d1e8ed8eb6c2f3af7384323b0
|
[
"MIT"
] | null | null | null |
app.py
|
ARNAV-GHATE/TrashNet
|
abef8d1e1dcef06d1e8ed8eb6c2f3af7384323b0
|
[
"MIT"
] | null | null | null |
#from flask import Flask
#from flask import render_template
#from flask import request
#from PIL import Image
from prediction import *
import os
import cv2
from flask import Flask, render_template, request,jsonify
from PIL import Image
import tensorflow as tf
#import jinja2
# Flask application object and upload-folder configuration.
app=Flask(__name__)
#app.jinja_env.line_statement_prefix = '%'
# NOTE(review): os.path.basename('.') evaluates to '.', so uploads are saved
# to the current working directory — confirm that is intended.
UPLOAD_FOLDER = os.path.basename('.')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/", methods=['GET', 'POST'])
def application():
    """Render the upload form; on POST, save the posted image and show the
    prediction produced by predict() (from the project-local prediction
    module).
    """
    file = ""
    answer = None
    error = ""
    # Fix: `result` was previously unbound on GET requests and on failed
    # uploads; the original relied on a later try/except to swallow the
    # resulting NameError.
    result = ""
    if request.method == "POST":
        try:
            file = request.files["image"]
            if file:
                # Save the upload next to the app so predict() can read it.
                # NOTE(review): file.filename is attacker-controlled; consider
                # werkzeug.utils.secure_filename before joining paths.
                saved_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
                file.save(saved_path)
                print(file.filename)
                result = predict(file.filename)
                if result == "":
                    error = "Sorry!"
        # Fix: the original caught only SyntaxError, which cannot cover
        # I/O or prediction failures.
        except Exception as e:
            error = "Could not understand"
            print("Error:" + str(e))
    if result and result != "Sorry!":
        answer = result
    return render_template('index.html', file=file,
                           answer=answer, error=error)
if __name__ == "__main__":
    # NOTE(review): debug=True starts the Werkzeug dev server with the
    # interactive debugger — development use only.
    app.run(debug=True)
| 28.451613
| 76
| 0.535147
|
4a15e5fc654fe86787527229b34409978d67069f
| 4,343
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
unifycoin/unifycoin
|
7d0d5245610daab81e8b124c9b4dc03a73020b8f
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
unifycoin/unifycoin
|
7d0d5245610daab81e8b124c9b4dc03a73020b8f
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
unifycoin/unifycoin
|
7d0d5245610daab81e8b124c9b4dc03a73020b8f
|
[
"MIT"
] | 2
|
2019-06-28T12:47:30.000Z
|
2019-12-16T04:56:50.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix (IPv4-mapped, ::ffff:0:0/96)
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix (OnionCat)
pchOnionCat = bytearray([0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43])

def name_to_ipv6(addr):
    """Encode *addr* as a 16-byte IPv6 address (bytearray).

    Accepts "<base32>.onion", dotted IPv4, colon-separated IPv6, or
    "0x..." little-endian IPv4; raises ValueError otherwise.
    """
    if len(addr) > 6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16 - len(pchOnionCat):
            # Fix: the original formatted this message with the undefined
            # name `s`, raising NameError instead of the intended ValueError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr:  # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr:  # IPv6
        sub = [[], []]  # hextet bytes before / after the '::' gap
        x = 0
        addr = addr.split(':')
        for i, comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr) - 1):  # skip empty component at beginning or end
                    continue
                x += 1  # :: skips to suffix
                assert(x < 2)
            else:  # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'):  # IPv4-in-little-endian (old pnSeeds format)
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into (ipv6_bytes, port).

    Handles "[ipv6]:port", bare IPv6, and "host[:port]"; *defaultport* is
    used when no port is given. The host is encoded via name_to_ipv6().
    """
    # Fix: use a raw string — '\[' inside a plain literal is an invalid
    # escape sequence (DeprecationWarning/SyntaxWarning on modern Python).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match:  # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1:  # ipv6, no port
        host = s
        port = ''
    else:
        (host, _, port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host, port)
def process_nodes(g, f, structname, defaultport):
    """Write to *g* one C `SeedSpec6` array named *structname*, built from
    the address lines in file object *f* ('#' starts a comment)."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    need_separator = False
    for raw_line in f:
        hash_pos = raw_line.find('#')
        if hash_pos != -1:
            raw_line = raw_line[0:hash_pos]
        entry = raw_line.strip()
        if not entry:
            continue
        if need_separator:
            g.write(',\n')
        need_separator = True
        host, port = parse_spec(entry, defaultport)
        byte_list = ','.join(('0x%02x' % b) for b in host)
        g.write(' {{%s}, %i}' % (byte_list, port))
    g.write('\n};\n')
def main():
    """Emit src/chainparamsseeds.h content on stdout, built from the
    nodes_main.txt / nodes_test.txt files in the directory given as argv[1]."""
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the unifycoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    # 9333 / 19335 are the default mainnet / testnet P2P ports.
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 9333)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 19335)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')

if __name__ == '__main__':
    main()
| 31.244604
| 98
| 0.579784
|
4a15e672990b7081f8d6fe19ac03f5fb02171bd8
| 103
|
py
|
Python
|
bitmovin_api_sdk/encoding/inputs/akamai_netstorage/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/inputs/akamai_netstorage/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/inputs/akamai_netstorage/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.inputs.akamai_netstorage.customdata.customdata_api import CustomdataApi
| 51.5
| 102
| 0.912621
|
4a15e68fedb6cd1acb013624ecf9816eade18b13
| 68
|
py
|
Python
|
setup.py
|
adam-of-barot/dgws-api
|
0ecd2877531e69fc9c04afedba5141e34fd71a95
|
[
"MIT"
] | 1
|
2022-01-02T12:11:30.000Z
|
2022-01-02T12:11:30.000Z
|
setup.py
|
adam-of-barot/dgws-api
|
0ecd2877531e69fc9c04afedba5141e34fd71a95
|
[
"MIT"
] | null | null | null |
setup.py
|
adam-of-barot/dgws-api
|
0ecd2877531e69fc9c04afedba5141e34fd71a95
|
[
"MIT"
] | null | null | null |
import setuptools
if __name__ == "__main__":
    # Packaging metadata is presumably declared in setup.cfg/pyproject.toml,
    # which setup() reads automatically — confirm against the repo layout.
    setuptools.setup()
| 17
| 26
| 0.720588
|
4a15e6a95eb281b6a1f6984aef0a07d502bdc224
| 263
|
py
|
Python
|
core-python-robust-resource-and-error-handling/exception_chaining/explicit_chaining/chaining.py
|
hassonor/core-python
|
92672aa72c1474061df5247a2dd4dfd9fab1642a
|
[
"MIT"
] | 1
|
2022-03-09T20:58:33.000Z
|
2022-03-09T20:58:33.000Z
|
core-python-robust-resource-and-error-handling/exception_chaining/explicit_chaining/chaining.py
|
hassonor/core-python
|
92672aa72c1474061df5247a2dd4dfd9fab1642a
|
[
"MIT"
] | null | null | null |
core-python-robust-resource-and-error-handling/exception_chaining/explicit_chaining/chaining.py
|
hassonor/core-python
|
92672aa72c1474061df5247a2dd4dfd9fab1642a
|
[
"MIT"
] | null | null | null |
import math
class InclinationError(Exception):
    """Raised when an inclination angle cannot be computed."""


def inclination(dx, dy):
    """Return the inclination angle, in degrees, for a run *dx* and rise *dy*.

    Raises InclinationError (chained from ZeroDivisionError) when dx is zero.
    """
    try:
        slope = dy / dx
    except ZeroDivisionError as e:
        raise InclinationError("Slope cannot be vertical") from e
    return math.degrees(math.atan(slope))
# Demo: deliberately raises InclinationError chained (via `from`) to the
# underlying ZeroDivisionError, illustrating explicit exception chaining.
inclination(0, 5)
| 16.4375
| 65
| 0.684411
|
4a15e6c9f57ba152a330bd0c6a399e19bb4b35a1
| 223
|
py
|
Python
|
ymidi/io/__init__.py
|
Owen-Cochell/yapmidi
|
50a3a1800375a3f390eb40628387fb3d2a520b8e
|
[
"MIT"
] | null | null | null |
ymidi/io/__init__.py
|
Owen-Cochell/yapmidi
|
50a3a1800375a3f390eb40628387fb3d2a520b8e
|
[
"MIT"
] | null | null | null |
ymidi/io/__init__.py
|
Owen-Cochell/yapmidi
|
50a3a1800375a3f390eb40628387fb3d2a520b8e
|
[
"MIT"
] | null | null | null |
"""
This submodule contains I/O components of yap-midi.
Each section specializes in inputting and outputting
MIDI data to a certain location.
For example,
the alsaio class gets and outputs MIDI data to the ALSA daemon
"""
| 27.875
| 62
| 0.784753
|
4a15e6cc9a08cffed8416ed6e99520413ded73fa
| 25,732
|
py
|
Python
|
utility.py
|
jocelyngate38/photobooth-software
|
db103a5eeb34f5e85faf8ac17709be021f848d42
|
[
"MIT"
] | null | null | null |
utility.py
|
jocelyngate38/photobooth-software
|
db103a5eeb34f5e85faf8ac17709be021f848d42
|
[
"MIT"
] | null | null | null |
utility.py
|
jocelyngate38/photobooth-software
|
db103a5eeb34f5e85faf8ac17709be021f848d42
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from PyQt5.QtCore import (QUrl, QFile, QFileInfo, QPoint, QRect, QRectF, QSettings, QSize, QPointF,
Qt, QTextStream, QThread, pyqtSignal, pyqtSlot, QTimer, QDateTime, QIODevice, QElapsedTimer)
from PyQt5.QtGui import (QIcon, QKeySequence, QFont, QBrush, QPixmap, QPainter, QPen, QColor, QPainterPath, \
QDesktopServices, QFontMetrics)
from PyQt5.QtWidgets import (QMenu, QAction, QLabel, QApplication, QMainWindow, QDialog, QProgressBar, QLabel,
QHBoxLayout, QVBoxLayout, QLineEdit, QPushButton, QGridLayout, QGroupBox, QComboBox,
QSpacerItem, QSizePolicy, QInputDialog)
import time
import threading, time
from random import randint
from subprocess import call
import subprocess
from datetime import datetime
import random
import shutil
import os
from shutil import copyfile
from enum import Enum
import math
from ressourceManager import *
import glob
import sys
class Assembly():
    """Rebuilds photo-montage assemblies from captured .jpg files on disk.

    Relies on the project-local ressourcesManager for configured paths and
    for the actual assembly rendering.
    """

    def __init__(self, input, output):
        self.resources = ressourcesManager()
        self.resources.loadCurrentXmlSkinDescriptor()
        # Empty strings keep the paths configured by the skin descriptor.
        if input != "":
            self.resources.setPath(ressourcesManager.PATH.CAPTURE_USB, input)
        if output != "":
            self.resources.setPath(ressourcesManager.PATH.ASSEMBLIES_USB, output)

    def redoAssemblies(self, all):
        # NOTE(review): parameter `all` shadows the builtin all().
        # For every capture group, renumber its shots 0..n-1 in glob order,
        # then rebuild either one shuttle assembly or all available ones.
        mylist = [f for f in glob.glob(self.resources.getPath(ressourcesManager.PATH.CAPTURE_USB) + "/*.jpg")]
        for files in mylist:
            # print(files)
            if os.path.isfile(files):
                # Strip the per-shot suffix (_0.._3) to recover the base name.
                basename = os.path.basename(files)
                basename = basename.replace("_0.jpg", "", 1)
                basename = basename.replace("_1.jpg", "", 1)
                basename = basename.replace("_2.jpg", "", 1)
                basename = basename.replace("_3.jpg", "", 1)
                mylist1 = [ff for ff in
                           glob.glob(self.resources.getPath(ressourcesManager.PATH.CAPTURE_USB) + "/" + basename + "*")]
                n = len(mylist1)
                dir_path = os.path.dirname(os.path.realpath(files))
                # NOTE(review): "\\" separators assume Windows — confirm the
                # target platform (os.path.join would be portable).
                for i in range(n):
                    os.rename(mylist1[i], dir_path + "\\" + basename + "_" + str(int(i)) + ".jpg")
                if all == False:
                    self.resources.buildShuttleAssembly(
                        self.resources.getPath(ressourcesManager.PATH.CAPTURE_USB) + "\\" + basename, n)
                else:
                    self.resources.buildAvailableAssemblies(
                        self.resources.getPath(ressourcesManager.PATH.CAPTURE_USB) + "\\" + basename, n)

    def redoAssemblies1Pict(self):
        # Rename every capture to fileN_0.jpg, then rebuild single-shot
        # assemblies via redoAssembliest().
        mylist = [f for f in glob.glob(self.resources.getPath(ressourcesManager.PATH.CAPTURE_USB) + "/*.jpg")]
        i = 0
        for files in mylist:
            i += 1
            # print(files)
            dir_path = os.path.dirname(os.path.realpath(files))
            if os.path.isfile(files):
                basename = os.path.basename(files)
                os.rename(files, dir_path + "\\file" + str(i) + "_0.jpg")
        self.redoAssembliest()

    def redoAssembliest(self):
        # Rebuild assemblies assuming exactly one picture per capture (_0).
        mylist = [f for f in glob.glob(self.resources.getPath(ressourcesManager.PATH.CAPTURE_USB) + "/*.jpg")]
        for files in mylist:
            if os.path.isfile(files):
                basename = os.path.basename(files)
                basename = basename.replace("_0.jpg", "", 1)
                self.resources.buildAvailableAssemblies(
                    self.resources.getPath(ressourcesManager.PATH.CAPTURE_USB) + "\\" + basename, 1)
class skinBuilder():
    """Generates a photobooth skin from a template folder: copies template
    assets, flattens sub-theme overlays onto them, and draws caption text
    along cubic Bezier paths described by the skin-generator XML."""

    def __init__(self):
        self.resources = None
        # Default template folder; override with setDescriptorFolder().
        self.baseSkinTemplate = "D:/photobooth/trunk/external/skin/halloween/templates"

    def setDescriptorFolder(self, path):
        self.baseSkinTemplate = path

    def init(self):
        # Loads the skin-generator descriptor; must be called before building.
        self.resources = ressourcesManager()
        self.resources.loadXmlSkinGeneratorDescriptor(self.baseSkinTemplate)

    def createHierarchy(self):
        # Create the temporary output tree (layouts/ and pages/ subfolders).
        self.generationPath = "../external/skin/chalk/testGene/tmp"
        if not os.path.exists(self.generationPath):
            os.makedirs(self.generationPath)
        if not os.path.exists(self.generationPath + "/layouts/"):
            os.makedirs(self.generationPath + "/layouts/")
        if not os.path.exists(self.generationPath + "/pages/"):
            os.makedirs(self.generationPath + "/pages/")

    def setOutpuFolder(self, folder):
        # (sic) keeps the original misspelled name — callers use it as-is.
        self.currentOutputFolder = folder
        if not os.path.exists(self.currentOutputFolder):
            os.makedirs(self.currentOutputFolder)
        if not os.path.exists(self.currentOutputFolder + "/layouts/"):
            os.makedirs(self.currentOutputFolder + "/layouts/")
        if not os.path.exists(self.currentOutputFolder + "/pages/"):
            os.makedirs(self.currentOutputFolder + "/pages/")

    def copyLayouts(self):
        # Copy every layout .png from the template to the output folder.
        source = [s for s in os.listdir(self.baseSkinTemplate + "/layouts/") if s.endswith('.png')]
        destination = self.currentOutputFolder + "/layouts/"
        for files in source:
            shutil.copy(self.baseSkinTemplate + "/layouts/" + files, destination)

    def copyPages(self):
        # Copy every page .png from the template to the output folder.
        source = [s for s in os.listdir(self.baseSkinTemplate + "/pages/") if s.endswith('.png')]
        destination = self.currentOutputFolder + "/pages/"
        for files in source:
            shutil.copy(self.baseSkinTemplate + "/pages/" + files, destination)

    def copyDescriptor(self):
        # Copy the skin descriptor.xml alongside the copied assets.
        source = self.baseSkinTemplate + "/descriptor.xml"
        destination = self.currentOutputFolder + "/"
        shutil.copy(source, destination)

    def copyFiles(self):
        self.copyLayouts()
        self.copyPages()
        self.copyDescriptor()

    def flattenSubtheme(self, copyright):
        # NOTE(review): reads self.choosenSkinTheme and self.generationPath,
        # which are not set by this class's visible methods except
        # createHierarchy() — confirm the expected call sequence.
        generatorLayoutDatas = self.resources.skinGeneratorLayoutDatas
        generatorPageDatas = self.resources.skinGeneratorPagesDatas
        for lay in generatorLayoutDatas:
            for i in range(len(lay)):
                fileAA = lay[i]['template']
                fileBB = self.choosenSkinTheme[0] + "/" + fileAA
                outFile = self.generationPath + "/layouts/" + fileAA
                fileA = self.baseSkinTemplate + "/layouts/" + fileAA
                fileB = self.baseSkinTemplate + "/layouts/" + fileBB
                self.flattenFiles(fileA, fileB, outFile, lay[i], copyright)
        self.createOverlayFile(generatorPageDatas, copyright)

    def buildSkinInteractively(self):
        # Build one dialogSkinBuilder per layout/page with a text input for
        # every cubic caption, then run the interactive preview dialog.
        generatorLayoutDatas = self.resources.skinGeneratorLayoutDatas
        generatorPageDatas = self.resources.skinGeneratorPagesDatas
        builder = dialogSkinPreviewBuilder(self.currentOutputFolder)
        for lay in generatorLayoutDatas:
            for i in range(len(lay)):
                cLay = lay[i]
                print(cLay)
                dbuilder = dialogSkinBuilder(self.currentOutputFolder + "/layouts", cLay["template"],
                                             self.baseSkinTemplate + "/layouts")
                for ii in range(1, len(cLay["messages"]) + 1):
                    if cLay['messages'][ii]["type"] == "cubic":
                        dbuilder.addTextInput(cLay["messages"][ii])
                builder.addBuilder(dbuilder)
        for lay in generatorPageDatas:
            cLay = lay
            dbuilder = dialogSkinBuilder(self.currentOutputFolder + "/pages", cLay["filename"],
                                         self.baseSkinTemplate + "/pages/")
            for ii in range(1, len(cLay["messages"]) + 1):
                if cLay['messages'][ii]["type"] == "cubic":
                    dbuilder.addTextInput(cLay["messages"][ii])
            builder.addBuilder(dbuilder)
        for t in self.resources.skinGeneratorThemes:
            builder.addSubThemes(t[0], t[1])
        builder.arrangeLayout()
        builder.exec()

    def createOverlayFile(self, generatorPageDatas, copyright):
        # Draw every cubic caption onto each page image and save it in place.
        for ol in generatorPageDatas:
            base = QPixmap(self.currentOutputFolder + "/pages/" + ol["filename"])
            painter = QPainter(base)
            painter.setRenderHint(QPainter.Antialiasing)
            painter.setPen(QColor(0, 0, 0))
            for ii in range(1, len(ol["messages"]) + 1):
                if ol['messages'][ii]["type"] == "cubic":
                    self.drawTextAlongCubic(ol["messages"][ii], painter, ol["filename"])
            f = self.resources.savePicture(base, self.currentOutputFolder + "/pages/" + ol["filename"], 0, 0, "JPG")
            del painter

    def flattenFiles(self, fileA, fileB, output, lay, copyright):
        # Composite overlay fileB onto base fileA, draw the layout's cubic
        # captions, optionally stamp a copyright overlay, and save to output.
        overLay = QPixmap(fileB)
        outPixmap = QPixmap(fileA)
        painter = QPainter(outPixmap)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.drawPixmap(0, 0, overLay)
        painter.setPen(QColor(255, 255, 255))
        # font = QFont('Arial', fs)
        for ii in range(1, len(lay["messages"]) + 1):
            if lay['messages'][ii]["type"] == "cubic":
                if QFileInfo(fileA).fileName() == lay['template']:
                    self.drawTextAlongCubic(lay["messages"][ii], painter, lay['template'])
        if copyright == True:
            # landscape flag selects which pre-rendered copyright overlay fits.
            if lay["landscape"] == 0:
                overLayCopyright = QPixmap(self.baseSkinTemplate + "/copyrightPortrait.png")
                painter.drawPixmap(0, 0, overLayCopyright)
            else:
                overLayCopyright = QPixmap(self.baseSkinTemplate + "/copyrightLandscape.png")
                painter.drawPixmap(0, 0, overLayCopyright)
        f = self.resources.savePicture(outPixmap, output, 0, 0, "JPG")
        del painter
        return f

    def drawTextAlongCubic(self, lay, painter, filename):
        # Draw the layout's default message character by character along the
        # cubic Bezier defined by control points (x1,y1)..(x4,y4), rotating
        # each glyph to follow the curve's tangent.
        fs = lay["defaultFontSize"]
        font = QFont('Right Chalk', fs)
        defaultMessage = lay["defaultMessage"]
        c1 = QPointF(lay["x1"], lay["y1"])
        c2 = QPointF(lay["x2"], lay["y2"])
        c3 = QPointF(lay["x3"], lay["y3"])
        c4 = QPointF(lay["x4"], lay["y4"])
        path = QPainterPath(c1)
        path.cubicTo(c2, c3, c4)
        # painter.drawPath(path)
        pathLength = path.length()
        textMetricLength = QFontMetrics(font).width(defaultMessage)
        # Scale the font so the whole message spans ~95% of the path,
        # capped at 70pt.
        fsn = int(fs * pathLength / (textMetricLength) * .95)
        if fsn > 70:
            fsn = 70
        font = QFont('Right Chalk', fsn)
        textMetricLength = QFontMetrics(font).width(defaultMessage)
        messageSpacing = []
        defaultMessageM = []
        sumMessageSpacing = 0.0
        # Per-character width as a fraction of the total message width.
        for i in range(len(defaultMessage)):
            messageSpacing.append(QFontMetrics(font).width(defaultMessage[i]))
            sumMessageSpacing += messageSpacing[i]
        for i in range(len(defaultMessage)):
            messageSpacing[i] = messageSpacing[i] / sumMessageSpacing
        steps = 0
        painter.setFont(font)
        for i in range(len(defaultMessage)):
            # Center each glyph at the midpoint of its span along the path.
            steps += messageSpacing[i] / 2
            point = QPointF(path.pointAtPercent(steps))
            angle = path.angleAtPercent(steps)
            painter.save()
            painter.translate(point)
            painter.rotate(-angle)
            x = -QFontMetrics(font).width(defaultMessage[i]) / 2
            y = -QFontMetrics(font).height() / 2
            w = QFontMetrics(font).width(defaultMessage[i])
            h = QFontMetrics(font).height()
            r = QRectF(x, y, w, h)
            painter.setPen(QPen(Qt.white, 2))
            painter.drawText(r, defaultMessage[i])
            if i % 2 == 0:
                painter.setPen(QPen(Qt.red, 2))
            else:
                painter.setPen(QPen(Qt.green, 2))
            painter.restore()
            steps += messageSpacing[i] / 2
class dialogSkinPreviewBuilder(QDialog):
    """Top-level dialog: a grid of layout/page previews (one dialogSkinBuilder
    each) plus skin-management controls (sub-theme apply, reset, save)."""

    def __init__(self, rootFolder):
        super(dialogSkinPreviewBuilder, self).__init__()
        self.init_ui()
        self.builderList = []        # dialogSkinBuilder per layout/page
        self.rootFolder = rootFolder # working skin folder being edited
        self.subTheme = {}           # display name -> sub-theme folder name

    def init_ui(self):
        # Build the management button row; the preview grid is laid out later
        # by arrangeLayout().
        self.refreshButton = QPushButton("Refresh", self)
        self.loadXMLTextButton = QPushButton("Build from xml", self)
        self.openXMLButton = QPushButton("Open xml file", self)
        self.resetButton = QPushButton("Reset All", self)
        self.saveSkinButton = QPushButton("Save As ...")
        self.exitSkinButton = QPushButton("Exit")
        self.combobox = QComboBox(self)
        self.applySubThemeButton = QPushButton("Apply sub-theme")
        self.applySubThemeButton.clicked.connect(self.applySelectedSubTheme)
        self.loadXMLTextButton.clicked.connect(self.fillTextFromXML)
        self.openXMLButton.clicked.connect(self.openXMLFile)
        self.saveSkinButton.clicked.connect(self.onSaveSkin)
        self.resetButton.clicked.connect(self.resetAll)
        self.refreshButton.clicked.connect(self.arrangeLayout)
        self.exitSkinButton.clicked.connect(self.reject)
        self.box = QGroupBox("Skin managment", self)
        hlayout = QHBoxLayout(self)
        hlayout.addWidget(self.combobox)
        hlayout.addWidget(self.applySubThemeButton)
        hSpacer = QSpacerItem(20, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
        hlayout.addItem(hSpacer)
        hlayout.addWidget(self.refreshButton)
        hlayout.addWidget(self.loadXMLTextButton)
        hlayout.addWidget(self.openXMLButton)
        hlayout.addWidget(self.resetButton)
        hlayout.addWidget(self.saveSkinButton)
        hlayout.addWidget(self.exitSkinButton)
        self.box.setLayout(hlayout)
        self.layout = QGridLayout(self)

    def addSubThemes(self, name, folderName):
        self.combobox.addItem(name)
        self.subTheme[name] = folderName

    def fillTextFromXML(self):
        # Re-render every preview from the descriptor's default texts.
        for builder in self.builderList:
            builder.updatePix(True)

    def openXMLFile(self):
        # Open the skin descriptor in the system's default editor.
        QDesktopServices.openUrl(QUrl(self.rootFolder + "/descriptor.xml"))

    def addBuilder(self, builder):
        self.builderList.append(builder)

    def resetAll(self):
        # Restore every image from its saved ".origin" copy.
        for builder in self.builderList:
            builder.resetPixmap()
        self.arrangeLayout()
        QApplication.processEvents()

    def applySelectedSubTheme(self):
        # Composite the chosen sub-theme overlay onto every layout/page.
        i=0
        for builder in self.builderList:
            i= i+1
            print("Applying overlay : " + str(i) + "/" + str(len(self.builderList)))
            builder.applyOverlay(self.subTheme[self.combobox.currentText()])
        self.arrangeLayout()
        QApplication.processEvents()

    def onSaveSkin(self):
        # Copy the working skin (assets + descriptor) under a user-chosen name.
        name, ok = QInputDialog.getText(self, 'Skin name', 'Enter the name for your skin:')
        if ok:
            self.generationPath = "../photobooth/skin/chalk/" + name
            if not os.path.exists(self.generationPath):
                os.makedirs(self.generationPath)
            if not os.path.exists(self.generationPath + "/pages"):
                os.makedirs(self.generationPath + "/pages")
            if not os.path.exists(self.generationPath + "/layouts"):
                os.makedirs(self.generationPath + "/layouts")
            source = [s for s in os.listdir(self.rootFolder + "/layouts/") if s.endswith('.png')]
            destination = self.generationPath + "/layouts/"
            for files in source:
                shutil.copy(self.rootFolder + "/layouts/" + files, destination)
            source = [s for s in os.listdir(self.rootFolder + "/pages/") if s.endswith('.png')]
            destination = self.generationPath + "/pages/"
            for files in source:
                shutil.copy(self.rootFolder + "/pages/" + files, destination)
            shutil.copy(self.rootFolder + "/descriptor.xml", self.generationPath + "/")

    def arrangeLayout(self):
        # Rebuild the preview grid, 4 cells per row; each cell shows a 1/10
        # scale preview with per-item Reset / Fill-text buttons.
        n = len(self.builderList)
        i = 1
        j = 0
        for builder in self.builderList:
            vlayout = QVBoxLayout(self)
            previewLabel = QLabel(self)
            previewLabel.setToolTip(builder.inputFilePath + "/" + builder.inputFileName)
            edit = QPushButton("Fill text", self)
            resetItem = QPushButton("Reset", self)
            vlayout.addWidget(previewLabel)
            vlayout.addWidget(resetItem)
            vlayout.addWidget(edit)
            p = QPixmap(builder.inputFilePath + "/" + builder.inputFileName)
            previewLabel.setPixmap(
                p.scaled(p.width() / 10, p.height() / 10, Qt.KeepAspectRatio, transformMode=Qt.SmoothTransformation))
            self.layout.addLayout(vlayout, i, j)
            edit.clicked.connect(builder.exec)
            #builder.finished.connect(self.arrangeLayout)
            resetItem.clicked.connect(builder.resetPixmap)
            j += 1
            if j == 4:
                j = 0
                i += 1
        self.layout.addWidget(self.box, i + 1, 0, 4, 0)
        self.setLayout(self.layout)
class dialogSkinBuilder(QDialog):
    """Per-image editor dialog: one line edit per cubic caption, a live
    preview, and Validate/Cancel actions that write the rendered PNG back."""

    def __init__(self, input, filename, inputTemplate):
        super(dialogSkinBuilder, self).__init__()
        self.inputFilePath = input                  # folder of the edited image
        self.inputFileName = filename               # image file name
        self.inputTemplateFilePath = inputTemplate  # sub-theme overlay root
        self.init_ui()
        self.currentMessagesDatas = []              # {linedit, descriptor} per caption

    def init_ui(self):
        # Creating a label
        self.previewLabel = QLabel(self)
        self.updatePushButton = QPushButton("Preview", self)
        self.updatePushButton.clicked.connect(self.onUpdate)
        self.savePushButton = QPushButton("Validate", self)
        self.savePushButton.clicked.connect(self.onSave)
        self.skipPushButton = QPushButton("Cancel", self)
        self.skipPushButton.clicked.connect(self.onSkip)
        self.vboxLayout = QVBoxLayout(self)
        # Adding the widgets
        self.vboxLayout.addWidget(self.previewLabel)
        hl = QVBoxLayout(self)
        hl.addWidget(self.updatePushButton)
        hl.addWidget(self.savePushButton)
        hl.addWidget(self.skipPushButton)
        self.vboxLayout.addLayout(hl)
        # Setting the hBoxLayout as the main layout
        self.setLayout(self.vboxLayout)
        self.setWindowTitle('Skin builder for ' + self.inputFilePath + "/" + self.inputFileName)
        self.copyBasePixmap()

    def resetPixmap(self):
        # Restore the working image from its pristine ".origin" backup.
        shutil.copy(self.inputFilePath + "/" + self.inputFileName + ".origin",
                    self.inputFilePath + "/" + self.inputFileName)

    def copyBasePixmap(self):
        # Keep a pristine ".origin" backup of the image before any edit.
        shutil.copy(self.inputFilePath + "/" + self.inputFileName,
                    self.inputFilePath + "/" + self.inputFileName + ".origin")

    def applyOverlay(self, filename):
        # Composite the sub-theme overlay of the same file name onto the
        # working image and save it in place as PNG.
        self.overlayFilename = self.inputTemplateFilePath + "/" + filename + "/" + self.inputFileName
        overLay = QPixmap(self.overlayFilename)
        outPixmap = QPixmap(self.inputFilePath + "/" + self.inputFileName)
        painter = QPainter(outPixmap)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.drawPixmap(0, 0, overLay)
        painter.setPen(QColor(255, 255, 255))
        normPath = os.path.normpath(self.inputFilePath + "/" + self.inputFileName)
        file = QFile(normPath)
        file.open(QIODevice.WriteOnly)
        outPixmap.save(file, "PNG")
        del painter

    def addTextInput(self, data):
        # Add one labeled line edit bound to a cubic caption descriptor.
        hboxLayout = QHBoxLayout(self)
        label = QLabel("Texte " + data["location"], self)
        lineEdit = QLineEdit(data["defaultMessage"], self)
        hboxLayout.addWidget(label)
        hboxLayout.addWidget(lineEdit)
        self.vboxLayout.addLayout(hboxLayout)
        dataDict = {}
        dataDict["linedit"] = lineEdit
        dataDict["descriptor"] = data
        self.currentMessagesDatas.append(dataDict)
        # c1 = QPointF(lay["x1"], lay["y1"])
        # c2 = QPointF(lay["x2"], lay["y2"])
        # c3 = QPointF(lay["x3"], lay["y3"])
        # c4 = QPointF(lay["x4"], lay["y4"])

    def onUpdate(self):
        self.updatePix(False)

    def onSkip(self):
        self.reject()

    def onSave(self):
        self.updatePix(True)
        self.accept()

    def exec(self):
        # Render once so the preview is populated before the dialog shows.
        self.onUpdate()
        super(dialogSkinBuilder, self).exec()

    def updatePix(self, save):
        # Re-render all caption texts onto the image; refresh the 1/3-scale
        # preview, and persist the PNG when save is True.
        outPixmap = QPixmap(self.inputFilePath + "/" + self.inputFileName)
        painter = QPainter(outPixmap)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setPen(QColor(255, 255, 255))
        # NOTE(review): loop variable `dict` shadows the builtin dict.
        for dict in self.currentMessagesDatas:
            msg = dict["linedit"].text()
            self.drawTextAlongCubic(dict["descriptor"], painter, "", msg)
        self.previewLabel.setPixmap(outPixmap.scaled(outPixmap.width() / 3, outPixmap.height() / 3, Qt.KeepAspectRatio,
                                                     transformMode=Qt.SmoothTransformation))
        if save == True:
            normPath = os.path.normpath(self.inputFilePath + "/" + self.inputFileName)
            file = QFile(normPath)
            file.open(QIODevice.WriteOnly)
            outPixmap.save(file, "PNG")
        del painter

    def drawTextAlongCubic(self, lay, painter, filename, msg):
        # Draw *msg* character by character along the cubic Bezier defined by
        # the descriptor's control points, rotating each glyph to the tangent.
        fs = lay["defaultFontSize"]
        font = QFont('Right Chalk', fs)
        defaultMessage = msg
        if len(msg) == 0:
            return
        c1 = QPointF(lay["x1"], lay["y1"])
        c2 = QPointF(lay["x2"], lay["y2"])
        c3 = QPointF(lay["x3"], lay["y3"])
        c4 = QPointF(lay["x4"], lay["y4"])
        path = QPainterPath(c1)
        path.cubicTo(c2, c3, c4)
        # painter.drawPath(path)
        pathLength = path.length()
        textMetricLength = QFontMetrics(font).width(defaultMessage)
        # Scale the font so the text spans ~95% of the path, capped at 70pt.
        fsn = int(fs * pathLength / (textMetricLength) * .95)
        if fsn > 70:
            fsn = 70
        font = QFont('Right Chalk', fsn)
        textMetricLength = QFontMetrics(font).width(defaultMessage)
        messageSpacing = []
        defaultMessageM = []
        sumMessageSpacing = 0.0
        # Per-character width as a fraction of the total message width.
        for i in range(len(defaultMessage)):
            messageSpacing.append(QFontMetrics(font).width(defaultMessage[i]))
            sumMessageSpacing += messageSpacing[i]
        for i in range(len(defaultMessage)):
            messageSpacing[i] = messageSpacing[i] / sumMessageSpacing
        steps = 0
        painter.setFont(font)
        for i in range(len(defaultMessage)):
            steps += messageSpacing[i] / 2
            point = QPointF(path.pointAtPercent(steps))
            angle = path.angleAtPercent(steps)
            painter.save()
            painter.translate(point)
            painter.rotate(-angle)
            x = -QFontMetrics(font).width(defaultMessage[i]) / 2
            y = -QFontMetrics(font).height() / 2
            w = QFontMetrics(font).width(defaultMessage[i])
            h = QFontMetrics(font).height()
            r = QRectF(x, y, w, h)
            painter.setPen(QPen(Qt.white, 2))
            painter.drawText(r, defaultMessage[i])
            if i % 2 == 0:
                painter.setPen(QPen(Qt.red, 2))
            else:
                painter.setPen(QPen(Qt.green, 2))
            # painter.drawRect(r)
            painter.restore()
            steps += messageSpacing[i] / 2
def test(nb):
    # Render the "photo could not be taken" error page (French text) onto the
    # skin's onError.png and save it as "<nb>toto.png" for visual inspection.
    # NOTE(review): function name shadows nothing but is generic — looks like
    # a manual debugging helper.
    txt = " UNE PHOTO N'A PAS PU ETRE PRISE ! "
    if nb > 1:
        txt = str(int(nb)) + " PHOTOS N'ONT PAS PU ETRE PRISE !"
    resources = ressourcesManager()
    outPixmap = QPixmap(resources.getPath(ressourcesManager.PATH.PAGE) + "/onError.png")
    painter = QPainter(outPixmap)
    painter.setRenderHint(QPainter.Antialiasing)
    x = 50
    y = 160
    r = QRectF(0, 0, 1180, 150)
    painter.setPen(QColor(160, 160, 160))
    painter.setFont(QFont("Right Chalk", 40))
    painter.translate(x, y)
    painter.drawText(r, txt)
    painter.translate(-x, -y)
    del painter
    outPixmap.save(str(nb) + "toto.png", "PNG")
if __name__ == '__main__':
    # Command-line dispatcher: utility.py <command> [input output]
    app = QApplication(sys.argv)
    if len(sys.argv) == 4:
        if sys.argv[1] == "redoAssemblies":
            ass = Assembly(sys.argv[2], sys.argv[3])
            ass.redoAssemblies(True)
    if len(sys.argv) == 4:
        if sys.argv[1] == "redoAssemblies1Pict":
            ass = Assembly(sys.argv[2], sys.argv[3])
            ass.redoAssemblies1Pict()
    if len(sys.argv) == 4:
        if sys.argv[1] == "redoAssembliesRandom":
            ass = Assembly(sys.argv[2], sys.argv[3])
            # NOTE(review): Assembly defines no redoAssembliesRandom — this
            # branch raises AttributeError; confirm the intended method.
            ass.redoAssembliesRandom(True)
    if len(sys.argv) == 2:
        if sys.argv[1] == "buildskin":
            skBuilder = skinBuilder()
            # skBuilder.askUserName()
            skBuilder.createHierarchy()
            # NOTE(review): skinBuilder defines copyPages()/copyLayouts(),
            # not copyPagesToTemp()/copyLayoutsToTemp() — these two calls
            # raise AttributeError; confirm the intended methods.
            skBuilder.copyPagesToTemp()
            skBuilder.copyLayoutsToTemp()
            skBuilder.flattenSubtheme(False)
        elif sys.argv[1] == "buildskinCopyright":
            skBuilder = skinBuilder()
            skBuilder.createHierarchy()
            # NOTE(review): same missing *ToTemp methods as the branch above.
            skBuilder.copyPagesToTemp()
            skBuilder.copyLayoutsToTemp()
            skBuilder.copyDescriptor()
            skBuilder.flattenSubtheme(True)
        elif sys.argv[1] == "buildskinInteractive":
            skBuilder = skinBuilder()
            skBuilder.setDescriptorFolder("../external/skin/chalk/templates")
            skBuilder.init()
            skBuilder.setOutpuFolder("../external/skin/chalk/testGene/tmp")
            skBuilder.copyFiles()
            skBuilder.buildSkinInteractively()
    sys.exit(1)
| 36.447592
| 120
| 0.600731
|
4a15e7ca36679def5ecd39424af663fdc398a093
| 5,045
|
py
|
Python
|
train_i3d.py
|
Lechatelia/video_detection_tools
|
1eebaf3e4b358a940e21f37d387de23503d5bda0
|
[
"Apache-2.0"
] | null | null | null |
train_i3d.py
|
Lechatelia/video_detection_tools
|
1eebaf3e4b358a940e21f37d387de23503d5bda0
|
[
"Apache-2.0"
] | null | null | null |
train_i3d.py
|
Lechatelia/video_detection_tools
|
1eebaf3e4b358a940e21f37d387de23503d5bda0
|
[
"Apache-2.0"
] | null | null | null |
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]='0,1,2,3'
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-mode', type=str, help='rgb or flow')
parser.add_argument('-save_model', type=str)
parser.add_argument('-root', type=str)
args = parser.parse_args()
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision
from torchvision import datasets, transforms
import videotransforms
import numpy as np
from pytorch_i3d import InceptionI3d
from charades_dataset import Charades as Dataset
def run(init_lr=0.1, max_steps=64e3, mode='rgb', root='/ssd/Charades_v1_rgb', train_split='charades/charades.json', batch_size=8*5, save_model=''):
# setup dataset
train_transforms = transforms.Compose([videotransforms.RandomCrop(224),
videotransforms.RandomHorizontalFlip(),
])
test_transforms = transforms.Compose([videotransforms.CenterCrop(224)])
dataset = Dataset(train_split, 'training', root, mode, train_transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=36, pin_memory=True)
val_dataset = Dataset(train_split, 'testing', root, mode, test_transforms)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=36, pin_memory=True)
dataloaders = {'train': dataloader, 'val': val_dataloader}
datasets = {'train': dataset, 'val': val_dataset}
# setup the model
if mode == 'flow':
i3d = InceptionI3d(400, in_channels=2)
i3d.load_state_dict(torch.load('models/flow_imagenet.pt'))
else:
i3d = InceptionI3d(400, in_channels=3)
i3d.load_state_dict(torch.load('models/rgb_imagenet.pt'))
i3d.replace_logits(157)
#i3d.load_state_dict(torch.load('/ssd/models/000920.pt'))
i3d.cuda()
i3d = nn.DataParallel(i3d)
lr = init_lr
optimizer = optim.SGD(i3d.parameters(), lr=lr, momentum=0.9, weight_decay=0.0000001)
lr_sched = optim.lr_scheduler.MultiStepLR(optimizer, [300, 1000])
num_steps_per_update = 4 # accum gradient
steps = 0
# train it
while steps < max_steps:#for epoch in range(num_epochs):
print ('Step {}/{}'.format(steps, max_steps))
print ('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
i3d.train(True)
else:
i3d.train(False) # Set model to evaluate mode
tot_loss = 0.0
tot_loc_loss = 0.0
tot_cls_loss = 0.0
num_iter = 0
optimizer.zero_grad()
# Iterate over data.
for data in dataloaders[phase]:
num_iter += 1
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs = Variable(inputs.cuda())
t = inputs.size(2)
labels = Variable(labels.cuda())
per_frame_logits = i3d(inputs)
# upsample to input size
per_frame_logits = F.upsample(per_frame_logits, t, mode='linear')
# compute localization loss
loc_loss = F.binary_cross_entropy_with_logits(per_frame_logits, labels)
tot_loc_loss += loc_loss.data[0]
# compute classification loss (with max-pooling along time B x C x T)
cls_loss = F.binary_cross_entropy_with_logits(torch.max(per_frame_logits, dim=2)[0], torch.max(labels, dim=2)[0])
tot_cls_loss += cls_loss.data[0]
loss = (0.5*loc_loss + 0.5*cls_loss)/num_steps_per_update
tot_loss += loss.data[0]
loss.backward()
if num_iter == num_steps_per_update and phase == 'train':
steps += 1
num_iter = 0
optimizer.step()
optimizer.zero_grad()
lr_sched.step()
if steps % 10 == 0:
print ('{} Loc Loss: {:.4f} Cls Loss: {:.4f} Tot Loss: {:.4f}'.format(phase, tot_loc_loss/(10*num_steps_per_update), tot_cls_loss/(10*num_steps_per_update), tot_loss/10))
# save model
torch.save(i3d.module.state_dict(), save_model+str(steps).zfill(6)+'.pt')
tot_loss = tot_loc_loss = tot_cls_loss = 0.
if phase == 'val':
print ('{} Loc Loss: {:.4f} Cls Loss: {:.4f} Tot Loss: {:.4f}'.format(phase, tot_loc_loss/num_iter, tot_cls_loss/num_iter, (tot_loss*num_steps_per_update)/num_iter))
if __name__ == '__main__':
# need to add argparse
run(mode=args.mode, root=args.root, save_model=args.save_model)
| 37.649254
| 194
| 0.61328
|
4a15e89583605ccc27471d0d2048fdae53bb67dc
| 12,534
|
py
|
Python
|
docs/conf.py
|
anukaal/python-bigquery-connection
|
10b677a34e2b0d76ed4d2c3f22006996857e23f5
|
[
"Apache-2.0"
] | 18
|
2020-05-26T20:04:57.000Z
|
2022-03-28T21:16:46.000Z
|
docs/conf.py
|
anukaal/python-bigquery-connection
|
10b677a34e2b0d76ed4d2c3f22006996857e23f5
|
[
"Apache-2.0"
] | 52
|
2020-05-26T22:01:46.000Z
|
2022-03-08T16:59:59.000Z
|
docs/conf.py
|
anukaal/python-bigquery-connection
|
10b677a34e2b0d76ed4d2c3f22006996857e23f5
|
[
"Apache-2.0"
] | 9
|
2020-05-19T23:27:25.000Z
|
2022-01-29T08:07:35.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-bigquery-connection documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-bigquery-connection"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-bigquery-connection",
"github_user": "googleapis",
"github_repo": "python-bigquery-connection",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-bigquery-connection-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-bigquery-connection.tex",
"google-cloud-bigquery-connection Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-bigquery-connection",
"google-cloud-bigquery-connection Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-bigquery-connection",
"google-cloud-bigquery-connection Documentation",
author,
"google-cloud-bigquery-connection",
"google-cloud-bigquery-connection Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| 32.811518
| 88
| 0.707117
|
4a15e95286a2efe921d727a73f67fd11992e87ae
| 377
|
py
|
Python
|
changes/api/serializer/models/comment.py
|
bowlofstew/changes
|
ebd393520e0fdb07c240a8d4e8747281b6186e28
|
[
"Apache-2.0"
] | null | null | null |
changes/api/serializer/models/comment.py
|
bowlofstew/changes
|
ebd393520e0fdb07c240a8d4e8747281b6186e28
|
[
"Apache-2.0"
] | null | null | null |
changes/api/serializer/models/comment.py
|
bowlofstew/changes
|
ebd393520e0fdb07c240a8d4e8747281b6186e28
|
[
"Apache-2.0"
] | null | null | null |
from changes.api.serializer import Crumbler, register
from changes.models import Comment
@register(Comment)
class CommentCrumbler(Crumbler):
def crumble(self, instance, attrs):
return {
'id': instance.id.hex,
'user': instance.user,
'text': instance.text,
'dateCreated': instance.date_created.isoformat(),
}
| 26.928571
| 61
| 0.633952
|
4a15ea50b891ad686683495c6dffda276f1499ab
| 9,637
|
py
|
Python
|
userbot/modules/emotion.py
|
Furuhashii/TaliauBot
|
c59c3494faa4b3dd2d51ffb4b910c10cefc16098
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/emotion.py
|
Furuhashii/TaliauBot
|
c59c3494faa4b3dd2d51ffb4b910c10cefc16098
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/emotion.py
|
Furuhashii/TaliauBot
|
c59c3494faa4b3dd2d51ffb4b910c10cefc16098
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Lord-Userbot
from time import sleep
from userbot import CMD_HELP, bot
from userbot.events import register
from telethon import events
import asyncio
@bot.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(117)
input_str = event.pattern_match.group(1)
if input_str == "bulan":
await event.edit(input_str)
animation_chars = [
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
"🌖",
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
"🌖",
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
"🌖",
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
f"🌖"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 32])
@register(outgoing=True, pattern='^.helikopter(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("▬▬▬.◙.▬▬▬ \n"
"═▂▄▄▓▄▄▂ \n"
"◢◤ █▀▀████▄▄▄▄◢◤ \n"
"█▄ █ █▄ ███▀▀▀▀▀▀▀╬ \n"
"◥█████◤ \n"
"══╩══╩══ \n"
"╬═╬ \n"
"╬═╬ \n"
"╬═╬ \n"
"╬═╬ \n"
"╬═╬ \n"
"╬═╬ \n"
"╬═╬ Hallo Semuanya :) \n"
"╬═╬☻/ \n"
"╬═╬/▌ \n"
"╬═╬/ \ \n")
@register(outgoing=True, pattern='^.tembak(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("_/﹋\_\n"
"(҂`_´)\n"
"<,︻╦╤─ ҉\n"
r"_/﹋\_"
"\n**Mau Jadi Pacarku Gak?!**")
@register(outgoing=True, pattern='^.bundir(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Dadah Semuanya...` \n |"
"\n | \n"
" | \n"
" | \n"
" | \n"
" | \n"
" | \n"
" | \n"
" / ̄ ̄\| \n"
"< ´・ |\ \n"
" | 3 | 丶\ \n"
"< 、・ | \ \n"
" \__/∪ _ ∪) \n"
" U U\n")
@register(outgoing=True, pattern='^.awkwok(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("────██──────▀▀▀██\n"
"──▄▀█▄▄▄─────▄▀█▄▄▄\n"
"▄▀──█▄▄──────█─█▄▄\n"
"─▄▄▄▀──▀▄───▄▄▄▀──▀▄\n"
"─▀───────▀▀─▀───────▀▀\n`Awkwokwokwok..`")
@register(outgoing=True, pattern='^.ular(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("░░░░▓\n"
"░░░▓▓\n"
"░░█▓▓█\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░░░██▓▓██\n"
"░░░░██▓▓██\n"
"░░░██▓▓██\n"
"░░██▓▓██\n"
"░░██▓▓██\n"
"░░██▓▓██\n"
"░░██▓▓██\n"
"░░██▓▓██\n"
"░░██▓▓██\n"
"░░░██▓▓███\n"
"░░░░██▓▓████\n"
"░░░░░██▓▓█████\n"
"░░░░░░██▓▓██████\n"
"░░░░░░███▓▓███████\n"
"░░░░░████▓▓████████\n"
"░░░░█████▓▓█████████\n"
"░░░█████░░░█████●███\n"
"░░████░░░░░░░███████\n"
"░░███░░░░░░░░░██████\n"
"░░██░░░░░░░░░░░████\n"
"░░░░░░░░░░░░░░░░███\n"
"░░░░░░░░░░░░░░░░░░░\n")
@register(outgoing=True, pattern='^.y(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("‡‡‡‡‡‡‡‡‡‡‡‡▄▄▄▄\n"
"‡‡‡‡‡‡‡‡‡‡‡█‡‡‡‡█\n"
"‡‡‡‡‡‡‡‡‡‡‡█‡‡‡‡█\n"
"‡‡‡‡‡‡‡‡‡‡█‡‡‡‡‡█\n"
"‡‡‡‡‡‡‡‡‡█‡‡‡‡‡‡█\n"
"██████▄▄█‡‡‡‡‡‡████████▄\n"
"▓▓▓▓▓▓█‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡█\n"
"▓▓▓▓▓▓█‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡█\n"
"▓▓▓▓▓▓█‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡█\n"
"▓▓▓▓▓▓█‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡█\n"
"▓▓▓▓▓▓█‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡█\n"
"▓▓▓▓▓▓█████‡‡‡‡‡‡‡‡‡‡‡‡██\n"
"█████‡‡‡‡‡‡‡██████████\n")
@register(outgoing=True, pattern='^.tank(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("█۞███████]▄▄▄▄▄▄▄▄▄▄▃ \n"
"▂▄▅█████████▅▄▃▂…\n"
"[███████████████████]\n"
"◥⊙▲⊙▲⊙▲⊙▲⊙▲⊙▲⊙◤\n")
@register(outgoing=True, pattern='^.babi(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("┈┈┏━╮╭━┓┈╭━━━━╮\n"
"┈┈┃┏┗┛┓┃╭┫Ngok ┃\n"
"┈┈╰┓▋▋┏╯╯╰━━━━╯\n"
"┈╭━┻╮╲┗━━━━╮╭╮┈\n"
"┈┃▎▎┃╲╲╲╲╲╲┣━╯┈\n"
"┈╰━┳┻▅╯╲╲╲╲┃┈┈┈\n"
"┈┈┈╰━┳┓┏┳┓┏╯┈┈┈\n"
"┈┈┈┈┈┗┻┛┗┻┛┈┈┈┈\n")
@register(outgoing=True, pattern='^.ajg(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("╥━━━━━━━━╭━━╮━━┳\n"
"╢╭╮╭━━━━━┫┃▋▋━▅┣\n"
"╢┃╰┫┈┈┈┈┈┃┃┈┈╰┫┣\n"
"╢╰━┫┈┈┈┈┈╰╯╰┳━╯┣\n"
"╢┊┊┃┏┳┳━━┓┏┳┫┊┊┣\n"
"╨━━┗┛┗┛━━┗┛┗┛━━┻\n")
@register(outgoing=True, pattern='^.bernyanyi(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("**Ganteng Doang Gak Bernyanyi (ง˙o˙)ว**")
sleep(2)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
sleep(1)
await typew.edit("**♪┏(・o・)┛♪┗ ( ・o・) ┓**")
sleep(1)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
sleep(1)
await typew.edit("**♪┏(・o・)┛♪┗ ( ・o・) ┓**")
sleep(1)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
sleep(1)
await typew.edit("**♪┏(・o・)┛♪┗ ( ・o・) ┓**")
sleep(1)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
sleep(1)
await typew.edit("**♪┏(・o・)┛♪┗ ( ・o・) ┓**")
sleep(1)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
sleep(1)
await typew.edit("**♪┏(・o・)┛♪┗ ( ・o・) ┓**")
sleep(1)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
sleep(1)
await typew.edit("**♪┏(・o・)┛♪┗ ( ・o・) ┓**")
sleep(1)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
sleep(1)
await typew.edit("**♪┏(・o・)┛♪┗ ( ・o・) ┓**")
sleep(1)
await typew.edit("**♪┗ ( ・o・) ┓♪┏ (・o・) ┛♪**")
CMD_HELP.update({
"vip":
"`.bulan` ; `.hati` ; `.bernyanyi`\
\nUsage: liat aja.\
\n\n`.helikopter` ; `.tank` ; `.tembak`\n`.bundir`\
\nUsage: liat sendiri\
\n\n`.y`\
\nUsage: jempol\
\n\n`.awkwok`\
\nUsage: ketawa lari.\
\n\n`.ular` ; `.babi` ; `.ajg`\
\nUsage: liat sendiri."
})
| 30.400631
| 64
| 0.217495
|
4a15ea5dd8e24307c09dac0f015a5e6ed79bbd59
| 643
|
py
|
Python
|
src/cipr/commands/app.py
|
six8/corona-cipr
|
a2f45761080c874afa39bf95fd5c4467c8eae272
|
[
"MIT"
] | 1
|
2015-04-19T20:53:15.000Z
|
2015-04-19T20:53:15.000Z
|
src/cipr/commands/app.py
|
six8/corona-cipr
|
a2f45761080c874afa39bf95fd5c4467c8eae272
|
[
"MIT"
] | null | null | null |
src/cipr/commands/app.py
|
six8/corona-cipr
|
a2f45761080c874afa39bf95fd5c4467c8eae272
|
[
"MIT"
] | 2
|
2016-04-11T15:35:10.000Z
|
2020-04-13T10:42:32.000Z
|
import clik
from os import path
import os
from optparse import make_option as opt
from cipr.commands.cfg import CiprCfg
from cipr.commands import env
def _args(opts):
env.project_directory = opts.project_directory
return dict(
env = env,
ciprcfg = CiprCfg(path.join(env.project_directory, '.ciprcfg'))
)
command = clik.App('cipr',
version='0.8',
description='Corona SDK package manager.',
console_opts=True,
conf_enabled=False,
opts= opt('-d', '--project',
dest='project_directory', default=path.abspath(os.getcwd()),
help='Project directory'
),
args_callback=_args
)
| 23.814815
| 71
| 0.679627
|
4a15eb83ee61430cd91694b98fec5588198fb378
| 4,040
|
py
|
Python
|
app/main/controller/langid_controller.py
|
meedan/alegre
|
ad28736f53b8905882e196e90cac66d39db341a3
|
[
"MIT"
] | 11
|
2018-02-07T00:16:54.000Z
|
2021-05-13T22:47:07.000Z
|
app/main/controller/langid_controller.py
|
meedan/alegre
|
ad28736f53b8905882e196e90cac66d39db341a3
|
[
"MIT"
] | 47
|
2018-11-26T23:17:37.000Z
|
2022-03-25T16:12:05.000Z
|
app/main/controller/langid_controller.py
|
meedan/alegre
|
ad28736f53b8905882e196e90cac66d39db341a3
|
[
"MIT"
] | 9
|
2019-05-23T22:06:03.000Z
|
2020-10-27T20:45:04.000Z
|
from flask import request, current_app as app
from flask_restplus import Resource, Namespace, fields
import redis
import hashlib
import json
import importlib
import tenacity
from twitter_text import extract_urls_with_indices, extract_emojis_with_indices
api = Namespace('langid', description='langid operations')
langid_request = api.model('langid_request', {
'text': fields.String(required=True, description='text to identify'),
'provider': fields.String(required=False, description='langid provider to use')
})
def _after_log(retry_state):
app.logger.debug("Retrying langid...")
@api.route('/')
class LangidResource(Resource):
def respond(self):
provider = app.config['PROVIDER_LANGID']
if(request.args):
text=request.args.get('text')
if 'provider' in request.args: provider = request.args.get('provider')
else:
text=request.json['text']
if 'provider' in request.json: provider = request.json['provider']
# Read from cache first.
r = redis.Redis(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'], db=app.config['REDIS_DATABASE'])
key = 'langid:' + provider + ':' + hashlib.md5(text.encode('utf-8')).hexdigest()
try:
result = json.loads(r.get(key))
except:
result = None
# Otherwise, call the service and cache the result.
if result == None:
result = self.langid(LangidResource.cleanup_input(text), provider)
r.set(key, json.dumps(result))
return result
@api.response(200, 'langid successfully queried.')
@api.doc('Identify the language of a text document')
@api.expect(langid_request, validate=False)
def get(self):
return self.respond()
@api.response(200, 'langid successfully queried.')
@api.doc('Identify the language of a text document')
@api.expect(langid_request, validate=False)
def post(self):
return self.respond()
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=0, max=4), stop=tenacity.stop_after_delay(10), after=_after_log)
def langid(self, text, provider):
if not text:
return {
'result': {
'language': 'und',
'confidence': 1.0
},
'raw': {}
}
# In module `app.main.lib.langid`,
# look for a class called `#{ProviderName}LangidProvider`, e.g. `GoogleLangidProvider`
# then call static method `langid()` on that class.
class_ = getattr(importlib.import_module('app.main.lib.langid'), provider.title() + 'LangidProvider')
# Cleanup the result, then add the provider information.
return dict(LangidResource.cleanup_result(class_.langid(text)), **{ 'provider': provider })
@staticmethod
def cleanup_result(result):
clean = result
language = clean['result']['language']
# TODO Return 'und' if confidence is low.
# Remove region codes.
language = language.split('-', 1)[0]
# Special case: Convert Tagalog to Filipino.
if language == 'tl':
language = 'fil'
clean['result']['language'] = language
return clean
@staticmethod
def cleanup_input(text):
clean = text
clean = LangidResource.slice_around(clean, extract_urls_with_indices(clean))
clean = LangidResource.slice_around(clean, extract_emojis_with_indices(clean))
return clean.strip()
@staticmethod
def slice_around(text, ranges):
# We want the text surrounding the given ranges, so we:
# - Create surrounding ranges
# - Create text slice for each range (end of range n -> start of range n+1)
# - Join slices into a full string
slices = [{'indices': [0, 0]}] + ranges + [{'indices': [len(text), len(text)]}]
return "".join([text[s['indices'][1] : slices[i+1]['indices'][0] ] for i, s in enumerate(slices[:-1])])
| 37.06422
| 133
| 0.62896
|
4a15ebc94cc81949f42bbb3871a625a56468ef33
| 79
|
py
|
Python
|
torchsar/version.py
|
aisari/torchsar
|
05a46610d68bc884743a483565279f361ade5384
|
[
"Apache-2.0"
] | 3
|
2021-06-04T13:13:07.000Z
|
2021-08-24T16:28:31.000Z
|
torchsar/version.py
|
aisari/torchsar
|
05a46610d68bc884743a483565279f361ade5384
|
[
"Apache-2.0"
] | null | null | null |
torchsar/version.py
|
aisari/torchsar
|
05a46610d68bc884743a483565279f361ade5384
|
[
"Apache-2.0"
] | 2
|
2021-08-15T09:01:03.000Z
|
2021-12-21T08:53:53.000Z
|
# Copyright (c) 2015-2030, Zhi Liu. All rights reserved.
__version__ = '1.1'
| 19.75
| 57
| 0.683544
|
4a15ec11c1288a3e857d72689baeb1a59c6dd600
| 1,731
|
py
|
Python
|
ipregistry/cache.py
|
ipregistry/ipregistry-python
|
224888fa198c98423a5ac949eb588e7941de89a8
|
[
"Apache-2.0"
] | 7
|
2019-07-28T08:29:54.000Z
|
2021-08-06T10:42:31.000Z
|
ipregistry/cache.py
|
ipregistry/ipregistry-python
|
224888fa198c98423a5ac949eb588e7941de89a8
|
[
"Apache-2.0"
] | 15
|
2020-07-05T15:22:58.000Z
|
2022-01-10T17:01:20.000Z
|
ipregistry/cache.py
|
ipregistry/ipregistry-python
|
224888fa198c98423a5ac949eb588e7941de89a8
|
[
"Apache-2.0"
] | 3
|
2020-01-06T13:43:41.000Z
|
2020-09-25T11:59:04.000Z
|
"""
Copyright 2019 Ipregistry (https://ipregistry.co).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc, six
from cachetools import TTLCache
@six.add_metaclass(abc.ABCMeta)
class IpregistryCache:
@abc.abstractmethod
def get(self, key):
pass
@abc.abstractmethod
def put(self, key, data):
pass
@abc.abstractmethod
def invalidate(self, key):
pass
@abc.abstractmethod
def invalidateAll(self):
pass
class InMemoryCache(IpregistryCache):
def __init__(self, maxsize=2048, ttl=600):
self._cache = TTLCache(maxsize, ttl)
def get(self, key):
try:
return self._cache[key]
except:
return None
def put(self, key, data):
self._cache[key] = data
def invalidate(self, key):
del self._cache[key]
def invalidateAll(self):
for key in self._cache:
del self._cache[key]
class NoCache(IpregistryCache):
    """A no-op cache: nothing is ever stored, every lookup is a miss."""

    def __init__(self, maxsize=2048, ttl=86400):
        """Arguments are accepted only to mirror InMemoryCache's signature."""

    def get(self, key):
        """Always report a cache miss."""
        return None

    def put(self, key, data):
        """Discard *data*; nothing is stored."""

    def invalidate(self, key):
        """No-op: there is never anything to remove."""

    def invalidateAll(self):
        """No-op: the cache is always empty."""
| 23.391892
| 76
| 0.64818
|
4a15ecb2004abcb64e9d12196bb5c7c031896d2f
| 609,866
|
py
|
Python
|
python/paddle/fluid/layers/nn.py
|
hang245141253/Paddle
|
ee13a2ab88c1896c2f73ebe7c9c78364b6befd54
|
[
"Apache-2.0"
] | 1
|
2021-12-27T02:49:16.000Z
|
2021-12-27T02:49:16.000Z
|
python/paddle/fluid/layers/nn.py
|
wozna/Paddle
|
0ecf441af14d554c85f69a206e3e3a9bdd86fb13
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/nn.py
|
wozna/Paddle
|
0ecf441af14d554c85f69a206e3e3a9bdd86fb13
|
[
"Apache-2.0"
] | 1
|
2021-02-08T16:02:12.000Z
|
2021-02-08T16:02:12.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the neural network.
"""
from __future__ import print_function
import os
import inspect
import warnings
import numpy as np
import six
import paddle
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor
from . import utils
from .. import unique_name
from functools import reduce
from .. import core
from ...utils import deprecated
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
from paddle.utils import deprecated
# Public API of this module: the layer functions re-exported through
# ``paddle.fluid.layers``. Underscore-prefixed helpers defined below
# (e.g. ``_pull_sparse``) are deliberately excluded.
__all__ = [
    'fc',
    'embedding',
    'linear_chain_crf',
    'crf_decoding',
    'cos_sim',
    'chunk_eval',
    'conv2d',
    'conv3d',
    'softmax',
    'pool2d',
    'pool3d',
    'adaptive_pool2d',
    'adaptive_pool3d',
    'batch_norm',
    'inplace_abn',
    'instance_norm',
    'data_norm',
    'conv2d_transpose',
    'conv3d_transpose',
    'reduce_sum',
    'reduce_mean',
    'reduce_max',
    'reduce_min',
    'reduce_prod',
    'reduce_all',
    'reduce_any',
    'dropout',
    'split',
    'ctc_greedy_decoder',
    'l2_normalize',
    'matmul',
    'topk',
    'transpose',
    'im2sequence',
    'row_conv',
    'multiplex',
    'layer_norm',
    'group_norm',
    'spectral_norm',
    'smooth_l1',
    'one_hot',
    'autoincreased_step_counter',
    'reshape',
    'squeeze',
    'unsqueeze',
    'lod_reset',
    'lod_append',
    'lrn',
    'pad',
    'pad_constant_like',
    'label_smooth',
    'roi_pool',
    'roi_align',
    'dice_loss',
    'image_resize',
    'image_resize_short',
    'resize_linear',
    'resize_bilinear',
    'resize_trilinear',
    'resize_nearest',
    'gather',
    'gather_nd',
    'scatter',
    'scatter_nd_add',
    'scatter_nd',
    'random_crop',
    'mean_iou',
    'relu',
    'selu',
    'log',
    'crop',
    'crop_tensor',
    'elu',
    'relu6',
    'pow',
    'stanh',
    'hard_sigmoid',
    'swish',
    'prelu',
    'brelu',
    'leaky_relu',
    'soft_relu',
    'flatten',
    'stack',
    'pad2d',
    'unstack',
    'unique',
    'unique_with_counts',
    'expand',
    'expand_as',
    'scale',
    'elementwise_add',
    'elementwise_div',
    'elementwise_sub',
    'elementwise_mul',
    'elementwise_max',
    'elementwise_min',
    'elementwise_pow',
    'elementwise_mod',
    'elementwise_floordiv',
    'uniform_random_batch_size_like',
    'gaussian_random',
    'sampling_id',
    'gaussian_random_batch_size_like',
    'sum',
    'slice',
    'strided_slice',
    'shape',
    'rank',
    'size',
    'logical_and',
    'logical_or',
    'logical_xor',
    'logical_not',
    'clip',
    'clip_by_norm',
    'mean',
    'mul',
    'maxout',
    'space_to_depth',
    'affine_grid',
    'affine_channel',
    'similarity_focus',
    'hash',
    'grid_sampler',
    'log_loss',
    'add_position_encoding',
    'bilinear_tensor_product',
    'merge_selected_rows',
    'get_tensor_from_selected_rows',
    'shuffle_channel',
    'temporal_shift',
    'py_func',
    'psroi_pool',
    'prroi_pool',
    'pixel_shuffle',
    'fsp_matrix',
    'continuous_value_model',
    'where',
    'sign',
    'deformable_conv',
    'unfold',
    'deformable_roi_pooling',
    'filter_by_instag',
    'shard_index',
    'hard_swish',
    'mish',
    'gather_tree',
    'uniform_random',
    'unbind',
]
@dygraph_only
def _elementwise_op_in_dygraph(x,
                               y,
                               axis=-1,
                               act=None,
                               use_mkldnn=False,
                               op_name=None):
    """Eagerly run the elementwise kernel named *op_name* on (x, y).

    Dispatches to ``core.ops.<op_name>`` with the given broadcast *axis*,
    then applies the optional activation *act* in dygraph mode.
    """
    kernel = getattr(core.ops, op_name)
    result = kernel(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
    return dygraph_utils._append_activation_in_dygraph(
        result, act, use_mkldnn=use_mkldnn)
def fc(input,
       size,
       num_flatten_dims=1,
       param_attr=None,
       bias_attr=None,
       act=None,
       name=None):
    """
    :api_attr: Static Graph

    **Fully Connected Layer**

    Multiplies each input Tensor (or LoDTensor) by its own learned weight
    matrix and sums the products into a single output of shape
    :math:`[M, size]` (M is the batch size). A bias is added when
    :attr:`bias_attr` allows it, and :attr:`act` is applied last:

    .. math::

        Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})

    Inputs with more than two dimensions are first flattened to a matrix:
    the leading :attr:`num_flatten_dims` dimensions form the rows, the
    remaining dimensions form the columns.

    Args:
        input (Variable|list of Variable): Tensor(s)/LoDTensor(s) of rank
            at least 2 with data type float32 or float64.
        size (int): Number of output units (feature size of the output).
        num_flatten_dims (int): How many leading dimensions to flatten into
            the matrix height; the rest become its width. E.g. for a shape
            of [2, 3, 4, 5, 6] and num_flatten_dims=3, the flattened matrix
            is [24, 30]. ``-1`` means all but the last dimension. Default: 1.
        param_attr (ParamAttr): Property of the weight parameter. Default:
            None (framework default). See :ref:`api_fluid_ParamAttr`.
        bias_attr (ParamAttr): Property of the bias parameter. Default:
            None (framework default). See :ref:`api_fluid_ParamAttr`.
        act (str): Activation applied to the output, e.g. tanh, softmax,
            sigmoid, relu. Default: None (no activation).
        name (str, optional): Name prefix for created variables. Default:
            None. See :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor or LoDTensor with the same data type as the input.

    Raises:
        ValueError: If the rank of the input Tensor is less than 2.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # single input tensor
          data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
          fc = fluid.layers.fc(input=data, size=1000, act="tanh")
          # multiple input tensors
          data_1 = fluid.data(name="data_1", shape=[-1, 32], dtype="float32")
          data_2 = fluid.data(name="data_2", shape=[-1, 36], dtype="float32")
          fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
    """
    # LayerHelper must be created before any other local is bound: it
    # captures exactly the argument set via locals().
    helper = LayerHelper("fc", **locals())
    check_type(input, 'input', (list, tuple, Variable), 'fc')
    if isinstance(input, (list, tuple)):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'input', ['float16', 'float32', 'float64'], 'fc')

    # One "mul" per input tensor; the partial products are summed below.
    mul_results = []
    for input_var, w_attr in helper.iter_inputs_and_params():
        input_shape = input_var.shape
        if num_flatten_dims == -1:
            num_flatten_dims = len(input_shape) - 1
        # Weight shape: [product of the flattened trailing dims, size].
        flattened_width = reduce(lambda a, b: a * b,
                                 input_shape[num_flatten_dims:], 1)
        w = helper.create_parameter(
            attr=w_attr,
            shape=[flattened_width, size],
            dtype=dtype,
            is_bias=False)
        partial = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="mul",
            inputs={"X": input_var,
                    "Y": w},
            outputs={"Out": partial},
            attrs={"x_num_col_dims": num_flatten_dims,
                   "y_num_col_dims": 1})
        mul_results.append(partial)

    if len(mul_results) == 1:
        pre_bias = mul_results[0]
    else:
        pre_bias = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="sum",
            inputs={"X": mul_results},
            outputs={"Out": pre_bias},
            attrs={"use_mkldnn": False})
    # Bias, then activation.
    pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims)
    return helper.append_activation(pre_activation)
@deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
def embedding(input,
              size,
              is_sparse=False,
              is_distributed=False,
              padding_idx=None,
              param_attr=None,
              dtype='float32'):
    """
    :api_attr: Static Graph

    **WARNING:** This OP will be deprecated in a future release. It requires
    the last dimension of the input Tensor shape to be exactly 1; it is
    recommended to use fluid. :ref:`api_fluid_embedding` instead.

    Looks up rows of a learned ``size = (vocab_size, emb_size)`` embedding
    table for the ids in :attr:`input`. The output shape is the input shape
    with its trailing 1 replaced by ``emb_size``.

    **Note:** every id in :attr:`input` must satisfy
    :math:`0 <= id < size[0]`, otherwise an exception is raised at runtime.

    Args:
        input(Variable): int64 Tensor or LoDTensor holding the ids; its
            last dimension must be 1.
        size(tuple|list): ``(vocab_size, emb_size)`` of the lookup table.
        is_sparse(bool): Use sparse gradient updates (usually faster, but
            not supported by every optimizer, e.g. Adadelta/Adamax/Lamb).
            Default: False.
        is_distributed(bool): Deprecated and ignored; a warning is emitted
            when set. Default: False.
        padding_idx(int|long|None): Id whose embedding is all zeros and is
            never updated during training. A negative value is interpreted
            as ``vocab_size + padding_idx``. None disables padding.
            Default: None.
        param_attr(ParamAttr): Property of the embedding parameter; use it
            with :ref:`api_fluid_initializer_NumpyArrayInitializer` to load
            custom or pre-trained word vectors. Default: None.
        dtype(str|core.VarDesc.VarType): Data type of the output Tensor,
            e.g. float32 or float64. Default: float32.

    Returns:
        Variable: Embedding Tensor or LoDTensor with dtype :attr:`dtype`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.data(name='x', shape=[None, 1], dtype='int64')

          # plain lookup table
          emb_1 = fluid.embedding(input=data, size=[128, 64])

          # pre-trained word vectors
          weight_data = np.random.random(size=(128, 100))
          w_param_attrs = fluid.ParamAttr(
              name="emb_weight",
              learning_rate=0.5,
              initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
              trainable=True)
          emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
    """
    helper = LayerHelper('embedding', **locals())
    check_variable_and_dtype(input, 'input', ['int64'],
                             'fluid.layers.embedding')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                'fluid.layers.embedding')
    if is_distributed:
        # Distributed embedding moved to fluid.contrib.layers.sparse_embedding.
        is_distributed = False
        warnings.warn(
            "is_distributed is go out of use, `fluid.contrib.layers.sparse_embedding` is your needed"
        )
    remote_prefetch = bool(is_sparse)
    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
    out = helper.create_variable_for_type_inference(dtype)
    # Normalize padding_idx: None -> disabled (-1); negative -> counted
    # back from the vocabulary size.
    if padding_idx is None:
        padding_idx = -1
    elif padding_idx < 0:
        padding_idx = size[0] + padding_idx
    helper.append_op(
        type='lookup_table',
        inputs={'Ids': input,
                'W': w},
        outputs={'Out': out},
        attrs={
            'is_sparse': is_sparse,
            'is_distributed': is_distributed,
            'remote_prefetch': remote_prefetch,
            'padding_idx': padding_idx
        })
    return out
def _pull_sparse(input,
                 size,
                 table_id,
                 accessor_class,
                 name="embedding",
                 ctr_label_name="",
                 padding_id=0,
                 dtype='float32',
                 scale_sparse_grad=True):
    """
    **Pull Fleet Sparse Layer**

    Looks up the embeddings of the IDs in :attr:`input` in a Fleet (pslib)
    lookup table.

    Args:
        input(Variable|list of Variable): int64 Tensor(s) holding the IDs.
        size(int): Size of each embedding vector.
        table_id(int): Fleet table id of this embedding.
        accessor_class(str): pslib accessor of the table, e.g.
            "DownpourCtrAccessor".
        name(str): Layer name. Default: "embedding".
        ctr_label_name(str): Layer name of click. Default: "".
        padding_id(int): Padding id used during lookup. Default: 0.
        dtype(str): Output data type; only float32 is supported.
            Default: 'float32'.
        scale_sparse_grad(bool): Scale the sparse gradient by the batch
            size. Default: True.

    Returns:
        Variable|list of Variable: The embeddings of the supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': [var.name for var in inputs],
        # kept only for compatibility with the plain embedding op
        'is_distributed': True
    }
    # The weight variable exists only for compatibility with the embedding op.
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
    helper.append_op(
        type='pull_sparse',
        inputs={'Ids': inputs,
                'W': w},
        outputs={'Out': outs},
        attrs=attrs)
    return outs[0] if len(outs) == 1 else outs
def _pull_sparse_v2(input,
                    size,
                    table_id,
                    accessor_class,
                    name="embedding",
                    ctr_label_name="",
                    padding_id=0,
                    dtype='float32',
                    scale_sparse_grad=True):
    """
    **Pull Fleet Sparse Layer (v2)**

    Looks up the embeddings of the IDs in :attr:`input` in a Fleet (pslib)
    lookup table, via the ``pull_sparse_v2`` op.

    Args:
        input(Variable|list of Variable): int64 Tensor(s) holding the IDs.
        size(int): Size of each embedding vector.
        table_id(int): pslib table id of this embedding.
        accessor_class(str): Fleet accessor of the table, e.g.
            "DownpourCtrAccessor".
        name(str): Layer name. Default: "embedding".
        ctr_label_name(str): Layer name of click. Default: "".
        padding_id(int): Padding id used during lookup. Default: 0.
        dtype(str): Output data type; only float32 is supported.
            Default: 'float32'.
        scale_sparse_grad(bool): Scale the sparse gradient by the batch
            size. Default: True.

    Returns:
        Variable|list of Variable: The embeddings of the supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse_v2(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': [var.name for var in inputs],
        # kept only for compatibility with the plain embedding op
        'is_distributed': True
    }
    # The weight variable exists only for compatibility with the embedding op.
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
    helper.append_op(
        type='pull_sparse_v2',
        inputs={'Ids': inputs,
                'W': w},
        outputs={'Out': outs},
        attrs=attrs)
    return outs[0] if len(outs) == 1 else outs
def _pull_box_sparse(input, size, dtype='float32'):
    """
    **Pull Box Sparse Layer**

    Looks up the embeddings of the IDs in :attr:`input` in a BoxPS lookup
    table.

    Args:
        input(Variable|list of Variable): int64 Tensor(s) holding the IDs.
        size(int): Size of each embedding vector.
        dtype(str): Output data type; only float32 is supported.
            Default: 'float32'.

    Returns:
        Variable|list of Variable: The embeddings of the supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_box_sparse(input=data, size=11)
    """
    helper = LayerHelper('pull_box_sparse', **locals())
    if dtype != 'float32':
        raise ValueError(
            "BoxPS only support float type embedding now, and your type is: " +
            dtype)
    helper.input_dtype()
    inputs = helper.multiple_input()
    # One output variable per input tensor.
    outs = [helper.create_variable_for_type_inference(dtype) for _ in inputs]
    helper.append_op(
        type='pull_box_sparse',
        inputs={'Ids': inputs},
        outputs={'Out': outs},
        attrs={'size': size})
    return outs[0] if len(outs) == 1 else outs
@templatedoc()
def linear_chain_crf(input, label, param_attr=None, length=None):
    """
    :api_attr: Static Graph

    Linear Chain CRF.

    ${comment}

    Args:
        input(${emission_type}): ${emission_comment}
        label(${label_type}): ${label_comment}
        length(${length_type}): ${length_comment}
        param_attr(ParamAttr): Attribute of the learnable transition
            parameter.

    Returns:
        output(${emission_exps_type}): ${emission_exps_comment} \n
        output(${transition_exps_type}): ${transition_exps_comment} \n
        output(${log_likelihood_type}): ${log_likelihood_comment} \n

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            # LoDTensor-based usage
            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                input_data = fluid.data(name='input_data', shape=[-1,10], dtype='float32')
                label = fluid.data(name='label', shape=[-1,1], dtype='int')
                emission = fluid.layers.fc(input=input_data, size=10, act="tanh")
                crf_cost = fluid.layers.linear_chain_crf(
                    input=emission,
                    label=label,
                    param_attr=fluid.ParamAttr(
                        name='crfw',
                        learning_rate=0.01))

            # padded-Tensor usage: pass a `length` tensor and use
            # num_flatten_dims=2 on the emission fc.
            # The learned transition parameter can be fetched afterwards with
            # fluid.global_scope().find_var('crfw').get_tensor()
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'linear_chain_crf')
    check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf')
    helper = LayerHelper('linear_chain_crf', **locals())
    # With a `length` tensor the emission is padded [batch, seq, tag],
    # otherwise it is a LoDTensor of shape [tokens, tag].
    size = input.shape[2] if length else input.shape[1]
    in_dtype = helper.input_dtype()
    # Transition matrix has two extra rows for start/end weights.
    transition = helper.create_parameter(
        attr=helper.param_attr, shape=[size + 2, size], dtype=in_dtype)
    alpha = helper.create_variable_for_type_inference(dtype=in_dtype)
    emission_exps = helper.create_variable_for_type_inference(dtype=in_dtype)
    transition_exps = helper.create_variable_for_type_inference(dtype=in_dtype)
    log_likelihood = helper.create_variable_for_type_inference(dtype=in_dtype)
    crf_inputs = {
        "Emission": [input],
        "Transition": transition,
        "Label": [label]
    }
    if length:
        crf_inputs['Length'] = [length]
    helper.append_op(
        type='linear_chain_crf',
        inputs=crf_inputs,
        outputs={
            "Alpha": [alpha],
            "EmissionExps": [emission_exps],
            "TransitionExps": transition_exps,
            "LogLikelihood": log_likelihood
        })
    return log_likelihood
@templatedoc()
def crf_decoding(input, param_attr, label=None, length=None):
    """
    :api_attr: Static Graph

    ${comment}

    Args:
        input(${emission_type}): ${emission_comment}
        param_attr (ParamAttr|None): Attribute used to look up the
            transition parameter learned by ``linear_chain_crf``; pass the
            same ``name`` so the shared parameter is found. Default: None,
            which means the default weight parameter property is used.
        label(${label_type}, optional): ${label_comment}
        length(${length_type}, optional): ${length_comment}

    Returns:
        Variable: ${viterbi_path_comment}

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid

           # LoDTensor-based example
           num_labels = 10
           feature = fluid.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1)
           label = fluid.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1)
           emission = fluid.layers.fc(input=feature, size=num_labels)

           crf_cost = fluid.layers.linear_chain_crf(input=emission, label=label,
                     param_attr=fluid.ParamAttr(name="crfw"))
           crf_decode = fluid.layers.crf_decoding(input=emission,
                     param_attr=fluid.ParamAttr(name="crfw"))

           # For padded Tensors, also pass `length=` (see linear_chain_crf).
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'crf_decoding')
    helper = LayerHelper('crf_decoding', **locals())
    # The transition parameter is shared with linear_chain_crf by name.
    transition = helper.get_parameter(param_attr.name)
    viterbi_path = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)
    op_inputs = {"Emission": [input], "Transition": transition, "Label": label}
    if length:
        op_inputs['Length'] = length
    helper.append_op(
        type='crf_decoding',
        inputs=op_inputs,
        outputs={"ViterbiPath": [viterbi_path]})
    return viterbi_path
@templatedoc()
def cos_sim(X, Y):
    """
    ${comment}

    Args:
        X (Variable): ${x_comment}.
        Y (Variable): ${y_comment}.

    Returns:
        A Variable holding LoDTensor representing the output of cosine(X, Y).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 7], dtype='float32')
            y = fluid.data(name='y', shape=[1, 7], dtype='float32')
            out = fluid.layers.cos_sim(x, y)
    """
    check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
    check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
    helper = LayerHelper('cos_sim', **locals())
    # The op emits the similarity plus the norms of X and Y.
    out, xnorm, ynorm = (
        helper.create_variable_for_type_inference(dtype=X.dtype)
        for _ in range(3))
    helper.append_op(
        type='cos_sim',
        inputs={'X': [X],
                'Y': [Y]},
        outputs={'Out': [out],
                 'XNorm': [xnorm],
                 'YNorm': [ynorm]})
    return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.dropout")
def dropout(x,
            dropout_prob,
            is_test=False,
            seed=None,
            name=None,
            dropout_implementation="downgrade_in_infer"):
    """
    Computes dropout.

    Randomly zeroes units of ``x`` with probability ``dropout_prob`` during
    training, as a regularization against neuron co-adaptation; the other
    units are left unchanged.

    Args:
        x (Variable): Input tensor of dtype float16, float32 or float64.
        dropout_prob (float): Probability of setting a unit to zero.
        is_test (bool): Whether this is the test (inference) phase.
        seed (int): Optional Python integer seed. A fixed seed always drops
            the same units — do NOT fix it during real training.
            Default: None.
        name (str|None): Optional layer name; auto-generated if None.
        dropout_implementation (str): One of:

            1. 'downgrade_in_infer' (default), downgrade at inference:
               train: ``out = x * mask``;
               inference: ``out = x * (1.0 - dropout_prob)``.
            2. 'upscale_in_train', upscale at training:
               train: ``out = x * mask / (1.0 - dropout_prob)``;
               inference: ``out = x``.

            (mask has the same shape as ``x`` with 0/1 values; the ratio of
            zeros is ``dropout_prob``.)

    Returns:
        A Variable with the same shape and data type as ``x``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
            dropped = fluid.layers.dropout(x, dropout_prob=0.5)
    """

    # NOTE: keep this nested def before the LayerHelper call below so the
    # set of names captured via **locals() stays unchanged.
    def get_attrs(prog, dropout_prob, is_test, seed):
        # Fall back to the program-level random seed when none was given.
        if (seed is None or seed == 0) and prog.random_seed != 0:
            seed = prog.random_seed
        return {
            'dropout_prob': dropout_prob,
            'is_test': is_test,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0,
            'dropout_implementation': dropout_implementation,
        }

    if in_dygraph_mode():
        # Eager mode dispatches straight to the C++ kernel.
        if (seed is None or
                seed == 0) and default_main_program().random_seed != 0:
            seed = default_main_program().random_seed
        _is_test = not _dygraph_tracer()._train_mode
        out, mask = core.ops.dropout(
            x, 'dropout_prob', dropout_prob, 'is_test', _is_test, 'fix_seed',
            seed is not None, 'seed', seed if seed is not None else 0,
            'dropout_implementation', dropout_implementation)
        return out

    helper = LayerHelper('dropout', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'dropout')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    mask = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
    helper.append_op(
        type='dropout',
        inputs={'X': [x]},
        outputs={'Out': [out],
                 'Mask': [mask]},
        attrs=get_attrs(helper.main_program, dropout_prob, is_test, seed))
    return out
@templatedoc()
def chunk_eval(input,
               label,
               chunk_scheme,
               num_chunk_types,
               excluded_chunk_types=None,
               seq_length=None):
    """
    Compute the precision, recall and F1-score for chunk detection.

    This is commonly used in sequence tagging tasks such as Named Entity
    Recognition (NER). The supported tagging schemes are IOB, IOE, IOBES and
    IO (also known as plain). For some basics of chunking, please refer to
    `Chunking with Support Vector Machines
    <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .

    Labels are encoded as integer ids rather than strings; each id is mapped
    to a (tag type, chunk type) pair by:

    .. code-block:: python

        tag_type = label % num_tag_type
        chunk_type = label / num_tag_type

    where ``num_tag_type`` is the number of tag types in the chosen tagging
    scheme (e.g. 1 for plain IO, 2 for IOB/IOE, 4 for IOBES) and
    ``num_chunk_type`` is the number of chunk (entity) types such as PER,
    ORG and LOC.

    Args:
        input (Variable): The predicted labels from the network. A Tensor
            with shape `[N, M, 1]` (`N` batch size, `M` sequence length), or
            a LoDTensor with shape `[N, 1]` where `N` is the total sequence
            length of the mini-batch. The data type must be int64.
        label (Variable): The ground-truth labels. It should have the same
            shape, lod and data type as ``input`` .
        chunk_scheme (str): The tagging scheme used. The value must be IOB,
            IOE, IOBES or plain.
        num_chunk_types (int): The number of chunk types.
        excluded_chunk_types (list, optional): Chunk type ids (integers) that
            should not be taken into account. Default None.
        seq_length (Variable, optional): A 1D Tensor containing the length of
            each sequence when ``input`` and ``label`` are Tensor. It needn't
            be provided if ``input`` and ``label`` are LoDTensor. Default None.

    Returns:
        tuple: A tuple of (precision, recall, F1-score, chunk number \
            detected, chunk number in ground-truth, chunk number correctly \
            detected). Each element is a Tensor with shape `[1]`; precision, \
            recall and F1-score are float32, the three counters are int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            dict_size = 10000
            label_dict_len = 7
            sequence = fluid.data(
                name='id', shape=[None, 1], lod_level=1, dtype='int64')
            embedding = fluid.embedding(
                input=sequence, size=[dict_size, 512])
            hidden = fluid.layers.fc(input=embedding, size=512)
            label = fluid.data(
                name='label', shape=[None, 1], lod_level=1, dtype='int64')
            crf = fluid.layers.linear_chain_crf(
                input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
            crf_decode = fluid.layers.crf_decoding(
                input=hidden, param_attr=fluid.ParamAttr(name="crfw"))
            fluid.layers.chunk_eval(
                input=crf_decode,
                label=label,
                chunk_scheme="IOB",
                num_chunk_types=int((label_dict_len - 1) / 2))
    """
    # NOTE: LayerHelper(**locals()) must stay the first statement so the
    # helper only receives the function arguments as keyword arguments.
    helper = LayerHelper("chunk_eval", **locals())

    # Both the predictions and the ground-truth labels must be int64 tensors.
    for value, value_name in ((input, 'input'), (label, 'label')):
        check_variable_and_dtype(value, value_name, ['int64'], 'chunk_eval')

    # Allocate the three float32 metric outputs and the three int64 counters.
    precision, recall, f1_score = (
        helper.create_variable_for_type_inference(dtype="float32")
        for _ in range(3))
    num_infer_chunks, num_label_chunks, num_correct_chunks = (
        helper.create_variable_for_type_inference(dtype="int64")
        for _ in range(3))

    op_inputs = {"Inference": [input], "Label": [label]}
    # SeqLength is only meaningful for padded (non-LoD) inputs.
    if seq_length is not None:
        op_inputs["SeqLength"] = [seq_length]

    helper.append_op(
        type="chunk_eval",
        inputs=op_inputs,
        outputs={
            "Precision": [precision],
            "Recall": [recall],
            "F1-Score": [f1_score],
            "NumInferChunks": [num_infer_chunks],
            "NumLabelChunks": [num_label_chunks],
            "NumCorrectChunks": [num_correct_chunks]
        },
        attrs={
            "num_chunk_types": num_chunk_types,
            "chunk_scheme": chunk_scheme,
            "excluded_chunk_types": excluded_chunk_types or []
        })
    return (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
            num_correct_chunks)
@deprecated(since="2.0.0", update_to="paddle.nn.functional.softmax")
def softmax(input, use_cudnn=False, name=None, axis=-1):
    """
    This operator implements the softmax layer. The calculation process is as follows:

    1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.

    2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
    second dimension(row length) is the same as the dimension :attr:`axis` of the input
    tensor, and the first dimension(column length) is the product of all other
    dimensions of the input tensor. For each row of the matrix, the softmax operator
    squashes the K-dimensional(K is the width of the matrix, which is also the size
    of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
    K-dimensional vector of real values in the range [0, 1] that add up to 1.

    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
    are performed to restore the two-dimensional matrix to the same dimension as the ``input``.

    It computes the exponential of the given dimension and the sum of exponential
    values of all the other dimensions in the K-dimensional vector input.
    Then the ratio of the exponential of the given dimension and the sum of
    exponential values of all the other dimensions is the output of the softmax
    operator.

    For each row :math:`i` and each column :math:`j` in the matrix, we have:

    .. math::

        Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))}

    Example:

    .. code-block:: text

        Case 1:
          Input:
            X.shape = [2, 3, 4]
            X.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]

          Attrs:
            axis = -1

          Output:
            Out.shape = [2, 3, 4]
            Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]

        Case 2:
          Input:
            X.shape = [2, 3, 4]
            X.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]

          Attrs:
            axis = 1

          Output:
            Out.shape = [2, 3, 4]
            Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]

    Args:
        input (Variable): The input variable. A multi-dimension ``Tensor`` with type float32 or float64.
        use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
            library is installed. To improve numerical stability, set use_cudnn to \
            False by default.
        name (str, optional): The default value is None. Normally there is no need for user
            to set this property. For more information, please refer to
            :ref:`api_guide_Name` .
        axis (int, optional): The index of dimension to perform softmax calculations, it should
            be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
            input variable. Default: -1. -1 means the last dimension.

    Returns:
        Variable: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            data = fluid.data(name="input", shape=[-1, 3],dtype="float32")
            result = fluid.layers.softmax(data,axis=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            x = np.random.rand(3, 3).astype("float32")
            output= exe.run(feed={"input": x},
                             fetch_list=[result[0]])
            print(output)
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return core.ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)

    # NOTE(review): `inputs` is never passed to append_op below (a fresh dict
    # is built there), but it is captured by LayerHelper(**locals()) — so it
    # cannot simply be removed without changing the helper's kwargs; confirm
    # before cleaning up.
    inputs = {"X": [input]}
    attrs = {"axis": axis, "use_cudnn": use_cudnn}
    helper = LayerHelper('softmax', **locals())
    check_variable_and_dtype(input, 'input/x',
                             ['float16', 'float32', 'float64'], 'softmax')

    dtype = helper.input_dtype()
    softmax_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="softmax",
        inputs={"X": input},
        outputs={"Out": softmax_out},
        attrs=attrs)
    return softmax_out
def conv2d(input,
           num_filters,
           filter_size,
           stride=1,
           padding=0,
           dilation=1,
           groups=None,
           param_attr=None,
           bias_attr=None,
           use_cudnn=True,
           act=None,
           name=None,
           data_format="NCHW"):
    """
    :api_attr: Static Graph

    The convolution2D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input and
    Output are in NCHW or NHWC format, where N is batch size, C is the number of
    channels, H is the height of the feature, and W is the width of the feature.
    Filter is in MCHW format, where M is the number of output image channels,
    C is the number of input image channels, H is the height of the filter,
    and W is the width of the filter. If the groups is greater than 1,
    C will equal the number of input image channels divided by the groups.
    Please refer to UFLDL's `convolution
    <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
    for more details.
    If bias attribution and activation type are provided, bias is added to the
    output of the convolution, and the corresponding activation function is
    applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    Where:

    * :math:`X`: Input value, a tensor with NCHW or NHWC format.
    * :math:`W`: Filter value, a tensor with MCHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

    Args:
        input (Variable): The input is 4-D Tensor with shape [N, C, H, W], the data type
            of input is float16 or float32 or float64.
        num_filters(int): The number of filter. It is as same as the output
            image channel.
        filter_size (int|tuple): The filter size. If filter_size
            is a tuple, it must contain two integers, (filter_size_height,
            filter_size_width). Otherwise, filter_size_height = filter_size_width =\
            filter_size.
        stride (int|tuple): The stride size. It means the stride in convolution.
            If stride is a tuple, it must contain two integers, (stride_height, stride_width).
            Otherwise, stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
            on both sides for each dimension.If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when
            `data_format` is `"NCHW"`, `padding` can be in the form `[[0,0], [0,0],
            [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|tuple): The dilation size. It means the spacing between the kernel
            points. If dilation is a tuple, it must contain two integers, (dilation_height,
            dilation_width). Otherwise, dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int): The groups number of the Conv2d Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels.
            Default: None, which is equivalent to groups=1.
        param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
            and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True
        act (str): Activation type, if it is set to None, activation is not appended.
            Default: None
        name(str|None): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        A Variable holding Tensor representing the conv2d, whose data type is the
        same with input. If act is None, the tensor variable storing the convolution
        result, and if act is not None, the tensor variable storing convolution
        and non-linearity activation result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ShapeError: If the input is not 4-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not be divided by groups.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
          conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
    """

    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'conv2d')
    if not isinstance(use_cudnn, bool):
        raise ValueError("Attr(use_cudnn) should be True or False. Received "
                         "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    # The channel axis depends on the layout: last axis for NHWC, axis 1 for
    # NCHW. (A dead pre-computation that unconditionally read shape[1] — and
    # so would have been wrong for NHWC had it ever been used — was removed.)
    channel_last = (data_format == "NHWC")
    num_channels = input.shape[3] if channel_last else input.shape[1]
    # A negative size means the channel dimension is dynamic/undefined, which
    # conv2d cannot work with.
    if num_channels < 0:
        raise ValueError(
            "The channel dimmention of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))
    assert param_attr is not False, "param_attr should not be False here."

    # Select the specialized depthwise kernel when every input channel forms
    # its own group; the depthwise op is only chosen when use_cudnn is False.
    l_type = 'conv2d'
    if (num_channels == groups and num_filters % num_channels == 0 and
            not use_cudnn):
        l_type = 'depthwise_conv2d'

    # NOTE: LayerHelper(**locals()) captures all locals defined so far, so no
    # extra locals may be introduced above this line without care.
    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()

    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError(
                "the channel of input must be divisible by groups,"
                "received: the channel of input is {}, the shape of input is {}"
                ", the groups is {}".format(num_channels, input.shape, groups))
        num_filter_channels = num_channels // groups

    filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
    stride = utils.convert_to_list(stride, 2, 'stride')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    # Normalize `padding` to either [pad_h, pad_w] (symmetric) or
    # [pad_top, pad_bottom, pad_left, pad_right], rejecting any non-zero
    # padding on the batch or channel dimensions.
    def _update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 4:
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')
            if utils._is_symmetric_padding(padding, 2):
                padding = [padding[0], padding[2]]
        else:
            padding = utils.convert_to_list(padding, 2, 'padding')

        return padding

    # 'SAME'/'VALID' are handled by the backend via padding_algorithm; the
    # explicit padding list is then irrelevant and zeroed out.
    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0]

    padding = _update_padding(padding, data_format)

    filter_shape = [num_filters, int(num_filter_channels)] + filter_size

    def _get_default_param_initializer():
        # Default init: std = (2 / (kh * kw * C))**0.5, i.e. scaled by the
        # filter fan-in.
        filter_elem_num = filter_size[0] * filter_size[1] * num_channels
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=l_type,
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'use_mkldnn': False,
            'fuse_relu_before_depthwise_conv': False,
            "padding_algorithm": padding_algorithm,
            "data_format": data_format,
        })

    # The bias is added along the channel axis, whose position depends on
    # the layout.
    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)

    return helper.append_activation(pre_act)
def conv3d(input,
           num_filters,
           filter_size,
           stride=1,
           padding=0,
           dilation=1,
           groups=None,
           param_attr=None,
           bias_attr=None,
           use_cudnn=True,
           act=None,
           name=None,
           data_format="NCDHW"):
    """
    :api_attr: Static Graph

    The convolution3D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are in NCDHW or NDHWC format. Where N is batch size C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convlution3D is similar with Convlution2D
    but adds one dimension(depth). If bias attribution and activation type are
    provided, bias is added to the output of the convolution, and the
    corresponding activation function is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    In the above equation:

    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

        Where

        .. math::

            D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
            H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1

    Args:
        input (Variable): The input is 5-D Tensor with shape [N, C, D, H, W], the data
            type of input is float16 or float32 or float64.
        num_filters(int): The number of filter. It is as same as the output
            image channel.
        filter_size (int|tuple): The filter size. If filter_size is a tuple,
            it must contain three integers, (filter_size_depth, filter_size_height,
            filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
            filter_size_width = filter_size.
        stride (int|tuple): The stride size. It means the stride in convolution. If stride is a
            tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int): The groups number of the Conv3d Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels.
            Default: None, which is equivalent to groups=1.
        param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
            of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
            :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True
        act (str): Activation type, if it is set to None, activation is not appended.
            Default: None.
        name(str|None): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.

    Returns:
        A Variable holding Tensor representing the conv3d, whose data type is
        the same with input. If act is None, the tensor variable storing the
        convolution result, and if act is not None, the tensor variable storing
        convolution and non-linearity activation result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ShapeError: If the input is not 5-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not be divided by groups.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
          conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu")
    """

    l_type = 'conv3d'
    assert param_attr is not False, "param_attr should not be False here."
    # NOTE: LayerHelper(**locals()) captures all locals defined so far, so no
    # extra locals may be introduced above this line without care.
    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()

    if not isinstance(use_cudnn, bool):
        raise ValueError("Attr(use_cudnn) should be True or False. Received "
                         "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    # The channel axis depends on the layout: last axis for NDHWC, axis 1 for
    # NCDHW. A negative size means the channel dimension is dynamic/undefined.
    channel_last = (data_format == "NDHWC")
    num_channels = input.shape[4] if channel_last else input.shape[1]
    if num_channels < 0:
        raise ValueError(
            "The channel dimmention of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))

    # Grouped convolution: each filter only sees num_channels // groups
    # input channels.
    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError(
                "The number of input channels must be divisible by Attr(groups). "
                "Received: number of channels(%s), groups(%s)." %
                (str(num_channels), str(groups)))
        num_filter_channels = num_channels // groups

    filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
    stride = utils.convert_to_list(stride, 3, 'stride')
    dilation = utils.convert_to_list(dilation, 3, 'dilation')

    # Normalize `padding` to either [pad_d, pad_h, pad_w] (symmetric) or a
    # 6-element per-side list, rejecting any non-zero padding on the batch
    # or channel dimensions.
    def _update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 5:
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        else:
            padding = utils.convert_to_list(padding, 3, 'padding')

        return padding

    # 'SAME'/'VALID' are handled by the backend via padding_algorithm; the
    # explicit padding list is then irrelevant and zeroed out.
    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0]

    padding = _update_padding(padding, data_format)

    # NOTE(review): `input_shape` is not used below in this function —
    # possibly leftover; confirm before removing.
    input_shape = input.shape
    filter_shape = [num_filters, num_filter_channels] + filter_size

    def _get_default_param_initializer():
        # Default init: std = (2 / (kd * kh * kw * C))**0.5, i.e. scaled by
        # the filter fan-in.
        filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
            2] * num_channels
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=l_type,
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'use_mkldnn': False,
            "padding_algorithm": padding_algorithm,
            "data_format": data_format,
        })

    # The bias is added along the channel axis, whose position depends on
    # the layout.
    if data_format == 'NCDHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)

    return helper.append_activation(pre_act)
@deprecated(since="2.0.0", update_to="paddle.nn.functional.pool2d")
@templatedoc()
def pool2d(input,
           pool_size=-1,
           pool_type="max",
           pool_stride=1,
           pool_padding=0,
           global_pooling=False,
           use_cudnn=True,
           ceil_mode=False,
           name=None,
           exclusive=True,
           data_format="NCHW"):
    """
    :alias_main: paddle.nn.functional.pool2d
    :alias: paddle.nn.functional.pool2d,paddle.nn.functional.pooling.pool2d
    :old_api: paddle.fluid.layers.pool2d

    ${comment}

    Args:
        input (Variable): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type if float32 or float64.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two integers, (pool_size_Height, pool_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        pool_type: ${pooling_type_comment}
        pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain two integers, (pool_stride_Height, pool_stride_Width).
            Otherwise, the pool stride size will be a square of an int.
        pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCHW"`,
            `pool_padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Otherwise, the pool padding size will be a square of an int.
        global_pooling (bool): ${global_pooling_comment}
        use_cudnn (bool): ${use_cudnn_comment}
        ceil_mode (bool): ${ceil_mode_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `true`.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        Variable: The output tensor of pooling result. The data type is same as input tensor.

    Raises:
        ValueError: If `pool_type` is not "max" nor "avg".
        ValueError: If `global_pooling` is False and `pool_size` is -1.
        TypeError: If `use_cudnn` is not a bool value.
        ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
        ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
        ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
        ShapeError: If the input is not a 4-D Tensor.
        ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
        ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')

          # max pool2d
          pool2d = fluid.layers.pool2d(
            input = data,
            pool_size = 2,
            pool_type = "max",
            pool_stride = 1,
            global_pooling=False)

          # average pool2d
          pool2d = fluid.layers.pool2d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=False)

          # global average pool2d
          pool2d = fluid.layers.pool2d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=True)

          # Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW".
          out_1 = fluid.layers.pool2d(
            input = data,
            pool_size = 3,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = [1, 2, 1, 0],
            data_format = "NCHW")

          # Attr(pool_padding) is a string, Attr(data_format) is "NCHW".
          out_2 = fluid.layers.pool2d(
            input = data,
            pool_size = 3,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = "VALID",
            data_format = "NCHW")
    """
    if pool_type not in ["max", "avg"]:
        # NOTE: use %-formatting so the offending value actually appears in
        # the message (passing it as a second exception arg left '%s' unfilled).
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if global_pooling is False and pool_size == -1:
        raise ValueError(
            "When Attr(global_pooling) is False, Attr(pool_size) must be passed "
            "and be a valid value. Received pool_size: %s." % str(pool_size))

    if not isinstance(use_cudnn, bool):
        raise TypeError("Attr(use_cudnn) should be True or False. Received "
                        "Attr(use_cudnn): %s." % str(use_cudnn))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
    pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')

    def update_padding(padding, data_format):
        """Normalize `padding` to the flat 2- or 4-element form the op expects.

        Accepts the nested per-dimension form ([[0,0], ..., [top,bottom],
        [left,right]]), the flat 4-element form, or a 2-element/scalar form,
        and rejects non-zero padding on the batch/channel dimensions.
        """

        def is_list_or_tuple(ele):
            return isinstance(ele, (list, tuple))

        if is_list_or_tuple(padding) and len(padding) == 4:
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                # Batch (0) and channel (1) dims must be un-padded.
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                # Batch (0) and channel (3) dims must be un-padded.
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')

            # Collapse symmetric [top, bottom, left, right] to [h, w].
            if utils._is_symmetric_padding(padding, 2):
                padding = [padding[0], padding[2]]
        else:
            padding = utils.convert_to_list(padding, 2, 'padding')

        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(pool_padding, str):
        pool_padding = pool_padding.upper()
        if pool_padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
                % str(pool_padding))
        if pool_padding == "VALID":
            padding_algorithm = "VALID"
            pool_padding = [0, 0]
            if ceil_mode:
                raise ValueError(
                    "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. "
                    "Received ceil_mode: True.")
        elif pool_padding == "SAME":
            padding_algorithm = "SAME"
            pool_padding = [0, 0]

    pool_padding = update_padding(pool_padding, data_format)

    op_type = 'pool2d'
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "global_pooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": use_cudnn,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": exclusive,
            "data_format": data_format,
        })

    return pool_out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.pool3d")
@templatedoc()
def pool3d(input,
           pool_size=-1,
           pool_type="max",
           pool_stride=1,
           pool_padding=0,
           global_pooling=False,
           use_cudnn=True,
           ceil_mode=False,
           name=None,
           exclusive=True,
           data_format="NCDHW"):
    """
    :alias_main: paddle.nn.functional.pool3d
    :alias: paddle.nn.functional.pool3d,paddle.nn.functional.pooling.pool3d
    :old_api: paddle.fluid.layers.pool3d

    ${comment}

    Args:
        input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of
                          input tensor is `"NCDHW"` or `"NDHWC"`, where `N` is batch size, `C` is
                          the number of channels, `D` is the depth of the feature,
                          `H` is the height of the feature, and `W` is the width
                          of the feature.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size
            is a tuple or list, it must contain three integers,
            (pool_size_Depth, pool_size_Height, pool_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        pool_type (string): ${pooling_type_comment}
        pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, `[stride_Depth, stride_Height, stride_Width]`.
            Otherwise, the pool stride size will be a cube of an int.
        pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
        global_pooling (bool): ${global_pooling_comment}
        use_cudnn (bool): ${use_cudnn_comment}
        ceil_mode (bool): ${ceil_mode_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is true.
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                `[batch_size, input_channels, input_depth, input_height, input_width]`.

    Returns:
        Variable: The output tensor of pooling result. The data type is same as input tensor.

    Raises:
        ValueError: If `pool_type` is not "max" nor "avg".
        ValueError: If `global_pooling` is False and `pool_size` is -1.
        TypeError: If `use_cudnn` is not a bool value.
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
        ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
        ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
        ShapeError: If the input is not a 5-D Tensor.
        ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
        ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32')

          # max pool3d
          pool3d = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "max",
            pool_stride = 1,
            global_pooling=False)

          # average pool3d
          pool3d = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=False)

          # global average pool3d
          pool3d = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=True)

          # example 1:
          # Attr(pool_padding) is a list with 6 elements, Attr(data_format) is "NCDHW".
          out_1 = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = [1, 2, 1, 0, 1, 2],
            global_pooling = False,
            data_format = "NCDHW")

          # example 2:
          # Attr(pool_padding) is a string, Attr(data_format) is "NCDHW".
          out_2 = fluid.layers.pool3d(
            input = data,
            pool_size = 3,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = "VALID",
            global_pooling = False,
            data_format = "NCDHW")
    """
    if pool_type not in ["max", "avg"]:
        # NOTE: use %-formatting so the offending value actually appears in
        # the message (passing it as a second exception arg left '%s' unfilled).
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if global_pooling is False and pool_size == -1:
        raise ValueError(
            "When Attr(global_pooling) is False, Attr(pool_size) must be passed "
            "and be a valid value. Received Attr(pool_size): %s." %
            str(pool_size))

    if not isinstance(use_cudnn, bool):
        raise TypeError("Attr(use_cudnn) should be True or False. Received "
                        "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s" % str(data_format))

    pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
    pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride')

    def update_padding(padding, data_format):
        """Normalize `padding` to the flat 3- or 6-element form the op expects.

        Accepts the nested per-dimension form, the flat 6-element form, or a
        3-element/scalar form, and rejects non-zero padding on the
        batch/channel dimensions.
        """

        def is_list_or_tuple(ele):
            return isinstance(ele, (list, tuple))

        if is_list_or_tuple(padding) and len(padding) == 5:
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                # Batch (0) and channel (1) dims must be un-padded.
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                # Batch (0) and channel (4) dims must be un-padded.
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')
            # Collapse symmetric front/back pairs to [d, h, w].
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        else:
            padding = utils.convert_to_list(padding, 3, 'padding')

        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(pool_padding, str):
        pool_padding = pool_padding.upper()
        if pool_padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
                % str(pool_padding))
        if pool_padding == "VALID":
            padding_algorithm = "VALID"
            pool_padding = [0, 0, 0]
            if ceil_mode:
                raise ValueError(
                    "When Attr(pool_padding) is \"VALID\", ceil_mode must be False. "
                    "Received ceil_mode: True.")
        elif pool_padding == "SAME":
            padding_algorithm = "SAME"
            pool_padding = [0, 0, 0]

    pool_padding = update_padding(pool_padding, data_format)

    op_type = "pool3d"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "global_pooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": use_cudnn,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": exclusive,
            "data_format": data_format,
        })

    return pool_out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool2d")
@templatedoc(op_type="pool2d")
def adaptive_pool2d(input,
                    pool_size,
                    pool_type="max",
                    require_index=False,
                    name=None):
    """
    :alias_main: paddle.nn.functional.adaptive_pool2d
    :alias: paddle.nn.functional.adaptive_pool2d,paddle.nn.functional.pooling.adaptive_pool2d
    :old_api: paddle.fluid.layers.adaptive_pool2d

    This operation calculates the output based on the input, pool_size,
    pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
    size, C is the number of channels, H is the height of the feature, and W is
    the width of the feature. Parameters(pool_size) should contain two elements which
    represent height and width, respectively. Also the H and W dimensions of output(Out)
    is same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]]

    For average adaptive pool2d:

    ..  math::

       hstart &= floor(i * H_{in} / H_{out})

       hend &= ceil((i + 1) * H_{in} / H_{out})

       wstart &= floor(j * W_{in} / W_{out})

       wend &= ceil((j + 1) * W_{in} / W_{out})

       Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}

    Args:
        input (Variable): The input tensor of pooling operator, which is a 4-D tensor
                          with shape [N, C, H, W].  The format of input tensor is NCHW,
                          where N is batch size, C is the number of channels, H is the
                          height of the feature, and W is the width of the feature.
                          The data type is float32 or float64.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two integers, (pool_size_Height, pool_size_Width).
        pool_type: ${pooling_type_comment}
        require_index (bool): If true, the index of max pooling point will be returned along
            with outputs. It cannot be set in average pooling type. Default False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Variable: The output tensor of adaptive pooling result. The data type is same
                  as input tensor.

    Raises:
        ValueError: 'pool_type' is not 'max' nor 'avg'.
        ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
        ValueError: 'pool_size' should be a list or tuple with length as 2.

    Examples:
        .. code-block:: python

          # average adaptive pool2d
          # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
          # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
          # of input data into m * n grids averagely and performs poolings in each
          # grid to get output.
          # adaptive average pool performs calculations as follow:
          #
          #     for i in range(m):
          #         for j in range(n):
          #             hstart = floor(i * H / m)
          #             hend = ceil((i + 1) * H / m)
          #             wstart = floor(i * W / n)
          #             wend = ceil((i + 1) * W / n)
          #             output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
          #
          import paddle.fluid as fluid
          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
          pool_out = fluid.layers.adaptive_pool2d(
                            input=data,
                            pool_size=[3, 3],
                            pool_type='avg')

          # max adaptive pool2d
          # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
          # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
          # of input data into m * n grids averagely and performs poolings in each
          # grid to get output.
          # adaptive max pool performs calculations as follow:
          #
          #     for i in range(m):
          #         for j in range(n):
          #             hstart = floor(i * H / m)
          #             hend = ceil((i + 1) * H / m)
          #             wstart = floor(i * W / n)
          #             wend = ceil((i + 1) * W / n)
          #             output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
          #
          import paddle.fluid as fluid
          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
          pool_out = fluid.layers.adaptive_pool2d(
                            input=data,
                            pool_size=[3, 3],
                            pool_type='max')
    """
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'adaptive_pool2d')
    check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
    check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
    if pool_type not in ["max", "avg"]:
        # NOTE: use %-formatting so the offending value actually appears in
        # the message (passing it as a second exception arg left '%s' unfilled).
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if pool_type == "avg" and require_index:
        # Only max pooling can produce argmax indices.
        raise ValueError(
            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")

    pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')

    if pool_type == "max":
        l_type = 'max_pool2d_with_index'
    else:
        l_type = "pool2d"

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}
    if pool_type == "max":
        # The max-pool-with-index op additionally emits the argmax mask.
        mask = helper.create_variable_for_type_inference(dtype)
        outputs["Mask"] = mask

    helper.append_op(
        type=l_type,
        inputs={"X": input},
        outputs=outputs,
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "adaptive": True,
        })

    return (pool_out, mask) if require_index else pool_out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool3d")
@templatedoc(op_type="pool3d")
def adaptive_pool3d(input,
                    pool_size,
                    pool_type="max",
                    require_index=False,
                    name=None):
    """
    :alias_main: paddle.nn.functional.adaptive_pool3d
    :alias: paddle.nn.functional.adaptive_pool3d,paddle.nn.functional.pooling.adaptive_pool3d
    :old_api: paddle.fluid.layers.adaptive_pool3d

    This operation calculates the output based on the input, pool_size,
    pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
    size, C is the number of channels, D is the depth of the feature, H is the height of
    the feature, and W is the width of the feature. Parameters(pool_size) should contain
    three elements which represent height and width, respectively. Also the D, H and W
    dimensions of output(Out) is same as Parameter(pool_size). The output tensor shape
    will be [N, C, pool_size[0], pool_size[1], pool_size[2]]

    For average adaptive pool3d:

    ..  math::

      dstart &= floor(i * D_{in} / D_{out})

      dend &= ceil((i + 1) * D_{in} / D_{out})

      hstart &= floor(j * H_{in} / H_{out})

      hend &= ceil((j + 1) * H_{in} / H_{out})

      wstart &= floor(k * W_{in} / W_{out})

      wend &= ceil((k + 1) * W_{in} / W_{out})

      Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}

    Args:
        input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
                          N is batch size, C is the number of channels, D is the depth of the feature,
                          H is the height of the feature, and W is the width of the feature.
                          The data type is float32 or float64.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain three integers, (Depth, Height, Width).
        pool_type: ${pooling_type_comment}
        require_index (bool): If true, the index of max pooling point will be returned along
            with outputs. It cannot be set in average pooling type. Default False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Variable: The output tensor of adaptive pooling result. The data type is same as input tensor.

    Raises:
        ValueError: 'pool_type' is not 'max' nor 'avg'.
        ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
        ValueError: 'pool_size' should be a list or tuple with length as 3.

    Examples:
        .. code-block:: python

          # average adaptive pool3d
          # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
          # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
          # of input data into l * m * n grids averagely and performs poolings in each
          # grid to get output.
          # adaptive average pool performs calculations as follow:
          #
          #     for i in range(l):
          #         for j in range(m):
          #             for k in range(n):
          #                 dstart = floor(i * D / l)
          #                 dend = ceil((i + 1) * D / l)
          #                 hstart = floor(j * H / m)
          #                 hend = ceil((j + 1) * H / m)
          #                 wstart = floor(k * W / n)
          #                 wend = ceil((k + 1) * W / n)
          #                 output[:, :, i, j, k] =
          #                     avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
          #
          import paddle.fluid as fluid
          data = fluid.data(
              name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
          pool_out = fluid.layers.adaptive_pool3d(
                            input=data,
                            pool_size=[3, 3, 3],
                            pool_type='avg')

          # max adaptive pool3d
          # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
          # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
          # of input data into l * m * n grids averagely and performs poolings in each
          # grid to get output.
          # adaptive max pool performs calculations as follow:
          #
          #     for i in range(l):
          #         for j in range(m):
          #             for k in range(n):
          #                 dstart = floor(i * D / l)
          #                 dend = ceil((i + 1) * D / l)
          #                 hstart = floor(j * H / m)
          #                 hend = ceil((j + 1) * H / m)
          #                 wstart = floor(k * W / n)
          #                 wend = ceil((k + 1) * W / n)
          #                 output[:, :, i, j, k] =
          #                     max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
          #
          import paddle.fluid as fluid
          data = fluid.data(
              name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
          pool_out = fluid.layers.adaptive_pool3d(
                            input=data,
                            pool_size=[3, 3, 3],
                            pool_type='max')
    """
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'adaptive_pool3d')
    check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
    check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
    if pool_type not in ["max", "avg"]:
        # NOTE: use %-formatting so the offending value actually appears in
        # the message (passing it as a second exception arg left '%s' unfilled).
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if pool_type == "avg" and require_index:
        # Only max pooling can produce argmax indices.
        raise ValueError(
            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")

    pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')

    if pool_type == "max":
        l_type = 'max_pool3d_with_index'
    else:
        l_type = "pool3d"

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}
    if pool_type == "max":
        # The max-pool-with-index op additionally emits the argmax mask.
        mask = helper.create_variable_for_type_inference(dtype)
        outputs["Mask"] = mask

    helper.append_op(
        type=l_type,
        inputs={"X": input},
        outputs=outputs,
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "adaptive": True,
        })

    return (pool_out, mask) if require_index else pool_out
def batch_norm(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False):
"""
:api_attr: Static Graph
**Batch Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
moving\_mean = moving\_mean * momentum + mini-batch\_mean * (1. - momentum) \\\\
moving\_var = moving\_var * momentum + mini-batch\_var * (1. - momentum)
moving_mean is global mean and moving_var is global variance.
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
They are global (or running) statistics. (It usually got from the
pre-trained model.)
The training and testing (or inference) have the same behavior:
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta
Note:
if build_strategy.sync_batch_norm=True, the batch_norm in network will use
sync_batch_norm automatically.
`is_test = True` can only be used in test program and inference program, `is_test` CANNOT be set to True in train program, if you want to use global status from pre_train model in train program, please set `use_global_stats = True`.
Args:
input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
is float16 or float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
is_test (bool, Default False): A flag indicating whether it is in
test phrase or not.
momentum(float|Variable, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Variable with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm
will save global mean with the string.
moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
If it is set to None, batch_norm will save global variance with a random name, otherwise, batch_norm
will save global variance with the string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
Returns:
A Variable holding Tensor which is the result after applying batch normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.batch_norm(input=hidden1)
.. code-block:: python
# batch_norm with momentum as Variable
import paddle.fluid as fluid
import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
def get_decay_momentum(momentum_init, decay_steps, decay_rate):
global_step = lr_scheduler._decay_step_counter()
momentum = fluid.layers.create_global_var(
shape=[1],
value=float(momentum_init),
dtype='float32',
# set persistable for save checkpoints and resume
persistable=True,
name="momentum")
div_res = global_step / decay_steps
decayed_momentum = momentum_init * (decay_rate**div_res)
fluid.layers.assign(decayed_momentum, momentum)
return momentum
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
momentum = get_decay_momentum(0.9, 1e5, 0.9)
hidden2 = fluid.layers.batch_norm(input=hidden1, momentum=momentum)
"""
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
helper = LayerHelper('batch_norm', **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'batch_norm')
dtype = helper.input_dtype()
has_reserve_space = False
if data_layout == 'NHWC':
flag = os.environ.get('FLAGS_cudnn_batchnorm_spatial_persistent')
if flag is not None and flag.lower() in ['true', '1']:
has_reserve_space = True
# use fp32 for bn parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance out share the same memory
variance_out = variance
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
reserve_space = None
if has_reserve_space:
reserve_space = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.FP16, stop_gradient=True)
batch_norm_out = input if in_place else \
helper.create_variable_for_type_inference(dtype)
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats
}
if isinstance(momentum, Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return helper.append_activation(batch_norm_out)
def inplace_abn(input,
                act=None,
                is_test=False,
                momentum=0.9,
                epsilon=1e-05,
                param_attr=None,
                bias_attr=None,
                data_layout='NCHW',
                name=None,
                moving_mean_name=None,
                moving_variance_name=None,
                do_model_average_for_mean_and_var=True,
                use_global_stats=False,
                act_alpha=1.0):
    """
    **In-place Activation Batch Normalization Layer**

    This layer calculates batch normalization and activation with in-place memory.
    For batch normalization calculations, see `fluid.layers.batch_norm`.
    For in-place activation batch normalization, see `In-Place Activated BatchNorm for
    Memory-Optimized Training of DNNs <https://arxiv.org/abs/1712.02616>`_

    `inplace_abn` only support activation type as `None`, `identity`, `leaky_relu`,
    `elu` currently.
    `inplace_abn` only support data type as `float32`, `float64` currently.

    Note:
        if build_strategy.sync_batch_norm=True, the batch_norm in network will use
        sync_batch_norm automatically.
        `is_test = True` can only be used in test program and inference program, `is_test` CANNOT be set to True in train program, if you want to use global status from pre_train model in train program, please set `use_global_stats = True`.

    Args:
        input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
            is float16 or float32 or float64.
        act(string, Default None): Activation type, linear|relu|prelu|...
        is_test (bool, Default False): A flag indicating whether it is in
            test phrase or not.
        momentum(float|Variable, Default 0.9): The value used for the moving_mean and
            moving_var computation. This should be a float number or a Variable with
            shape [1] and data type as float32. The updated formula is:
            :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
            :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
            Default is 0.9.
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability. Default is 1e-5.
        param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
            of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn
            will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the param_attr is not set, the parameter is initialized
            with Xavier. Default: None.
        bias_attr(ParamAttr|None): The parameter attribute for the bias of inplace_abn.
            If it is set to None or one attribute of ParamAttr, inplace_abn
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized zero.
            Default: None.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
            Usually name is no need to set and None by default.
        moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
            is set to None, inplace_abn will save global mean with a random name, otherwise, inplace_abn
            will save global mean with the string.
        moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
            If it is set to None, inplace_abn, will save global variance with a random name, otherwise, inplace_abn
            will save global variance with the string.
        do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
            average when model average is enabled.
        use_global_stats(bool, Default False): Whether to use global mean and
            variance. In inference or test mode, set use_global_stats to true
            or is_test to true, and the behavior is equivalent.
            In train mode, when setting use_global_stats True, the global mean
            and variance are also used during train period.
        act_alpha(float, Default 1.0): when activation is in ['elu', 'identity', 'leaky_relu'],
            inplace activative batch normalization will be used, and alpha parameter for activation
            can be given by this parameter.

    Returns:
        A Variable holding Tensor which is the result after applying batch normalization and activation on the input,
        has same shape and data type with input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.inplace_abn(input=hidden1)
            hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2)

    """
    # Only a small whitelist of activations can be fused in-place by the op.
    assert act in [None, 'identity', 'leaky_relu', 'elu'], \
        "inplace_abn only support act as None, 'identity', " \
        "'leaky_relu', 'elu' currently"
    assert bias_attr is not False, "bias_attr should not be False in inplace_abn."
    # NOTE: **locals() forwards every argument (name, act, param_attr, ...) to
    # the helper, so the argument names above are part of this call's contract.
    helper = LayerHelper('inplace_abn', **locals())

    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'inplace_abn')
    dtype = helper.input_dtype()

    # cuDNN's persistent spatial kernel (NHWC only) emits an extra reserve-space
    # output; it is toggled by an environment flag.
    has_reserve_space = False
    if data_layout == 'NHWC':
        flag = os.environ.get('FLAGS_cudnn_batchnorm_spatial_persistent')
        if flag is not None and flag.lower() in ['true', '1']:
            has_reserve_space = True

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    # scale, bias and the moving statistics are all per-channel vectors.
    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    # Moving statistics are updated by the op itself, never by the optimizer,
    # hence trainable=False plus stop_gradient=True.
    mean = helper.create_parameter(
        attr=ParamAttr(
            name=moving_mean_name,
            initializer=Constant(0.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    mean.stop_gradient = True
    variance = helper.create_parameter(
        attr=ParamAttr(
            name=moving_variance_name,
            initializer=Constant(1.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    variance.stop_gradient = True

    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance out share the same memory
    variance_out = variance
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)

    reserve_space = None
    if has_reserve_space:
        reserve_space = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.FP16, stop_gradient=True)

    # In-place: the op writes its result back into the input variable.
    batch_norm_out = input

    inputs = {
        "X": input,
        "Scale": scale,
        "Bias": bias,
        "Mean": mean,
        "Variance": variance
    }
    attrs = {
        "epsilon": epsilon,
        "is_test": is_test,
        "data_layout": data_layout,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats,
        "activation": act,
        "alpha": act_alpha,
    }
    if isinstance(momentum, Variable):
        # NOTE(review): the key spelling 'MomemtumTensor' looks like a typo of
        # 'MomentumTensor' — confirm it matches the op's registered input name
        # before changing it.
        inputs['MomemtumTensor'] = momentum
    else:
        attrs['momentum'] = momentum
    outputs = {
        "Y": batch_norm_out,
        "MeanOut": mean_out,
        "VarianceOut": variance_out,
        "SavedMean": saved_mean,
        "SavedVariance": saved_variance
    }
    if reserve_space is not None:
        outputs["ReserveSpace"] = reserve_space
    helper.append_op(
        type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs)

    return batch_norm_out
def instance_norm(input,
                  epsilon=1e-05,
                  param_attr=None,
                  bias_attr=None,
                  name=None):
    """
    :api_attr: Static Graph

    **Instance Normalization Layer**

    Can be used as a normalizer function for convolution or fully_connected operations.
    The required data format for this layer is one of the following:

    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`

    Refer to `Instance Normalization: The Missing Ingredient for
    Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
    for more details.

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
        \\ mean\ of\ one\  feature\ map\ in\ mini-batch \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    Note:
        `H` means height of feature map, `W` means width of feature map.

    Args:
        input(variable): The rank of input variable can be 2, 3, 4, 5.
            The data type is float32 or float64.
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability. Default is 1e-5.
        param_attr(ParamAttr|None|bool, optional): The parameter attribute for Parameter `scale`
             of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
             If the Initializer of the param_attr is not set, the parameter is initialized
             with Xavier. If the param_attr is set to False, instance_norm will not create param_attr.
             Default: None.
        bias_attr(ParamAttr|None|bool, optional): The parameter attribute for the bias of instance_norm.
             If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
             If the Initializer of the bias_attr is not set, the bias is initialized zero.
             If the bias_attr is set to False, instance_norm will not create bias_attr.
             Default: None.
        name(string, Default None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        A Variable holding Tensor which is the result after applying instance normalization on the input,
        has same shape and data type with input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.instance_norm(input=hidden1)
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'instance_norm')
    if param_attr is False:
        # Scale and bias can only be disabled together: the op either applies
        # the full affine transform or none of it.
        assert bias_attr is False, "param_attr and bias_attr must be set to False at the same time in instance_norm"

    helper = LayerHelper('instance_norm', **locals())
    dtype = helper.input_dtype()

    # use fp32 for in parameter
    if dtype == core.VarDesc.VarType.FP16:
        dtype = core.VarDesc.VarType.FP32

    input_shape = input.shape
    # Instance norm statistics are per (sample, channel); parameters are
    # per-channel vectors.
    channel_num = input_shape[1]

    param_shape = [channel_num]

    if param_attr != False and bias_attr != False:
        # create parameter
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        bias = helper.create_parameter(
            attr=helper.bias_attr,
            shape=param_shape,
            dtype=dtype,
            is_bias=True,
            default_initializer=Constant(0.0))

    # create output: saved mean/variance are auxiliary outputs used by the
    # backward pass, never trained.
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)

    instance_norm_out = helper.create_variable_for_type_inference(dtype)

    inputs = {"X": input}
    if param_attr != False and bias_attr != False:
        inputs["Scale"] = scale
        inputs["Bias"] = bias

    helper.append_op(
        type="instance_norm",
        inputs=inputs,
        outputs={
            "Y": instance_norm_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={"epsilon": epsilon, })

    return instance_norm_out
def data_norm(input,
              act=None,
              epsilon=1e-05,
              param_attr=None,
              data_layout='NCHW',
              in_place=False,
              name=None,
              moving_mean_name=None,
              moving_variance_name=None,
              do_model_average_for_mean_and_var=True,
              slot_dim=-1,
              sync_stats=False,
              summary_decay_rate=0.9999999,
              enable_scale_and_shift=False):
    """
    :api_attr: Static Graph

    **Data Normalization Layer**

    This op can be used as a normalizer function for conv2d and fully_connected operations.
    The required data format for this layer is one of the following:

    1. NHWC `[batch, in_height, in_width, in_channels]`

    2. NCHW `[batch, in_channels, in_height, in_width]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
        \ mini-batch\ mean \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    Args:
        input(variable): The input variable which is a LoDTensor.
        act(string, Default None): Activation type, linear|relu|prelu|...
        epsilon(float, Default 1e-05):
        param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        in_place(bool, Default False): Make the input and output of batch norm reuse memory.
        name(string, Default None): A name for this layer(optional). If set None, the layer
            will be named automatically.
        moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
        moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
        do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance
            should do model average when model average is enabled.
        slot_dim(int): The embedding dimension of one slot. Slot is a set of one specific feature. In pslib mode, we
            distinguish feature ids by slot and pull their embeddings from parameter server (pslib). The first
            place of the embedding is the historical show number (occurrence time of this feature id with a label 0).
            If the input of this op is concated by slot-wise embeddings, and the show number is zero when this slot
            is new or empty, the normalization result may be impractical. To avoid this, we add slot_dim to locate
            the show number and judge if the show number is zero. If so, we choose to skip normalization on this
            embedding.
        sync_stats(bool, Default False): When running with multiple GPU cards, using allreduce to sync the
            summary messages.
        summary_decay_rate(float, Default 0.9999999): The decay rate when updating summary.
        enable_scale_and_shift(bool, Default False): do scale&shift after normalization.

    Returns:
        Variable: A tensor variable which is the result after applying data normalization on the input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            hidden1 = fluid.data(name="hidden1", shape=[64, 200])
            hidden2 = fluid.layers.data_norm(name="hidden2", input=hidden1)
    """
    helper = LayerHelper('data_norm', **locals())
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # Defaults approximate an identity transform until real statistics
    # accumulate: mean = batch_sum / batch_size = 0, scale close to 1.
    batch_size_default = 1e4
    batch_sum_default = 0.0
    batch_square_sum_default = 1e4
    scale_w_default = 1.0
    bias_default = 0.0

    # A dict-typed param_attr may override the initial summary values
    # (and, optionally, the scale/shift initial values).
    if param_attr and isinstance(param_attr, dict):
        batch_size_default = param_attr.get("batch_size", 1e4)
        batch_sum_default = param_attr.get("batch_sum", 0.0)
        batch_square_sum_default = param_attr.get("batch_square", 1e4)
        if enable_scale_and_shift:
            scale_w_default = param_attr.get("scale_w", 1.0)
            bias_default = param_attr.get("bias", 0.0)

    # create scale and shift(bias) when enable_scale_and_shift is True
    if name is None:
        name = "dn"
    if enable_scale_and_shift:
        scale_w = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.scale_w',
                initializer=Constant(value=float(scale_w_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
        bias = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.bias',
                initializer=Constant(value=float(bias_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
    # create parameter
    batch_size = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_size',
            initializer=Constant(value=float(batch_size_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_sum',
            initializer=Constant(value=float(batch_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_square_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_square_sum',
            initializer=Constant(value=float(batch_square_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    means = helper.create_variable(dtype=dtype, stop_gradient=True)
    scales = helper.create_variable(dtype=dtype, stop_gradient=True)

    data_norm_out = input if in_place else helper.create_variable(dtype=dtype)

    inputs = {
        "X": input,
        "BatchSize": batch_size,
        "BatchSum": batch_sum,
        "BatchSquareSum": batch_square_sum
    }
    attrs = {
        "epsilon": epsilon,
        "sync_stats": sync_stats,
        "summary_decay_rate": summary_decay_rate,
    }
    if slot_dim > 0:
        attrs["slot_dim"] = slot_dim
    if enable_scale_and_shift:
        attrs["enable_scale_and_shift"] = enable_scale_and_shift
        inputs["scale_w"] = scale_w
        inputs["bias"] = bias
    helper.append_op(
        type="data_norm",
        inputs=inputs,
        outputs={
            "Y": data_norm_out,
            "Means": means,
            "Scales": scales,
            "BatchSize": batch_size,
            "BatchSum": batch_sum,
            "BatchSquareSum": batch_square_sum
        },
        attrs=attrs)

    return helper.append_activation(data_norm_out)
@templatedoc()
def layer_norm(input,
               scale=True,
               shift=True,
               begin_norm_axis=1,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               name=None):
    """
    :api_attr: Static Graph

    **Layer Normalization Layer**

    Applies layer normalization to mini-batch input data, normalizing over the
    dimensions from ``begin_norm_axis`` up to the last one.
    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

    The formula is as follows:

    ..  math::

        \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i

        \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}

        y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)

    - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layers
    - :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
    - :math:`g`: the trainable scale parameter.
    - :math:`b`: the trainable bias parameter.

    Args:
        input(Variable): A multi-dimension ``Tensor``, data type is float32 or float64.
        scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
            normalization. Default: True.
        shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
            normalization. Default: True.
        begin_norm_axis(int, optional): Normalization is performed along
            dimensions ``begin_norm_axis .. rank(input)``. Default: 1.
        epsilon(float, optional): Small value added to the variance to prevent
            division by zero. Default: 1e-05.
        param_attr(ParamAttr, optional): Attribute of the learnable gain
            :math:`g`; ignored when ``scale`` is False. When None, a default
            :code:`ParamAttr` initialized to 1 is added. Default: None.
        bias_attr(ParamAttr, optional): Attribute of the learnable bias
            :math:`b`; ignored when ``shift`` is False. When None, a default
            :code:`ParamAttr` initialized to 0 is added. Default: None.
        act(str, optional): Activation applied to the normalized output.
            Default: None.
        name(str): Normally there is no need for user to set this property. For
            more information, please refer to :ref:`api_guide_Name`.
            Default: None.

    Returns:
        Variable: ``Tensor`` with the normalized result; same shape and data
        type as ``input``.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
            hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            np_x = np.random.random(size=(8, 3, 32, 32)).astype('float32')
            output = exe.run(feed={"x": np_x}, fetch_list = [hidden1])
            print(output)
    """
    assert not in_dygraph_mode(
    ), "please use LayerNorm instead of layer_norm in dygraph mode!"
    helper = LayerHelper('layer_norm', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'layer_norm')
    dtype = helper.input_dtype()

    # Gain/bias cover every element of the normalized trailing dimensions.
    param_shape = [reduce(lambda a, b: a * b, input.shape[begin_norm_axis:])]

    inputs = {'X': input}
    if scale:
        assert param_attr is not False, "param_attr should not be False when using scale."
        inputs['Scale'] = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
    elif param_attr:
        warnings.warn("param_attr is only available with scale is True.")
    if shift:
        assert bias_attr is not False, "bias_attr should not be False when using shift."
        inputs['Bias'] = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
    elif bias_attr:
        warnings.warn("bias_attr is only available with shift is True.")

    # Mean/variance are auxiliary op outputs; gradients never flow through them.
    mean_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    layer_norm_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type="layer_norm",
        inputs=inputs,
        outputs={
            "Y": layer_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={"epsilon": epsilon,
               "begin_norm_axis": begin_norm_axis})

    return helper.append_activation(layer_norm_out)
@templatedoc()
def group_norm(input,
               groups,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               data_layout='NCHW',
               name=None):
    """
    :api_attr: Static Graph

    **Group Normalization Layer**

    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

    Parameters:
        input(Variable): 4-D Tensor, the data type is float32 or float64.
        groups(int): The number of groups that divided from channels, the data type
            is int32.
        epsilon(float, optional): Small value added to the variance to prevent
            division by zero, the data type is float32. Default: 1e-05.
        param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
            attribute. If a bool type, only False is supported, which means there is no weight parameter.
            Default: None, the default weight parameter attribute is used. For more information, please
            refer to :ref:`api_guide_ParamAttr` .
        bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
            attribute. If a bool type, only False is supported, which means there is no bias parameter.
            Default: None, the default bias parameter attribute is used. For more information, please
            refer to :ref:`api_guide_ParamAttr` .
        act(str, optional): Activation to be applied to the output of group normalization.
        data_layout(str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        name (str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: A 4-D Tensor with the same data type and data format as `input`.

    Raises:
        ValueError: If `data_layout` is neither 'NCHW' nor 'NHWC'.
        ValueError: If `groups` is greater than the number of input channels.
        ValueError: If `groups` is less than 1.
        ShapeError: If the param_attr(Scale) is not 1-D Tensor.
        ShapeError: If the param_attr(Scale)'s first dimension size is not equal to the input channels.
        ShapeError: If the bias_attr(Bias) is not 1-D Tensor.
        ShapeError: If the bias_attr(Bias)'s first dimension size is not equal to the input channels.

    Examples:
       .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32')
            x = fluid.layers.group_norm(input=data, groups=4)
    """
    helper = LayerHelper('group_norm', **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'group_norm')

    if data_layout != 'NCHW' and data_layout != 'NHWC':
        raise ValueError(
            "Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received "
            + data_layout + " but only NCHW or NHWC supported.")

    # The channel axis depends on the layout; scale/bias are per-channel.
    channel_axis = 1 if data_layout == 'NCHW' else -1
    param_shape = [input.shape[channel_axis]]

    inputs = {'X': input}
    if param_attr:
        inputs['Scale'] = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
    if bias_attr:
        inputs['Bias'] = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    # Auxiliary statistics produced by the op; not trained.
    mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    group_norm_out = helper.create_variable(dtype=dtype)

    helper.append_op(
        type="group_norm",
        inputs=inputs,
        outputs={
            "Y": group_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={
            "epsilon": epsilon,
            "groups": groups,
            "data_layout": data_layout
        })

    return helper.append_activation(group_norm_out)
@templatedoc()
def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
    """
    :api_attr: Static Graph

    **Spectral Normalization Layer**

    This operation calculates the spectral normalization value of the weight
    parameters of fc, conv1d, conv2d, conv3d layers, which should be 2-D, 3-D,
    4-D, 5-D Parameters. The output tensor has the same shape as the input.
    The calculation proceeds as follows.

    Step 1:
    Generate vector U in shape of [H], and V in shape of [W].
    While H is the :attr:`dim` th dimension of the input weights,
    and W is the product result of remaining dimensions.

    Step 2:
    :attr:`power_iters` should be a positive integer, do following
    calculations with U and V for :attr:`power_iters` rounds. Calculations
    as follows:

    .. math::

        \mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}

        \mathbf{u} := \\frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2}

    Step 3:
    Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.

    .. math::

        \sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}

        \mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}

    Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .

    Args:
        weight(${weight_type}): ${weight_comment}
        dim(int): ${dim_comment}
        power_iters(int): ${power_iters_comment}
        eps(float): ${eps_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Variable: A tensor of weight parameters after spectral normalization,
                  with the same data type and shape as the input tensor.

    Examples:
       .. code-block:: python

            import paddle.fluid as fluid

            weight = fluid.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
            x = fluid.layers.spectral_norm(weight=weight, dim=1, power_iters=2)
    """
    helper = LayerHelper('spectral_norm', **locals())
    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                             'spectral_norm')
    check_type(dim, 'dim', int, 'spectral_norm')
    check_type(power_iters, 'power_iters', int, 'spectral_norm')
    check_type(eps, 'eps', float, 'spectral_norm')
    dtype = weight.dtype

    # Fold every non-`dim` axis into one: the weight is treated as an
    # h-by-w matrix for the power iteration.
    h = weight.shape[dim]
    w = np.prod(weight.shape) // h

    def _aux_vector(size):
        # Power-iteration vectors persist between steps but are never trained.
        vec = helper.create_parameter(
            attr=ParamAttr(),
            shape=[size],
            dtype=dtype,
            default_initializer=Normal(0., 1.))
        vec.stop_gradient = True
        return vec

    inputs = {'Weight': weight, 'U': _aux_vector(h), 'V': _aux_vector(w)}

    # create output
    out = helper.create_variable(dtype=dtype)

    helper.append_op(
        type="spectral_norm",
        inputs=inputs,
        outputs={"Out": out, },
        attrs={
            "dim": dim,
            "power_iters": power_iters,
            "eps": eps,
        })

    return out
def conv2d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=0,
                     stride=1,
                     dilation=1,
                     groups=None,
                     param_attr=None,
                     bias_attr=None,
                     use_cudnn=True,
                     act=None,
                     name=None,
                     data_format='NCHW'):
    """
    :api_attr: Static Graph

    The convolution2D transpose layer calculates the output based on the input,
    filter, and dilations, strides, paddings. Input(Input) and output(Output)
    are in NCHW or NHWC format. Where N is batch size, C is the number of channels,
    H is the height of the feature, and W is the width of the feature.
    Parameters(dilations, strides, paddings) are two elements. These two elements
    represent height and width, respectively. The details of convolution transpose
    layer, please refer to the following explanation and references
    `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    Where:

    * :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format.
    * :math:`W`: Filter value, a 4-D Tensor with MCHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

           H^\prime_{out} &= (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\\\
           W^\prime_{out} &= (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1 \\\\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\\\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ]

    Note:
          The conv2d_transpose can be seen as the backward of the conv2d. For conv2d,
          when stride > 1, conv2d maps multiple input shape to the same output shape,
          so for conv2d_transpose, when stride > 1, input shape maps multiple output shape.
          If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
          else, the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
          and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must
          between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`,
          conv2d_transpose can compute the kernel size automatically.

    Args:
        input(Variable): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
            its data type is float32 or float64.
        num_filters(int): The number of the filter. It is as same as the output
            image channel.
        output_size(int|tuple, optional): The output image size. If output size is a
            tuple, it must contain two integers, (image_height, image_width). None if use
            filter_size, padding, and stride to calculate output_size.
            If output_size and filter_size are specified at the same time, They
            should follow the formula above. Default: None. output_size and filter_size
            should not be None at the same time.
        filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_height, filter_size_width).
            Otherwise, filter_size_height = filter_size_width = filter_size. None if
            use output size to calculate filter_size. Default: None. filter_size and
            output_size should not be None at the same time.
        stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
            If stride is a tuple, it must contain two integers, (stride_height, stride_width).
            Otherwise, stride_height = stride_width = stride. Default: stride = 1.
        padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds
             `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a
             string, either 'VALID' or 'SAME' supported, which is the padding algorithm.
             If `padding` is a tuple or list, it could be in three forms:
             `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and
            when `data_format` is `'NCHW'`,
            `padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `'NHWC'`, `padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple, it must contain two integers, (dilation_height, dilation_width).
            Otherwise, dilation_height = dilation_width = dilation. Default: dilation = 1.
        groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            Default: groups = 1.
        param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv2d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True.
        act (str, optional): Activation type, if it is set to None, activation is not appended.
            Default: None.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        A Variable holding Tensor representing the conv2d_transpose, whose
        data type is the same with input and shape is (num_batches, channels, out_h,
        out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor variable
        storing the transposed convolution result, and if act is not None, the
        tensor variable storing transposed convolution and non-linearity activation
        result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ValueError: If `output_size` and filter_size are None at the same time.
        ShapeError: If the input is not 4-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels.
        ShapeError: If the size of `output_size` is not equal to that of `stride`.

    Examples:
       .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
          conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
    """
    assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
    if data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received "
            + data_format + " but only NCHW or NHWC supported.")
    # The channel axis depends on the layout: axis 1 for NCHW, last axis for NHWC.
    input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1]
    op_type = 'conv2d_transpose'
    # Dispatch to the specialized depthwise kernel when each input channel forms
    # its own group (only taken on the non-cuDNN path).
    if (input_channel == groups and num_filters == input_channel and
            not use_cudnn):
        op_type = 'depthwise_conv2d_transpose'
    helper = LayerHelper(op_type, **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv2d_transpose must be Variable")
    stride = utils.convert_to_list(stride, 2, 'stride')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')
    if not isinstance(use_cudnn, bool):
        raise ValueError("use_cudnn should be True or False")

    def _update_padding(padding, data_format):
        # Normalize every accepted padding form into the flat 4-element list
        # [pad_top, pad_bottom, pad_left, pad_right].
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 4:
            # Nested per-axis form: batch/channel entries must be zero padding.
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')
        else:
            # Compact [pad_h, pad_w] (or scalar) form: expand symmetrically.
            padding = utils.convert_to_list(padding, 2, 'padding')
            padding = [padding[0], padding[0], padding[1], padding[1]]
        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0, 0]
    padding = _update_padding(padding, data_format)
    if filter_size is None:
        # Infer the kernel size by inverting the transposed-convolution
        # output-size formula documented above.
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size]
        h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
        w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]
        filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] +
                         padding[1] - 1) // dilation[0] + 1
        filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] +
                         padding[3] - 1) // dilation[1] + 1
        filter_size = [filter_size_h, filter_size_w]
    else:
        filter_size = utils.convert_to_list(filter_size, 2,
                                            'conv2d_transpose.filter_size')
    # Symmetric padding can be passed to the op in the compact [pad_h, pad_w] form.
    if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
        padding = [padding[0], padding[2]]
    if output_size is None:
        output_size = []
    elif isinstance(output_size, (list, tuple, int)):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        raise ValueError("output_size should be int, list[int] or tuple[int]")
    groups = 1 if groups is None else groups
    filter_shape = [input_channel, num_filters // groups] + filter_size
    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=op_type,
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': pre_bias},
        attrs={
            'output_size': output_size,
            'strides': stride,
            'paddings': padding,
            'padding_algorithm': padding_algorithm,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'data_format': data_format
        })
    # Bias is broadcast along the channel axis: dim 1 for NCHW, dim 3 for NHWC.
    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
    out = helper.append_activation(pre_act)
    return out
def conv3d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=0,
                     stride=1,
                     dilation=1,
                     groups=None,
                     param_attr=None,
                     bias_attr=None,
                     use_cudnn=True,
                     act=None,
                     name=None,
                     data_format='NCDHW'):
    """
    :api_attr: Static Graph

    The convolution3D transpose layer calculates the output based on the input,
    filter, and dilations, strides, paddings. Input(Input) and output(Output)
    are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,
    D is the depth of the feature, H is the height of the feature, and W
    is the width of the feature. Parameters(dilations, strides, paddings) are
    three elements. These three elements represent depth, height and width,
    respectively. The details of convolution transpose layer, please refer to
    the following explanation and references
    `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    In the above equation:

    * :math:`X`: Input value, a Tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a Tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

        Where

        .. math::

           D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
           H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
           W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
           D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]

    Note:
          The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
          when stride > 1, conv3d maps multiple input shape to the same output shape,
          so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
          If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out},
          W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
          size must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
          the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
          and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
          between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
          conv3d_transpose can compute the kernel size automatically.

    Args:
        input(Variable): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
            of input is float32 or float64.
        num_filters(int): The number of the filter. It is as same as the output
            image channel.
        output_size(int|tuple, optional): The output image size. If output size is a
            tuple, it must contain three integers, (image_depth, image_height, image_width). This
            parameter only works when filter_size is None. If output_size and filter_size are
            specified at the same time, They should follow the formula above. Default: None.
            Output_size and filter_size should not be None at the same time.
        filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
            it must contain three integers, (filter_size_depth, filter_size_height,
            filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
            filter_size_width = filter_size. None if use output size to
            calculate filter_size. Default: None. filter_size and output_size should not be
            None at the same time.
        padding(int|list|str|tuple, optional): The padding size. The padding argument effectively
             adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
             either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
             is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `'NCDHW'`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `'NDHWC'`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
            If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
            stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
            Default: stride = 1.
        dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            Default: groups=1
        param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True
        act (str, optional): Activation type, if it is set to None, activation is not appended.
            Default: None.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.

    Returns:
        A Variable holding Tensor representing the conv3d_transpose, whose data
        type is the same with input and shape is (num_batches, channels, out_d, out_h,
        out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
        variable storing the transposed convolution result, and if act is not None, the tensor
        variable storing transposed convolution and non-linearity activation result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ValueError: If `output_size` and filter_size are None at the same time.
        ShapeError: If the input is not 5-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels.
        ShapeError: If the size of `output_size` is not equal to that of `stride`.

    Examples:
       .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
          conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3)
    """
    assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
    if data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received "
            + data_format + " but only NCDHW or NDHWC supported.")
    l_type = "conv3d_transpose"
    helper = LayerHelper(l_type, **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv3d_transpose must be Variable")
    # The channel axis depends on the layout: axis 1 for NCDHW, last axis for NDHWC.
    input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[
        -1]
    stride = utils.convert_to_list(stride, 3, 'stride')
    dilation = utils.convert_to_list(dilation, 3, 'dilation')
    if not isinstance(use_cudnn, bool):
        raise ValueError("use_cudnn should be True or False")

    def _update_padding(padding, data_format):
        # Normalize every accepted padding form into the flat 6-element list
        # [pad_front, pad_back, pad_top, pad_bottom, pad_left, pad_right].
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 5:
            # Nested per-axis form: batch/channel entries must be zero padding.
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')
        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')
        else:
            # Compact [pad_d, pad_h, pad_w] (or scalar) form: expand symmetrically.
            padding = utils.convert_to_list(padding, 3, 'padding')
            padding = [
                padding[0], padding[0], padding[1], padding[1], padding[2],
                padding[2]
            ]
        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0, 0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0, 0, 0, 0]
    padding = _update_padding(padding, data_format)
    if filter_size is None:
        # Infer the kernel size by inverting the transposed-convolution
        # output-size formula documented above.
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size, output_size]
        d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1]
        h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2]
        w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3]
        filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] +
                         padding[1] - 1) // dilation[0] + 1
        filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] +
                         padding[3] - 1) // dilation[1] + 1
        filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] +
                         padding[5] - 1) // dilation[2] + 1
        filter_size = [filter_size_d, filter_size_h, filter_size_w]
    else:
        filter_size = utils.convert_to_list(filter_size, 3,
                                            'conv3d_transpose.filter_size')
    # Symmetric padding can be passed to the op in the compact [pad_d, pad_h, pad_w] form.
    if len(padding) == 6 and utils._is_symmetric_padding(padding, 3):
        padding = [padding[0], padding[2], padding[4]]
    if output_size is None:
        output_size = []
    elif isinstance(output_size, (list, tuple, int)):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        raise ValueError("output_size should be int, list[int] or tuple[int]")
    groups = 1 if groups is None else groups
    filter_shape = [input_channel, num_filters // groups] + filter_size
    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
    # NOTE(review): the op's 'data_format' attribute uses the 4-D layout names
    # ('NCHW'/'NHWC') even for 5-D data — presumably a legacy attr convention;
    # map the 5-D names onto them before appending the op.
    if data_format == 'NCDHW':
        data_format = 'NCHW'
    if data_format == 'NDHWC':
        data_format = 'NHWC'
    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=l_type,
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': pre_bias},
        attrs={
            'output_size': output_size,
            'strides': stride,
            'paddings': padding,
            'padding_algorithm': padding_algorithm,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'data_format': data_format
        })
    # Bias is broadcast along the channel axis: dim 1 for NCDHW ('NCHW' here),
    # dim 4 for NDHWC ('NHWC' here).
    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
    out = helper.append_activation(pre_act)
    return out
def reduce_sum(input, dim=None, keep_dim=False, name=None):
    """
    :alias_main: paddle.reduce_sum
    :alias: paddle.reduce_sum,paddle.tensor.reduce_sum,paddle.tensor.math.reduce_sum
    :old_api: paddle.fluid.layers.reduce_sum

    Computes the sum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of summation operation on the specified dim of input tensor,
        it's data type is the same as input's Tensor.

    Raises:
        TypeError, if out data type is different with the input data type.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_sum(x)  # [3.5]
            fluid.layers.reduce_sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
            fluid.layers.reduce_sum(x, dim=-1)  # [1.9, 1.6]
            fluid.layers.reduce_sum(x, dim=1, keep_dim=True)  # [[1.9], [1.6]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1, 2], [3, 4]],
            #      [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
            fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]

    """
    # Normalize a scalar axis to the list form the op expects.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    # Reducing over every axis is expressed via 'reduce_all'; in that case the
    # 'dim' attribute is ignored by the op but must still be non-empty, so [0]
    # is used as a placeholder.
    reduce_all = dim is None or dim == [] or len(dim) == len(input.shape)
    dim = [0] if dim is None or dim == [] else dim
    if in_dygraph_mode():
        # Fast path: call the C++ op directly without building program desc.
        return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                   'reduce_all', reduce_all)
    attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
    helper = LayerHelper('reduce_sum', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs)
    return out
@deprecated(since="2.0.0", update_to="paddle.mean")
def reduce_mean(input, dim=None, keep_dim=False, name=None):
    """
    Computes the mean of the input tensor's elements along the given dimension.

    Deprecated: prefer :ref:`api_paddle_mean`; this function is kept as a
    backward-compatible alias and simply forwards its arguments.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimension along which the mean is computed. If
            `None`, compute the mean over all elements of :attr:`input`
            and return a variable with a single element, otherwise it
            must be in the range :math:`[-rank(input), rank(input))`. If
            :math:`dim[i] < 0`, the dimension to reduce is
            :math:`rank(input) + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of average on the specified dim of input tensor,
        it's data type is the same as input's Tensor.

    Raises:
        TypeError, if out data type is different with the input data type.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_mean(x)  # [0.4375]
            fluid.layers.reduce_mean(x, dim=0)  # [0.15, 0.25, 0.55, 0.8]
            fluid.layers.reduce_mean(x, dim=-1)  # [0.475, 0.4]
            fluid.layers.reduce_mean(x, dim=1, keep_dim=True)  # [[0.475], [0.4]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5]
            fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0]
    """
    # Thin compatibility shim: all work is delegated to the 2.0 API, which
    # accepts the same semantics under the names axis/keepdim.
    result = paddle.mean(x=input, axis=dim, keepdim=keep_dim, name=name)
    return result
def reduce_max(input, dim=None, keep_dim=False, name=None):
    """
    :alias_main: paddle.reduce_max
    :alias: paddle.reduce_max,paddle.tensor.reduce_max,paddle.tensor.math.reduce_max
    :old_api: paddle.fluid.layers.reduce_max

    Computes the maximum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimension along which the maximum is computed.
            If :attr:`None`, compute the maximum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of maximum on the specified dim of input tensor,
        it's data type is the same as input's Tensor.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_max(x)  # [0.9]
            fluid.layers.reduce_max(x, dim=0)  # [0.2, 0.3, 0.6, 0.9]
            fluid.layers.reduce_max(x, dim=-1)  # [0.9, 0.7]
            fluid.layers.reduce_max(x, dim=1, keep_dim=True)  # [[0.9], [0.7]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0]
            fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0]
    """
    helper = LayerHelper('reduce_max', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    # Normalize a scalar axis to the list form the op expects.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    # Reducing over every axis is expressed via 'reduce_all'; in that case the
    # 'dim' attribute is ignored by the op but must still be non-empty, so [0]
    # is used as a placeholder.
    reduce_all = dim is None or dim == [] or len(dim) == len(input.shape)
    helper.append_op(
        type='reduce_max',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': [0] if dim is None or dim == [] else dim,
            'keep_dim': keep_dim,
            'reduce_all': reduce_all
        })
    return out
def reduce_min(input, dim=None, keep_dim=False, name=None):
    """
    :alias_main: paddle.reduce_min
    :alias: paddle.reduce_min,paddle.tensor.reduce_min,paddle.tensor.math.reduce_min
    :old_api: paddle.fluid.layers.reduce_min

    Computes the minimum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the minimum is computed.
            If :attr:`None`, compute the minimum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, result of minimum on the specified dim of input tensor,
        it's data type is the same as input's Tensor.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_min(x)  # [0.1]
            fluid.layers.reduce_min(x, dim=0)  # [0.1, 0.2, 0.5, 0.7]
            fluid.layers.reduce_min(x, dim=-1)  # [0.2, 0.1]
            fluid.layers.reduce_min(x, dim=1, keep_dim=True)  # [[0.2], [0.1]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0]
            fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0]
    """
    helper = LayerHelper('reduce_min', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    # Normalize `dim` to a list so the op attrs below are uniform.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    # Reducing over every axis is signalled via `reduce_all`; in that case
    # the op ignores `dim`, so a placeholder [0] is passed.
    reduce_all_flag = (dim is None or dim == [] or
                       len(dim) == len(input.shape))
    helper.append_op(
        type='reduce_min',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': reduce_all_flag
        })
    return out
def reduce_prod(input, dim=None, keep_dim=False, name=None):
    """
    :alias_main: paddle.reduce_prod
    :alias: paddle.reduce_prod,paddle.tensor.reduce_prod,paddle.tensor.math.reduce_prod
    :old_api: paddle.fluid.layers.reduce_prod

    Computes the product of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (int|list|tuple, optional): The dimensions along which the product is performed. If
            :attr:`None`, multiply all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, result of product on the specified dim of input tensor,
        it's data type is the same as input's Tensor.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_prod(x)  # [0.0002268]
            fluid.layers.reduce_prod(x, dim=0)  # [0.02, 0.06, 0.3, 0.63]
            fluid.layers.reduce_prod(x, dim=-1)  # [0.027, 0.0084]
            fluid.layers.reduce_prod(x, dim=1,
                                     keep_dim=True)  # [[0.027], [0.0084]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0]
            fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0]
    """
    helper = LayerHelper('reduce_prod', **locals())
    # Unlike the other reduce_* ops, `dim` may also be a tuple here;
    # normalize int/tuple to a list and reject anything else.
    if dim is not None and not isinstance(dim, list):
        if isinstance(dim, tuple):
            dim = list(dim)
        elif isinstance(dim, int):
            dim = [dim]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".
                format(type(dim)))
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod')
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    # Reducing over every axis is signalled via `reduce_all`; in that case
    # the op ignores `dim`, so a placeholder [0] is passed.
    reduce_all_flag = (dim is None or dim == [] or
                       len(dim) == len(input.shape))
    helper.append_op(
        type='reduce_prod',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': reduce_all_flag
        })
    return out
def reduce_all(input, dim=None, keep_dim=False, name=None):
    """
    :alias_main: paddle.reduce_all
    :alias: paddle.reduce_all,paddle.tensor.reduce_all,paddle.tensor.logic.reduce_all
    :old_api: paddle.fluid.layers.reduce_all

    This OP computes the ``logical and`` of tensor elements over the given dimension, and output the result.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
        dim (list|int|optional): The dimension along which the logical and is computed.
            If :attr:`None`, compute the logical and over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically. The default value is None.

    Returns:
        Variable, the output data type is bool. : The reduced tensor variable with ``logical and`` in given dims.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [True, True]]
            x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
            x = layers.cast(x, 'bool')

            out = layers.reduce_all(x)  # False
            out = layers.reduce_all(x, dim=0)  # [True, False]
            out = layers.reduce_all(x, dim=-1)  # [False, True]
            # keep_dim=False, x.shape=(2,2), out.shape=(2,)

            out = layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)
    """
    # NOTE: only bool input is accepted; ('bool') was a plain string, not a
    # tuple, so it is written without the misleading parentheses here.
    check_variable_and_dtype(input, 'input', 'bool', 'reduce_all')
    helper = LayerHelper('reduce_all', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    # Normalize `dim` to a list so the op attrs below are uniform.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    # Reducing over every axis is signalled via `reduce_all`; in that case
    # the op ignores `dim`, so a placeholder [0] is passed.
    reduce_all_flag = (dim is None or dim == [] or
                       len(dim) == len(input.shape))
    helper.append_op(
        type='reduce_all',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': reduce_all_flag
        })
    return out
def reduce_any(input, dim=None, keep_dim=False, name=None):
    """
    :alias_main: paddle.reduce_any
    :alias: paddle.reduce_any,paddle.tensor.reduce_any,paddle.tensor.logic.reduce_any
    :old_api: paddle.fluid.layers.reduce_any

    This OP computes the ``logical or`` of tensor elements over the given dimension, and output the result.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
        dim (list|int|optional): The dimension along which the logical or is computed.
            If :attr:`None`, compute the logical or over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically. The default value is None.

    Returns:
        Variable, the output data type is bool. : The reduced tensor variable with ``logical or`` in given dims.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [False, False]]
            x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
            x = layers.cast(x, 'bool')

            out = layers.reduce_any(x)  # True
            out = layers.reduce_any(x, dim=0)  # [True, False]
            out = layers.reduce_any(x, dim=-1)  # [True, False]
            # keep_dim=False, x.shape=(2,2), out.shape=(2,)

            out = layers.reduce_any(x, dim=1,
                                    keep_dim=True)  # [[True], [False]]
            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)
    """
    # NOTE: only bool input is accepted; ('bool') was a plain string, not a
    # tuple, so it is written without the misleading parentheses here.
    check_variable_and_dtype(input, 'input', 'bool', 'reduce_any')
    helper = LayerHelper('reduce_any', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    # Normalize `dim` to a list so the op attrs below are uniform.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    # Reducing over every axis is signalled via `reduce_all`; in that case
    # the op ignores `dim`, so a placeholder [0] is passed.
    reduce_all_flag = (dim is None or dim == [] or
                       len(dim) == len(input.shape))
    helper.append_op(
        type='reduce_any',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': reduce_all_flag
        })
    return out
def split(input, num_or_sections, dim=-1, name=None):
    """
    Split the input tensor into multiple sub-Tensors.

    Args:
        input (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
        num_or_sections (int|list|tuple): If ``num_or_sections`` is int, then the ``num_or_sections``
            indicates the number of equal sized sub-Tensors that the ``input``
            will be divided into. If ``num_or_sections`` is a list or tuple, the length of it
            indicates the number of sub-Tensors and the elements in it indicate the sizes of sub-Tensors'
            dimension orderly. The length of the list mustn't be larger than the ``input`` 's size of specified dim.
        dim (int|Tensor, optional): The dimension along which to split, it can be a scalar with type ``int`` or
            a ``Tensor`` with shape [1] and data type  ``int32`` or ``int64``. If :math:`dim < 0`,
            the dimension to split along is :math:`rank(input) + dim`. Default is -1.
        name (str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        list(Tensor): The list of segmented Tensors.

    Example:
        .. code-block:: python

            import paddle.fluid as fluid

            # input is a Tensor which shape is [3, 9, 5]
            input = fluid.data(
                 name="input", shape=[3, 9, 5], dtype="float32")

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            # dim is negative, the real dim is (rank(input) + axis) which real
            # value is 1.
            out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]
    """
    if in_dygraph_mode():
        num = None
        attrs = ()

        if isinstance(dim, Variable):
            dim = dim.numpy()
            dim = dim.item(0)
        dim = (len(input.shape) + dim) if dim < 0 else dim
        attrs += ('axis', dim)

        if isinstance(num_or_sections, int):
            num = num_or_sections
            attrs += ('num', num_or_sections)
        elif isinstance(num_or_sections, (list, tuple)):
            num = len(num_or_sections)
            if utils._contain_var(num_or_sections):
                # Replace Variable entries with their concrete values. Work
                # on a copy so the caller's list is not mutated in place (and
                # so tuples, which do not support item assignment, are
                # handled correctly as well).
                num_or_sections = [
                    item.numpy()[0] if isinstance(item, Variable) else item
                    for item in num_or_sections
                ]
            attrs += ('sections', list(num_or_sections))
        else:
            raise TypeError(
                "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                "received %s." % (type(num_or_sections)))
        return core.ops.split(input, num, *attrs)

    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'split')
    check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split')
    check_type(dim, 'dim', (int, Variable), 'split')
    if isinstance(dim, Variable):
        check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')

    helper = LayerHelper('split', **locals())
    input_shape = input.shape
    inputs = {'X': input}
    attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}

    def _get_SectionsTensorList(one_list):
        # Convert a mixed list of ints / Variables into a list of int32
        # tensors for the op's SectionsTensorList input. At most one -1
        # (inferred section size) is allowed.
        tensor_list = []
        unk_dim_idx = -1
        for idx, dim_size in enumerate(one_list):
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                tensor_list.append(dim_size)
            else:
                assert (isinstance(dim_size, int))
                if dim_size == -1:
                    assert unk_dim_idx == -1, (
                        "Only one value of 'num_or_section' in split can "
                        "be -1. But received num_or_section[%d] is also -1." %
                        idx)
                    unk_dim_idx = idx
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out)
                tensor_list.append(temp_out)
        return tensor_list

    if isinstance(dim, Variable):
        dim.stop_gradient = True
        inputs['AxisTensor'] = dim
    else:
        # Resolve a negative axis against the input's rank.
        dim = (len(input_shape) + dim) if dim < 0 else dim
        attrs['axis'] = dim

    if isinstance(num_or_sections, int):
        assert num_or_sections > 1, 'num_or_sections must be more than 1.'
        # Only validate divisibility when the split axis size is known.
        if isinstance(dim, int) and input_shape[dim] > 0:
            assert input_shape[dim] % num_or_sections ==0, \
                "The input's size along the split dimension " \
                "must be evenly divisible by Attr(num_or_sections). " \
                "But %d is not evenly divisible by %d. " % (num_or_sections,input_shape[dim])
        num = num_or_sections
    else:
        if isinstance(dim, int) and input_shape[dim] > 0:
            assert len(num_or_sections) <= input_shape[
                dim], 'len(num_or_sections) must not be more than input.shape[dim].'
        num = len(num_or_sections)
        # Variable entries become -1 in the static attr; their actual values
        # are supplied through SectionsTensorList below.
        attrs['sections'] = list(
            map(lambda ele: -1 if isinstance(ele, Variable) else ele,
                num_or_sections))
        if utils._contain_var(num_or_sections):
            inputs['SectionsTensorList'] = _get_SectionsTensorList(
                num_or_sections)

    outs = [
        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
        for i in range(num)
    ]
    helper.append_op(
        type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
    return outs
def l2_normalize(x, axis, epsilon=1e-12, name=None):
    """
    :alias_main: paddle.nn.functional.l2_normalize
    :alias: paddle.nn.functional.l2_normalize,paddle.nn.functional.norm.l2_normalize
    :old_api: paddle.fluid.layers.l2_normalize

    Normalizes ``x`` along dimension ``axis`` using an L2 norm. For a 1-D
    tensor (``axis`` is forced to 0), this computes

    .. math::

        y = \\frac{x}{ \sqrt{\sum {x^2} + epsion }}

    For higher-rank ``x``, each 1-D slice along ``axis`` is normalized
    independently.

    Args:
        x(Variable|list): Input tensor, N-D, of data type float32 or float64.
        axis(int): Axis to normalize along. A negative value means
            rank(x) + axis; -1 is the last dimension.
        epsilon(float): Small constant added to avoid division by zero.
            Default is 1e-12.
        name(str, optional): Usually left as None. See :ref:`api_guide_Name`.

    Returns:
        Variable: Output with the same shape and data type as ``x``.

    Examples:
        .. code-block:: python

            # declarative mode
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[2,3])
            output = fluid.layers.l2_normalize(x=input,axis=0)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,3).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            # imperative mode
            import paddle.fluid.dygraph as dg
            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                output = fluid.layers.l2_normalize(x=input, axis=-1)
    """
    # A rank-1 tensor has a single axis to normalize along.
    if len(x.shape) == 1:
        axis = 0

    check_variable_and_dtype(x, "X", ("float32", "float64"), "norm")

    helper = LayerHelper("l2_normalize", **locals())
    normalized = helper.create_variable_for_type_inference(dtype=x.dtype)
    norm_values = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="norm",
        inputs={"X": x},
        outputs={"Out": normalized,
                 "Norm": norm_values},
        attrs={
            "axis": 1 if axis is None else axis,
            "epsilon": epsilon,
        })
    return normalized
@deprecated(since="2.0.0", update_to="paddle.matmul")
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
    """
    Applies matrix multiplication to two tensors.

    Currently, the input tensors' rank can be any, but when the rank of any
    inputs is bigger than 3, this two inputs' rank should be equal.

    The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
    flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:

    - If a transpose flag is specified, the last two dimensions of the tensor
      are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
      :math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
      :math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
      opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
      :math:`[1, D]` in transposed form.

    - After transpose, the two tensors are 2-D or n-D and matrix multiplication
      performs in the following way.

      - If both are 2-D, they are multiplied like conventional matrices.
      - If either is n-D, it is treated as a stack of matrices residing in the
        last two dimensions and a batched matrix multiply supporting broadcast
        applies on the two tensors.

    Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
    nontransposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        y (Variable): The input variable which is a Tensor or LoDTensor.
        transpose_x (bool): Whether to transpose :math:`x` before multiplication.
        transpose_y (bool): Whether to transpose :math:`y` before multiplication.
        alpha (float): The scale of output. Default 1.0.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:
        .. code-block:: python

            # Examples to clarify shapes of the inputs and output
            # x: [B, ..., M, K], y: [B, ..., K, N]
            # fluid.layers.matmul(x, y)  # out: [B, ..., M, N]
            # x: [B, M, K], y: [B, K, N]
            # fluid.layers.matmul(x, y)  # out: [B, M, N]
            # x: [B, M, K], y: [K, N]
            # fluid.layers.matmul(x, y)  # out: [B, M, N]
            # x: [M, K], y: [K, N]
            # fluid.layers.matmul(x, y)  # out: [M, N]
            # x: [B, M, K], y: [K]
            # fluid.layers.matmul(x, y)  # out: [B, M]
            # x: [K], y: [K]
            # fluid.layers.matmul(x, y)  # out: [1]
            # x: [M], y: [N]
            # fluid.layers.matmul(x, y, True, True)  # out: [M, N]

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
            y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
            out = fluid.layers.matmul(x, y, True, True)
    """
    # Op attributes are shared by the dygraph and static paths.
    attrs = {
        'transpose_X': transpose_x,
        'transpose_Y': transpose_y,
        'alpha': float(alpha),
    }
    if in_dygraph_mode():
        # Imperative path: run the op eagerly and return its output VarBase.
        out = _varbase_creator(dtype=x.dtype)
        core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
                        transpose_y, 'alpha', float(alpha))
        return out

    def __check_input(x, y):
        # Validate dtypes and statically-known shapes before building the op.
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(
                val, name, ['float16', 'float32', 'float64'], 'matmul')
        x_shape = list(x.shape)
        y_shape = list(y.shape)
        # Promote rank-1 inputs the way the op does: x -> [1, D], y -> [D, 1].
        if len(x_shape) == 1:
            x_shape = [1] + x_shape
        if len(y_shape) == 1:
            y_shape = y_shape + [1]

        # check the inner 2 dimensions
        if transpose_x:
            x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
        if transpose_y:
            y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
        if x_shape[-1] != y_shape[-2]:
            # -1 means the dimension is unknown at graph-build time; only a
            # known mismatch is an error here.
            assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
                "After performing an optional transpose, Input X's width should be " \
                "equal to Y's width for multiplication " \
                "prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
                (x_shape, y_shape)

        if len(y_shape) > 2 and len(x_shape) > 2:
            # Batched matmul: all leading (batch) dimensions must agree.
            for i, dim_x in enumerate(x_shape[:-2]):
                # don't check neg shape
                if dim_x < 0 or y_shape[i] < 0:
                    continue
                if dim_x != y_shape[i]:
                    raise ValueError(
                        "When the matrix is larger than 2 dimensions, the higher "
                        "dimensional values of the two matrices need to be equal. "
                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))

    __check_input(x, y)

    helper = LayerHelper('matmul', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs=attrs)
    return out
def topk(input, k, name=None):
    """
    :alias_main: paddle.topk
    :alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk
    :old_api: paddle.fluid.layers.topk

    Finds the values and indices of the k largest entries along the last
    dimension. A 1-D input yields its k largest elements; a higher-rank
    input is processed slice by slice along the last dimension.

    .. code-block:: text

        Case 1:

          Input:
            input.shape = [3, 4]
            input.data = [[5, 4, 2, 3],
                     [9, 7, 10, 25],
                     [6, 2, 10, 1]]
            k = 2

          Output:
            The first output:
            values.shape = [3, 2]
            values.data = [[5, 4],
                      [10, 25],
                      [6, 10]]

            The second output:
            indices.shape = [3, 2]
            indices.data = [[0, 1],
                       [2, 3],
                       [0, 2]]

    Args:
        input(Variable): The input tensor. Supported data types: float32, float64.
        k(int | Variable): How many top elements to look for along the last
            dimension of ``input``.
        name (str, optional): See :ref:`api_guide_Name`. Default None.

    Returns:
        Values (Variable): The k largest elements of each last-dimension slice,
            shaped :math:`input.shape[:-1]+[k]`.
        Indices (Variable): Indices of those elements, same shape as Values.

    Raises:
        ValueError: If :math:`k < 1` or :math:`k > last dimension of input`.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            # set batch size=None
            input = fluid.data(name="input", shape=[None, 13, 11], dtype='float32')
            top5_values, top5_indices = layers.topk(input, k=5) # top5_values.shape[None, 13, 5], top5_indices.shape=[None, 13, 5]

            # 1D Tensor
            input1 = fluid.data(name="input1", shape=[None, 13], dtype='float32')
            top5_values, top5_indices = layers.topk(input1, k=5) #top5_values.shape=[None, 5], top5_indices.shape=[None, 5]

            # k=Variable
            input2 = fluid.data(name="input2", shape=[None, 13, 11], dtype='float32')
            vk = fluid.data(name="vk", shape=[None, 1], dtype='int32') # save k in vk.data[0]
            vk_values, vk_indices = layers.topk(input2, k=vk) #vk_values.shape=[None, 13, k], vk_indices.shape=[None, 13, k]
    """
    if in_dygraph_mode():
        # Eager path: a Variable k is reduced to its scalar value first.
        if isinstance(k, Variable):
            k_value = k.numpy().item(0)
        else:
            k_value = k
        top_values, top_indices = core.ops.top_k(input, 'k', k_value)
        top_values.stop_gradient = True
        top_indices.stop_gradient = True
        return top_values, top_indices

    # Static path: a Variable k is wired in as an op input, an int k as an attr.
    inputs = {"X": [input]}
    if isinstance(k, Variable):
        attrs = {}
        inputs['K'] = [k]
    else:
        attrs = {'k': k}

    helper = LayerHelper("top_k", **locals())
    values = helper.create_variable_for_type_inference(dtype=input.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="top_k",
        inputs=inputs,
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs=attrs)
    # Sorting is not differentiable; block gradients through both outputs.
    values.stop_gradient = True
    indices.stop_gradient = True
    return values, indices
def ctc_greedy_decoder(input,
                       blank,
                       input_length=None,
                       padding_value=0,
                       name=None):
    """
    Decodes sequences with a greedy policy:

    1. Take the index of the maximum value in each row of ``input``
       (i.e. ``numpy.argmax(input, axis=0)``).
    2. In each resulting sequence, merge repeated tokens that sit between
       two blanks, then drop all blanks.

    Two modes are supported, selected by ``input_length``: lod mode
    (``input`` is a LoDTensor, ``input_length`` is None) and padding mode
    (``input`` is a padded Tensor with an explicit ``input_length``).

    A simple example as below:

    .. code-block:: text

        Given:
        (1) for lod mode:

        input.data = [[0.6, 0.1, 0.3, 0.1],
                      [0.3, 0.2, 0.4, 0.1],
                      [0.1, 0.5, 0.1, 0.3],
                      [0.5, 0.1, 0.3, 0.1],

                      [0.5, 0.1, 0.3, 0.1],
                      [0.2, 0.2, 0.2, 0.4],
                      [0.2, 0.2, 0.1, 0.5],
                      [0.5, 0.1, 0.3, 0.1]]

        input.lod = [[4, 4]]

        Computation:

        step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
               [[0], [2], [1], [0]]
        step2: merge repeated tokens and remove blank which is 0. Then we get first output sequence:
               [[2], [1]]

        Finally:

        output.data = [[2],
                       [1],
                       [3]]

        output.lod = [[2, 1]]

        (2) for padding mode:

         input.data = [[[0.6, 0.1, 0.3, 0.1],
                        [0.3, 0.2, 0.4, 0.1],
                        [0.1, 0.5, 0.1, 0.3],
                        [0.5, 0.1, 0.3, 0.1]],

                       [[0.5, 0.1, 0.3, 0.1],
                        [0.2, 0.2, 0.2, 0.4],
                        [0.2, 0.2, 0.1, 0.5],
                        [0.5, 0.1, 0.3, 0.1]]]

        input_length.data = [[4], [4]]
        input.shape = [2, 4, 4]

        step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
               [[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1]
        step2: Change the argmax result to use padding mode, then argmax result is
                [[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]]
        step3: Apply ctc_align to padding argmax result, padding_value is 0

        Finally:
        output.data = [[2, 1, 0, 0],
                       [3, 0, 0, 0]]

        output_length.data = [[2], [1]]

    Parameters:
        input(Variable): Probabilities of variable-length sequences. In lod
            mode: a 2-D LoDTensor of shape [Lp, num_classes + 1], where Lp is
            the total length of all input sequences. In padding mode: a 3-D
            Tensor of shape [batch_size, N, num_classes + 1] (blank label not
            included). Data type float32 or float64.
        blank(int): Blank label index for CTC loss, in the half-open
            interval [0, num_classes + 1).
        input_length(Variable, optional): 2-D LoDTensor of shape
            [batch_size, 1], dtype int64. Required for padding mode; None
            selects lod mode.
        padding_value(int): Value used to pad the decoded output.
        name(str, optional): Usually left as None. See :ref:`api_guide_Name`.

    Returns:
        Lod mode: a 2-D int64 LoDTensor of shape [Lp, 1], where 'Lp' is the
        total length of all decoded sequences. If every decoded sequence is
        empty, the result is [-1] with an empty LoD [[]].

        Padding mode: a tuple (output, output_length); ``output`` is a 2-D
        int64 Tensor of shape [batch_size, N] and ``output_length`` is a 2-D
        int64 Tensor of shape [batch_size, 1] holding each decoded length.

    Return type:
        For lod mode: Variable
        For padding mode: tuple of two Variables (output, output_length).

    Examples:
        .. code-block:: python

            # for lod mode
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
            cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)

            # for padding mode
            x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
            x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
            out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
                            input_length=x_pad_len)
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'ctc_greedy_decoder')

    helper = LayerHelper("ctc_greedy_decoder", **locals())
    # Step 1: per-timestep argmax, obtained as the indices of top-1.
    _, topk_indices = topk(input, k=1)

    # ctc align op
    decoded = helper.create_variable_for_type_inference(dtype="int64")

    if input_length is None:
        # Lod mode: ctc_align merges repeats and strips blanks per sequence.
        helper.append_op(
            type="ctc_align",
            inputs={"Input": [topk_indices]},
            outputs={"Output": [decoded]},
            attrs={"merge_repeated": True,
                   "blank": blank})
        return decoded

    # Padding mode: drop the trailing singleton dim from the argmax result,
    # then align with explicit per-sequence lengths and a padding value.
    decoded_length = helper.create_variable_for_type_inference(dtype="int64")
    aligned_input = squeeze(topk_indices, [2])
    helper.append_op(
        type="ctc_align",
        inputs={"Input": [aligned_input],
                "InputLength": [input_length]},
        outputs={"Output": [decoded],
                 "OutputLength": [decoded_length]},
        attrs={
            "merge_repeated": True,
            "blank": blank,
            "padding_value": padding_value
        })
    return decoded, decoded_length
def transpose(x, perm, name=None):
    """
    :alias_main: paddle.transpose
    :alias: paddle.transpose,paddle.tensor.transpose,paddle.tensor.linalg.transpose,paddle.tensor.manipulation.transpose
    :old_api: paddle.fluid.layers.transpose

    Permutes the dimensions of ``x`` according to ``perm``: dimension ``i``
    of the output corresponds to dimension ``perm[i]`` of the input.

    Args:
        x (Variable): The input Tensor. It is a N-D Tensor of data types float32, float64, int32.
        perm (list): Permute the input according to the data of perm.
        name (str): The name of this layer. It is optional.

    Returns:
        Variable: A transposed n-D Tensor, with data type being float32, float64, int32, int64.

    For Example:

        .. code-block:: text

         x = [[[ 1  2  3  4] [ 5  6  7  8] [ 9 10 11 12]]
             [[13 14 15 16] [17 18 19 20] [21 22 23 24]]]
         shape(x) =  [2,3,4]

         # Example 1
         perm0 = [1,0,2]
         y_perm0 = [[[ 1  2  3  4] [13 14 15 16]]
                   [[ 5  6  7  8]  [17 18 19 20]]
                   [[ 9 10 11 12]  [21 22 23 24]]]
         shape(y_perm0) = [3,2,4]

         # Example 2
         perm1 = [2,1,0]
         y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]]
                   [[ 2 14] [ 6 18] [10 22]]
                   [[ 3 15]  [ 7 19] [11 23]]
                   [[ 4 16]  [ 8 20] [12 24]]]
         shape(y_perm1) = [4,3,2]

    Examples:

        .. code-block:: python

            # use append_batch_size=False to avoid prepending extra
            # batch size in shape
            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[2, 3, 4],
                            dtype='float32', append_batch_size=False)
            x_transposed = fluid.layers.transpose(x, perm=[1, 0, 2])
            print x_transposed.shape
            #(3L, 2L, 4L)
    """
    if in_dygraph_mode():
        out, _ = core.ops.transpose2(x, 'axis', perm)
        return out

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'transpose')
    check_type(perm, 'perm', list, 'transpose')

    # `perm` must name every dimension of `x` exactly once.
    if len(perm) != len(x.shape):
        raise ValueError(
            "Input(perm) is the permutation of dimensions of Input(x), "
            "its length should be equal to dimensions of Input(x), "
            "but received dimension of Input(x) is %s, "
            "the length of Input(perm) is %s." % (len(x.shape), len(perm)))
    # Every entry of `perm` must be a valid axis of `x`.
    for position, axis in enumerate(perm):
        if axis >= len(x.shape):
            raise ValueError(
                "Each element in Input(perm) should be less than Input(x)'s dimension, "
                "but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
                "dimension %d." % (position, perm[position], len(x.shape)))

    helper = LayerHelper('transpose', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    x_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='transpose2',
        inputs={'X': [x]},
        outputs={'Out': [out],
                 'XShape': [x_shape]},
        attrs={'axis': perm})
    return out
def im2sequence(input,
                filter_size=1,
                stride=1,
                padding=0,
                input_image_size=None,
                out_stride=1,
                name=None):
    """
    :api_attr: Static Graph
    Extracts image patches from the input tensor to form a tensor of shape
    {input.batch_size * output_height * output_width, filter_size_height *
    filter_size_width * input.channels}. This op use filter to scan images
    and convert these images to sequences. After expanding, the number of time step are
    output_height * output_width for an image, in which output_height and
    output_width are calculated by below equation:
    .. math::
        output\_height = 1 + \
            (padding\_up + padding\_down + input\_height - filter\_size\_height + stride\_height - 1) / stride\_height \\\\
        output\_width = 1 + \
            (padding\_left + padding\_right + input\_width - filter\_size\_width + stride\_width - 1) / stride\_width
    And the dimension of each time step is filter_size_height * filter_size_width * input.channels.
    Parameters:
        input (Variable): The input should be a 4-D Tensor in :math:`NCHW` format. The data type is float32.
        filter_size(int32 | List[int32]): The filter size. If filter_size is a List,
            it must contain two integers, :math:`[filter\_size\_height, filter\_size\_width]` .
            Otherwise, the filter size will be a square :math:`[filter\_size, filter\_size]` . Default is 1.
        stride(int32 | List[int32]): The stride size. If stride is a List, it must
            contain two integers, :math:`[stride\_height, stride\_width]` . Otherwise, the stride size will be a square :math:`[stride\_size, stride\_size]` . Default is 1.
        padding(int32 | List[int32]): The padding size. If padding is a List, it can
            contain four integers like :math:`[padding\_up, padding\_left, padding\_down, padding\_right]` to indicate
            paddings of four direction. Or it can contain two integers :math:`[padding\_height, padding\_width]` which means
            padding_up = padding_down = padding_height and
            padding_left = padding_right = padding_width. Otherwise, a scalar padding means
            padding_up = padding_down = padding_left = padding_right = padding.
            Default is 0.
        input_image_size(Variable, optional): the input contains image real size.It's dim
            is :math:`[batchsize, 2]` . It is just for batch inference when not None. Default is None.
        out_stride(int32 | List[int32]): The scaling of image through CNN. It is valid only when input_image_size is not None.
            If out_stride is List, it must contain two integers,
            :math:`[out\_stride\_height, out\_stride\_W]` . Otherwise,
            the out_stride_height = out_stride_width = out_stride. Default is 1.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name` .
    Returns:
            The output is a 2-D LoDTensor with shape {input.batch\_size * output\_height * output\_width, \
            filter\_size\_height * filter\_size\_width * input.channels}. The data type is float32.
    Return Type: Variable
    Examples:
        .. code-block:: text
            Given:
            x = [[[[ 6.  2.  1.]
                   [ 8.  3.  5.]
                   [ 0.  2.  6.]]
                  [[ 2.  4.  4.]
                   [ 6.  3.  0.]
                   [ 6.  4.  7.]]]
                 [[[ 6.  7.  1.]
                   [ 5.  7.  9.]
                   [ 2.  4.  8.]]
                  [[ 1.  2.  1.]
                   [ 1.  3.  5.]
                   [ 9.  0.  8.]]]]
            x.dims = {2, 2, 3, 3}
            And:
            filter = [2, 2]
            stride = [1, 1]
            padding = [0, 0]
            Then:
            output.data = [[ 6.  2.  8.  3.  2.  4.  6.  3.]
                           [ 2.  1.  3.  5.  4.  4.  3.  0.]
                           [ 8.  3.  0.  2.  6.  3.  6.  4.]
                           [ 3.  5.  2.  6.  3.  0.  4.  7.]
                           [ 6.  7.  5.  7.  1.  2.  1.  3.]
                           [ 7.  1.  7.  9.  2.  1.  3.  5.]
                           [ 5.  7.  2.  4.  1.  3.  9.  0.]
                           [ 7.  9.  4.  8.  3.  5.  0.  8.]]
            output.dims = {8, 8}
            output.lod = [[4, 4]]
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            data = fluid.data(name='data', shape=[None, 3, 32, 32],
                              dtype='float32')
            output = fluid.layers.im2sequence(
                input=data, stride=[1, 1], filter_size=[2, 2])
    """
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence')

    # Normalize scalar arguments to [h, w] pairs.
    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    if isinstance(stride, int):
        stride = [stride, stride]
    if isinstance(padding, int):
        padding = [padding, padding]
    else:
        # Copy so the expansion below never mutates the caller's list.
        padding = list(padding)
    if len(padding) == 2:
        # [pad_h, pad_w] -> [pad_up, pad_left, pad_down, pad_right].
        padding.append(padding[0])
        padding.append(padding[1])
    inputs = {"X": input}
    attrs = {"kernels": filter_size, "strides": stride, "paddings": padding}
    if input_image_size:
        if isinstance(out_stride, int):
            out_stride = [out_stride, out_stride]
        inputs["Y"] = input_image_size
        attrs["out_stride"] = out_stride
    helper = LayerHelper('im2sequence', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
    """
    :api_attr: Static Graph
    ${comment}

    Args:
        input (${x_type}): ${x_comment}.
        future_context_size (int): Size of the future context. Note that
            the convolution kernel has shape [future_context_size + 1, D],
            where D is the last dimension of ``input``.
        param_attr (ParamAttr): Attributes of parameters, including
            name, initializer etc.
        act (str): Non-linear activation to be applied to output variable.

    Returns:
        ${out_comment}.

    Examples:
        >>> # for LodTensor inputs
        >>> import paddle.fluid as fluid
        >>> x = fluid.data(name='x', shape=[9, 16],
        >>>                        dtype='float32', lod_level=1)
        >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
        >>> # for Tensor inputs
        >>> x = fluid.data(name='x', shape=[9, 4, 16], dtype='float32')
        >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
    """
    helper = LayerHelper('row_conv', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
    param_dtype = helper.input_dtype()
    # One kernel row for the current step plus one per future step.
    weight_shape = [future_context_size + 1, input.shape[-1]]
    weights = helper.create_parameter(
        attr=helper.param_attr, shape=weight_shape, dtype=param_dtype)
    conv_out = helper.create_variable_for_type_inference(param_dtype)
    helper.append_op(
        type='row_conv',
        inputs={'X': [input],
                'Filter': [weights]},
        outputs={'Out': [conv_out]})
    return helper.append_activation(conv_out)
@templatedoc()
def multiplex(inputs, index):
    """
    Select rows across a list of input Tensors according to ``index`` and
    assemble them into one output Tensor.

    Given :math:`m` input Tensors, where :math:`I_{i}` is the i-th input
    and :math:`O` the output, row ``i`` of the output is row ``i`` of the
    input chosen by ``index[i]``: :math:`O[i] = I_{index[i]}[i]`.

    For Example:
    .. code-block:: text
        Given:
        inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
                  [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
                  [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
                  [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]
        index = [[3],[0],[1],[2]]
        out = [[3,0,3,4],    # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
               [0,1,3,4],    # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
               [1,2,4,2],    # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
               [2,3,3,4]]    # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]

    Args:
        inputs (list): List of at least two N-D Tensors (rank >= 2, all of
            the same shape) with data type float32, float64, int32 or int64.
        index (Variable): 2-D Tensor of shape [M, 1] with data type int32 or
            int64, where M is the number of input Tensors; entry ``i`` picks
            which input supplies output row ``i``.

    Returns:
        Variable(Tensor): Output of multiplex OP, with data type being
        float32, float64, int32, int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x1 = fluid.data(name='x1', shape=[None, 2], dtype='float32')
            x2 = fluid.data(name='x2', shape=[None, 2], dtype='float32')
            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
            out = fluid.layers.multiplex(inputs=[x1, x2], index=index)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
            img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
            index = np.array([[1], [0]]).astype(np.int32)
            res = exe.run(fluid.default_main_program(), feed={'x1':img1, 'x2':img2, 'index':index}, fetch_list=[out])
            print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
    """
    helper = LayerHelper('multiplex', **locals())

    check_type(inputs, 'inputs', (list), 'multiplex')
    if len(inputs) < 2:
        raise ValueError(
            "inputs should be a list object with at least 2 elements.")
    for pos, tensor in enumerate(inputs):
        check_variable_and_dtype(tensor, 'input[' + str(pos) + ']',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'multiplex')
    check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')

    selected = helper.create_variable_for_type_inference(inputs[0].dtype)
    helper.append_op(
        type='multiplex',
        inputs={'X': inputs,
                'Ids': index},
        outputs={'Out': [selected]})
    return selected
def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
    """
    :alias_main: paddle.nn.functional.smooth_l1
    :alias: paddle.nn.functional.smooth_l1,paddle.nn.functional.loss.smooth_l1
    :old_api: paddle.fluid.layers.smooth_l1

    Compute the smooth L1 loss between :attr:`x` and :attr:`y`.

    The first dimension of :attr:`x` and :attr:`y` is treated as the batch
    size. The loss is evaluated element-wise and then summed per instance,
    so the output shape is [batch_size, 1].

    Args:
        x (Variable): A tensor with rank at least 2, shape
            [batch_size, dim1, ..., dimN]; the prediction. A LoDTensor or
            Tensor with type float32.
        y (Variable): A tensor with rank at least 2 and the same shape as
            :attr:`x`; the target. A LoDTensor or Tensor with type float32.
        inside_weight (Variable|None): Optional tensor with the same shape
            as :attr:`x`; when provided, (:attr:`x` - :attr:`y`) is scaled
            element-wise by it. A Tensor with type float32.
        outside_weight (Variable|None): Optional tensor with the same shape
            as :attr:`x`; when provided, the element-wise loss is scaled by
            it. A Tensor with type float32.
        sigma (float|None): Hyper parameter of smooth L1 loss layer.
            Defaults to 1.0 when None.

    Returns:
        Variable: The output smooth L1 loss with shape [batch_size, 1].
        A Tensor with type float32.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
            label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
            result = fluid.layers.smooth_l1(data,label)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            x = np.random.rand(3,3).astype("float32")
            y = np.random.rand(3,3).astype("float32")
            output= exe.run(feed={"x":x, "y":y},
                            fetch_list=[result])
            print(output)
    """
    check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'smooth_l1_loss')
    check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'smooth_l1_loss')
    helper = LayerHelper('smooth_l1_loss', **locals())
    elementwise_diff = helper.create_variable_for_type_inference(dtype=x.dtype)
    loss_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    # Fall back to the op's canonical sigma of 1.0 when none is supplied.
    sigma_attr = 1.0 if sigma is None else sigma
    helper.append_op(
        type='smooth_l1_loss',
        inputs={
            'X': x,
            'Y': y,
            'InsideWeight': inside_weight,
            'OutsideWeight': outside_weight
        },
        outputs={'Diff': elementwise_diff,
                 'Out': loss_out},
        attrs={'sigma': sigma_attr})
    return loss_out
@deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot')
def one_hot(input, depth, allow_out_of_range=False):
    """
    **WARING:** This OP requires the last dimension of Tensor shape must be equal to 1.
    This OP will be deprecated in a future release. It is recommended to use fluid. :ref:`api_fluid_one_hot` .
    The operator converts each id in the input to an one-hot vector with a
    :attr:`depth` length. The value in the vector dimension corresponding to the id
    is 1, and the value in the remaining dimension is 0.
    The shape of output Tensor or LoDTensor is generated by adding :attr:`depth` dimension
    behind the last dimension of the input shape.
    .. code-block:: text
        Example 1 (allow_out_of_range=False):
        input:
            X.shape = [4, 1]
            X.data = [[1], [1], [3], [0]]
            depth = 4
        output:
            Out.shape = [4, 4]
            Out.data = [[0., 1., 0., 0.],
                        [0., 1., 0., 0.],
                        [0., 0., 0., 1.],
                        [1., 0., 0., 0.]]
        Example 2 (allow_out_of_range=True):
        input:
            X.shape = [4, 1]
            X.data = [[1], [1], [5], [0]]
            depth = 4
            allow_out_of_range = True
        output:
            Out.shape = [4, 4]
            Out.data = [[0., 1., 0., 0.],
                        [0., 1., 0., 0.],
                        [0., 0., 0., 0.], # This id is 5, which goes beyond depth, so set it all-zeros data.
                        [1., 0., 0., 0.]]
        Example 3 (allow_out_of_range=False):
        input:
            X.shape = [4, 1]
            X.data = [[1], [1], [5], [0]]
            depth = 4
            allow_out_of_range = False
        output: Throw an exception for Illegal value
            The second dimension in X is 5, which is greater than depth.
            Allow_out_of_range =False means that does not allow the word id to exceed depth,
            so it throws an exception.
    Args:
        input(Variable): Tensor or LoDTensor with shape :math:`[N_1, N_2, ..., N_k, 1]` ,
            which contains at least one dimension and the last dimension must be 1.
            The data type is int32 or int64.
        depth(scalar): An integer defining the :attr:`depth` of the one hot dimension. If input
            is word id, depth is generally the dictionary size.
        allow_out_of_range(bool): A bool value indicating whether the input
            indices could be out of range :math:`[0, depth)` . When input indices are
            out of range, exceptions :code:`Illegal value` is raised if :attr:`allow_out_of_range`
            is False, or zero-filling representations is created if it is set True.
            Default: False.
    Returns:
        Variable: The one-hot representations of input. A Tensor or LoDTensor with type float32.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            # Correspond to the first example above, where label.shape is [4, 1] and one_hot_label.shape is [4, 4].
            label = fluid.data(name="label", shape=[4, 1], dtype="int64")
            one_hot_label = fluid.layers.one_hot(input=label, depth=4)
    """
    if in_dygraph_mode():
        # Eager mode: a Variable `depth` must first be materialized into a
        # plain python int before calling the C++ op.
        if isinstance(depth, Variable):
            depth = depth.numpy()
            assert depth.shape == (
                1, ), "depth of type Variable should have shape [1]"
            depth = depth.item(0)
        out = core.ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
                               allow_out_of_range)
        out.stop_gradient = True
        return out
    helper = LayerHelper("one_hot", **locals())
    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot')
    check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot')
    one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
    if not isinstance(depth, Variable):
        # user attribute
        inputs = {'X': input}
        attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range}
    else:
        # `depth` is a Tensor: feed it through the depth_tensor input
        # instead of baking it into the op attributes.
        depth.stop_gradient = True
        inputs = {'X': input, 'depth_tensor': depth}
        attrs = {'allow_out_of_range': allow_out_of_range}
    helper.append_op(
        type="one_hot",
        inputs=inputs,
        attrs=attrs,
        outputs={'Out': one_hot_out})
    # The one-hot encoding is not differentiable w.r.t. the integer ids.
    one_hot_out.stop_gradient = True
    return one_hot_out
def autoincreased_step_counter(counter_name=None, begin=1, step=1):
    """
    :api_attr: Static Graph

    Create (or fetch) a persistable int64 counter variable that is
    automatically increased by ``step`` in every iteration. Its first
    observed value is ``begin``.

    Args:
        counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'.
        begin(int, optional): The first return value of this counter. Default 1.
        step(int, optional): The step size. Default 1.

    Returns:
        Variable: The auto-increased Variable with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            global_step = fluid.layers.autoincreased_step_counter(
                counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
    """
    helper = LayerHelper('global_step_counter')
    if counter_name is None:
        counter_name = '@STEP_COUNTER@'
    counter, newly_created = helper.create_or_get_global_variable(
        name=counter_name,
        dtype='int64',
        shape=[1],
        persistable=True,
        belong_to_optimizer=True)
    if newly_created:
        # Initialize to begin - 1 so the prepended increment makes the
        # first observed value equal to `begin`.
        helper.set_variable_initializer(
            counter,
            initializer=Constant(value=begin - 1, force_cpu=True))
        # Prepend so the counter ticks before any other op in the block.
        helper.main_program.global_block()._prepend_op(
            type='increment',
            inputs={'X': [counter]},
            outputs={'Out': [counter]},
            attrs={'step': float(step)})
        counter.stop_gradient = True
    return counter
def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
    """
    :alias_main: paddle.reshape
    :alias: paddle.reshape,paddle.tensor.reshape,paddle.tensor.manipulation.reshape
    This operator changes the shape of ``x`` without changing its data.
    The target shape can be given by ``shape`` or ``actual_shape``.
    When ``shape`` and ``actual_shape`` are set at the same time,
    ``actual_shape`` has a higher priority than ``shape``
    but at this time ``shape`` can only be an integer list or tuple, and ``shape`` still should be set correctly to
    guarantee shape inference in compile-time.
    Some tricks exist when specifying the target shape.
    1. -1 means the value of this dimension is inferred from the total element
    number of x and remaining dimensions. Thus one and only one dimension can
    be set -1.
    2. 0 means the actual dimension value is going to be copied from the
    corresponding dimension of x. The index of 0s in shape can not exceed
    the dimension of x.
    Here are some examples to explain it.
    1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
    is [6, 8], the reshape operator will transform x into a 2-D tensor with
    shape [6, 8] and leaving x's data unchanged.
    2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
    specified is [2, 3, -1, 2], the reshape operator will transform x into a
    4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this
    case, one dimension of the target shape is set to -1, the value of this
    dimension is inferred from the total element number of x and remaining
    dimensions.
    3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
    is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
    with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,
    besides -1, 0 means the actual dimension value is going to be copied from
    the corresponding dimension of x.
    **Note**:
        The parameter ``actual_shape`` will be deprecated in the future and only use ``shape`` instead to represent the target shape.
    Args:
        x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
        shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
                        The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
                        If ``shape`` is an Tensor, it should be an 1-D Tensor .
        actual_shape(variable, optional): An 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape
                                according to this given shape rather than ``shape`` specifying shape.
                                That is to say ``actual_shape`` has a higher priority
                                than ``shape(list|tuple)`` but not ``shape(Tensor)``. \
                                This argument ``actual_shape`` will be removed in a future version. \
                                Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
        act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
        inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
                       are the same variable. Otherwise, the input and output of
                       ``layers.reshape`` are different variable. Default False. Note that if ``x``
                       is more than one OPs' input, ``inplace`` must be False.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
                            For more information, please refer to :ref:`api_guide_Name` .
    Returns:
        Tensor: A reshaped Tensor with the same data type as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable.
    Examples:
        .. code-block:: python
            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            # example 1:
            # attr shape is a list which doesn't contain Tensors.
            data_1 = fluid.data(
              name='data_1', shape=[2, 4, 6], dtype='float32')
            reshaped_1 = fluid.layers.reshape(
              x=data_1, shape=[-1, 0, 3, 2])
            # the shape of reshaped_1 is [2,4,3,2].
            # example 2:
            # attr shape is a list which contains Tensors.
            data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
            dim = fluid.layers.fill_constant([1], "int32", 5)
            reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
            # the shape of reshaped_2 is [5,10].
            # example 3:
            data_3 = fluid.data(
              name="data_3", shape=[2,4,6], dtype='float32')
            reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8])
            # the shape of reshaped_3 is [6,8].
    """
    if in_dygraph_mode():
        #TODO(zhiqiu): enable inplace in dygraph mode.
        if inplace:
            warnings.warn(
                "Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
            )
        if isinstance(shape, (list, tuple)):
            # Materialize any Tensor elements of `shape` into plain ints
            # before calling the eager op.
            shape = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in shape
            ]
        out, _ = core.ops.reshape2(x, 'shape', shape)
        return dygraph_utils._append_activation_in_dygraph(out, act)
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'reshape')
    check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
    check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
    helper = LayerHelper("reshape2", **locals())
    def get_attr_shape(list_shape):
        # Validate `list_shape` and convert it to the integer attribute
        # form: Tensor elements become -1 placeholders (the real values are
        # fed via ShapeTensor); among the literal ints, at most one -1 is
        # allowed, 0s must index an existing dimension of x, and everything
        # else must be positive.
        unk_dim_idx = -1
        attrs_shape = []
        for dim_idx, dim_size in enumerate(list_shape):
            if isinstance(dim_size, Variable):
                attrs_shape.append(-1)
            else:
                attrs_shape.append(dim_size)
                if dim_size == -1:
                    assert unk_dim_idx == -1, (
                        "Only one dimension value of 'shape' in reshape can "
                        "be -1. But received shape[%d] is also -1." % dim_idx)
                    unk_dim_idx = dim_idx
                elif dim_size == 0:
                    assert dim_idx < len(x.shape), (
                        "The index of 0 in `shape` must be less than "
                        "the input tensor X's dimensions. "
                        "But received shape[%d] = 0, X's dimensions = %d." %
                        (dim_idx, len(x.shape)))
                else:
                    assert dim_size > 0, (
                        "Each dimension value of 'shape' in reshape must not "
                        "be negative except one unknown dimension. "
                        "But received shape[%d] = %s." %
                        (dim_idx, str(dim_size)))
        return attrs_shape
    inputs = {"X": x}
    attrs = {}
    if isinstance(shape, Variable):
        # The whole target shape is one Tensor: feed it via the Shape input.
        shape.stop_gradient = True
        inputs["Shape"] = shape
    elif isinstance(shape, (list, tuple)):
        assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, "
                                "but received %s." % len(shape))
        attrs["shape"] = get_attr_shape(shape)
        if utils._contain_var(shape):
            # Some dims are Tensors: also feed them via ShapeTensor.
            inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape)
    elif isinstance(actual_shape, Variable):
        # NOTE(review): the check_type above constrains `shape` to
        # list/tuple/Variable, so this branch looks unreachable — verify
        # before relying on the documented actual_shape behavior here.
        actual_shape.stop_gradient = True
        inputs["Shape"] = actual_shape
    out = x if inplace else helper.create_variable_for_type_inference(
        dtype=x.dtype)
    x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="reshape2",
        inputs=inputs,
        attrs=attrs,
        outputs={"Out": out,
                 "XShape": x_shape})
    return helper.append_activation(out)
def squeeze(input, axes, name=None):
    """
    Remove single-dimensional entries from the shape of ``input``. When
    ``axes`` is non-empty, only the listed dimensions (each of which must
    have size 1) are removed; when ``axes`` is empty, every size-1
    dimension is removed.

    .. code-block:: text
        Case1:
          Input:
            X.shape = (1, 3, 1, 5)
            axes = [0]
          Output:
            Out.shape = (3, 1, 5)
        Case2:
          Input:
            X.shape = (1, 3, 1, 5)
            axes = []
          Output:
            Out.shape = (3, 5)
        Case3:
          Input:
            X.shape = [1,3,1,5]
            axes = [-2]
          Output:
            Out.shape = [1,3,5]

    Args:
        input (Variable): The input Tensor. Supported data type: float32,
            float64, bool, int8, int32, int64.
        axes (list): Integers selecting the dimensions to be squeezed, each
            in :math:`[-rank(input), rank(input))`; a negative value is
            interpreted as :math:`axes+rank(input)`.
        name (str, optional): Please refer to :ref:`api_guide_Name`,
            Default None.

    Returns:
        Variable: Output squeezed Tensor. Data type is same as input Tensor.

    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            # set batch size=None
            x = fluid.data(name='x', shape=[None, 5, 1, 10])
            y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]
    """
    # Eager-mode fast path.
    if in_dygraph_mode():
        squeezed, _ = core.ops.squeeze2(input, 'axes', axes)
        return squeezed

    helper = LayerHelper("squeeze", **locals())
    check_variable_and_dtype(
        input, 'input',
        ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'],
        'squeeze')
    check_type(axes, 'axis/axes', (list, tuple), 'squeeze')

    squeezed_out = helper.create_variable_for_type_inference(dtype=input.dtype)
    shape_record = helper.create_variable_for_type_inference(dtype=input.dtype)
    # squeeze2 additionally emits XShape, which caches the input shape for
    # the backward pass.
    helper.append_op(
        type="squeeze2",
        inputs={"X": input},
        attrs={"axes": axes},
        outputs={"Out": squeezed_out,
                 "XShape": shape_record})
    return squeezed_out
def unsqueeze(input, axes, name=None):
    """
    Insert single-dimensional entries to the shape of a Tensor. Takes one
    required argument axes, a list of dimensions that will be inserted.
    Dimension indices in axes are as seen in the output tensor.
    For example:
    .. code-block:: text
      Given a tensor such that tensor with shape [3, 4, 5],
      then Unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
    Args:
        input (Variable): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.
        axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``axes`` is an Variable, it should be an 1-D Tensor .
        name (str|None): Name for this layer.
    Returns:
        Variable: Unsqueezed Tensor, with the same data type as input.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[5, 10])
            y = fluid.layers.unsqueeze(input=x, axes=[1])
    """
    if in_dygraph_mode():
        # Eager mode: normalize `axes` to a plain list of python ints
        # before calling the C++ op.
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
            axes = axes.numpy().tolist()
        elif isinstance(axes, (list, tuple)):
            axes = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        out, _ = core.ops.unsqueeze2(input, 'axes', axes)
        return out
    check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
    check_variable_and_dtype(
        input, 'input',
        ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'],
        'unsqueeze')
    helper = LayerHelper("unsqueeze2", **locals())
    inputs = {"X": input}
    attrs = {}
    if isinstance(axes, int):
        axes = [axes]
    if isinstance(axes, Variable):
        # Axes given as a 1-D Tensor: feed it through the AxesTensor input.
        axes.stop_gradient = True
        inputs["AxesTensor"] = axes
    elif isinstance(axes, (list, tuple)):
        if utils._contain_var(axes):
            # Mixed ints/Tensors: pass every element through AxesTensorList.
            inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
        else:
            # Pure ints can be baked into the op attribute.
            attrs["axes"] = axes
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
    # unsqueeze2 additionally emits XShape, which caches the input shape
    # for the backward pass.
    helper.append_op(
        type="unsqueeze2",
        inputs=inputs,
        attrs=attrs,
        outputs={"Out": out,
                 "XShape": x_shape})
    return out
def lod_reset(x, y=None, target_lod=None):
    """
    Set LoD of :attr:`x` to a new one specified by :attr:`y` or
    :attr:`target_lod`. When :attr:`y` provided, :attr:`y.lod` would be
    considered as target LoD first, otherwise :attr:`y.data` would be
    considered as target LoD. If :attr:`y` is not provided, target LoD should
    be specified by :attr:`target_lod`. If target LoD is specified by
    :attr:`y.data` or :attr:`target_lod`, only one level LoD is supported.
    .. code-block:: text
        * Example 1:
            Given a 1-level LoDTensor x:
                x.lod =  [[ 2,           3,                   1 ]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]
            target_lod: [4, 2]
            then we get a 1-level LoDTensor:
                out.lod =  [[4,                          2]]
                out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                out.dims = [6, 1]
        * Example 2:
            Given a 1-level LoDTensor x:
                x.lod =  [[2,            3,                   1]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]
            y is a Tensor:
                y.data = [[2, 4]]
                y.dims = [1, 3]
            then we get a 1-level LoDTensor:
                out.lod =  [[2,            4]]
                out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                out.dims = [6, 1]
        * Example 3:
            Given a 1-level LoDTensor x:
                x.lod =  [[2,            3,                   1]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]
            y is a 2-level LoDTensor:
                y.lod =  [[2, 2], [2, 2, 1, 1]]
                y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]]
                y.dims = [6, 1]
            then we get a 2-level LoDTensor:
                out.lod =  [[2, 2], [2, 2, 1, 1]]
                out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                out.dims = [6, 1]
    Args:
        x (Variable): Input variable which could be a Tensor or LoDTensor.
                      The data type should be int32, int64, float32 or float64.
        y (Variable, optional): If provided, output's LoD would be derived from :attr:`y`.
                                If y's lod level>0, the data type can be any type.
                                If y's lod level=0, the data type should be int32.
        target_lod (list|tuple, optional): One level LoD which should be considered
                                      as target LoD when :attr:`y` not provided.
    Returns:
        Variable: Output variable with LoD specified by this layer.
    Raises:
        ValueError: If :attr:`y` and :attr:`target_lod` are both None.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[10])
            y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2)
            out = fluid.layers.lod_reset(x=x, y=y)
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'lod_reset')
    helper = LayerHelper("lod_reset", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    if y is not None:
        # `y` takes precedence: the op reads its lod (or its data when
        # lod level is 0) as the target LoD.
        check_type(y, 'y', (Variable), 'lod_reset')
        #TODO: check y.lod_level = 0 dtype
        helper.append_op(
            type="lod_reset", inputs={'X': x,
                                      'Y': y}, outputs={'Out': out})
    elif target_lod is not None:
        # Target LoD supplied directly as a python list/tuple attribute.
        helper.append_op(
            type="lod_reset",
            inputs={'X': x},
            attrs={'target_lod': target_lod},
            outputs={'Out': out})
    else:
        raise ValueError("y and target_lod should not be both none.")
    return out
def lod_append(x, level):
    """
    Append level to LoD of :attr:`x`.
    .. code-block:: text
        * Example 1:
            given a 1-level LoDTensor x:
                x.lod =  [[ 2,           3,                   1 ]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]
            level: [1, 1, 1, 1, 1, 1, 1]
            then we get a 2-level LoDTensor:
                x.lod =  [[ 2, 3, 1 ], [1, 1, 1, 1, 1, 1]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]
    Args:
        x (Variable): Input variable which could be a tensor or LoDTensor.
                      The data type should be int32, int64, float32 or float64.
        level (list|tuple|Variable, optional): The LoD level to be appended into LoD of x.
                                               If level is variable and its lod level>0, the data type can be any type.
                                               If level is variable and its lod level=0, the data type should be int32.
    Returns:
        Variable: Output variable with new LoD level.
    Raises:
        ValueError: If :attr:`y` is None or and :attr:`level` is not Iterator.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[6, 10], lod_level=1)
            out = fluid.layers.lod_append(x, [1,1,1,1,1,1])
    """
    # `Iterable` lives in `collections.abc`; importing it from `collections`
    # was deprecated since Python 3.3 and removed in Python 3.10.
    try:
        from collections.abc import Iterable
    except ImportError:  # fallback for very old interpreters
        from collections import Iterable
    if x is None:
        raise ValueError("Input(x) can't be None.")
    if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)):
        raise ValueError("Input(level) must be list, tuple or Variable.")
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'lod_append')
    helper = LayerHelper("lod_append", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    inputs = {'X': x}
    attrs = {'append': True}
    if isinstance(level, Variable):
        # A Variable level is fed through the Y input of lod_reset.
        inputs['Y'] = level
        #TODO: check y.lod_level = 0 dtype
    else:
        # A plain iterable level is baked into the target_lod attribute.
        attrs['target_lod'] = level
    helper.append_op(
        type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out})
    return out
def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
        data_format='NCHW'):
    """
    :alias_main: paddle.nn.functional.lrn
    :alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn
    :old_api: paddle.fluid.layers.lrn

    This operator implements the Local Response Normalization Layer, which
    performs a form of "lateral inhibition" by normalizing over local input
    regions. See `ImageNet Classification with Deep Convolutional Neural
    Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

    The formula is as follows:

    .. math::

        Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta}

    In the above equation:

    - :math:`n` : The number of channels to sum over.
    - :math:`k` : The offset (avoid being divided by 0).
    - :math:`\\alpha` : The scaling parameter.
    - :math:`\\beta` : The exponent parameter.

    Args:
        input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W] or
            [N, H, W, C], where N is the batch size, C is the input channel,
            H is Height, W is weight. The data type is float32. The rank of this
            tensor must be 4, otherwise it will raise ValueError.
        n (int, optional): The number of channels to sum over. Default: 5
        k (float, optional): An offset, positive. Default: 1.0
        alpha (float, optional): The scaling parameter, positive. Default: 1e-4
        beta (float, optional): The exponent, positive. Default: 0.75
        name (str, optional): The default value is None. Normally there is no need
            for user to set this property. For more information, please refer to
            :ref:`api_guide_Name`
        data_format (str, optional): Specify the data format of the input, and the
            data format of the output will be consistent with that of the input.
            An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`.
            When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        Variable: A tensor variable storing the transformation result with the
        same shape and data type as input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.data(
                name="data", shape=[None, 3, 112, 112], dtype="float32")
            lrn = fluid.layers.lrn(input=data)
            print(lrn.shape)  # [-1, 3, 112, 112]
            print(lrn.dtype)  # float32
    """
    helper = LayerHelper('lrn', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
    dtype = helper.input_dtype()

    # The op only supports 4-D feature maps.
    rank = len(input.shape)
    if rank != 4:
        raise ValueError(
            "Input's dimension size of Op(lrn) must be 4, but received %d." %
            (rank))
    if data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Attr(data_format) of Op(lrn) got wrong value: received " +
            data_format + " but only NCHW or NHWC supported.")

    # MidOut holds intermediate per-pixel sums needed by the backward pass.
    mid_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    lrn_out = helper.create_variable_for_type_inference(dtype)

    op_attrs = {
        "n": n,
        "k": k,
        "alpha": alpha,
        "beta": beta,
        "data_format": data_format,
    }
    helper.append_op(
        type="lrn",
        inputs={"X": input},
        outputs={"Out": lrn_out,
                 "MidOut": mid_out},
        attrs=op_attrs)

    return lrn_out
def pad(x, paddings, pad_value=0., name=None):
    """
    :alias_main: paddle.nn.functional.pad
    :alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad
    :old_api: paddle.fluid.layers.pad

    This op pads a tensor with the constant :attr:`pad_value`; the padded shape
    is described by :attr:`paddings`. For dimension :attr:`i`,
    :attr:`paddings[2*i]` values are inserted before the elements of :attr:`x`
    and :attr:`paddings[2*i+1]` values after them.

    See below for an example.

    .. code-block:: text

        Given:
            x = [[1, 2], [3, 4]]
            paddings = [0, 1, 1, 2]
            pad_value = 0

        Return:
            out = [[0, 1, 2, 0, 0]
                   [0, 3, 4, 0, 0]
                   [0, 0, 0, 0, 0]]

    Args:
        x (Variable): Tensor, data type is float32.
        paddings (list): A list of integers. Its elements specify the padded
                         width before and after each dimension in turn.
                         The length of :attr:`paddings` must be equal to
                         :math:`rank(x) \\times 2`.
        pad_value (float): The constant value used to pad.
        name (str, optional): The default value is None. Normally there is no
                              need for user to set this property. For more
                              information, please refer to :ref:`api_guide_Name`

    Returns:
        The padded tensor, with the same data type and rank as :attr:`x`

    Return Type:
        Variable

    Examples:
        .. code-block:: python

            # x is a rank 2 tensor variable
            import paddle.fluid as fluid
            x = fluid.data(name='data', shape=[300, 300], dtype='float32')
            out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.)
    """
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], "pad")

    helper = LayerHelper('pad', input=x, **locals())
    out = helper.create_variable_for_type_inference(helper.input_dtype())
    # pad_value is forced to float so the attr matches the op definition.
    op_attrs = {'paddings': paddings, 'pad_value': float(pad_value)}
    helper.append_op(
        type='pad', inputs={'X': x}, outputs={'Out': out}, attrs=op_attrs)
    return out
def pad_constant_like(x, y, pad_value=0., name=None):
    """
    Pad :attr:`y` with :attr:`pad_value` so that its shape grows to the shape
    of :attr:`x`. The number of values padded to the edges of each axis is the
    difference of the corresponding shapes of :attr:`x` and :attr:`y`:
    ((0, shape_x_0 - shape_y_0), ... (0, shape_x_n - shape_y_n)). The input
    should be a k-D tensor (k > 0 and k < 7).

    See below for an example.

    .. code-block:: text

        Given:
            X = [[[[ 0,  1,  2],
                   [ 3,  4,  5]],
                  [[ 6,  7,  8],
                   [ 9, 10, 11]],
                  [[12, 13, 14],
                   [15, 16, 17]]],
                 [[[18, 19, 20],
                   [21, 22, 23]],
                  [[24, 25, 26],
                   [27, 28, 29]],
                  [[30, 31, 32],
                   [33, 34, 35]]]]

            X.shape = (2, 3, 2, 3)

            Y = [[[[35, 36, 37]],
                  [[38, 39, 40]],
                  [[41, 42, 43]]]]

            Y.shape = (1, 3, 1, 3)

        And
            pad_value = 0.

        Return:
            Out = [[[[35, 36, 37],
                     [ 0,  0,  0]],
                    [[38, 39, 40],
                     [ 0,  0,  0]],
                    [[41, 42, 43],
                     [ 0,  0,  0]]],
                   [[[ 0,  0,  0],
                     [ 0,  0,  0]],
                    [[ 0,  0,  0],
                     [ 0,  0,  0]],
                    [[ 0,  0,  0],
                     [ 0,  0,  0]]]]

            Out.shape = [2, 3, 2, 3]

    Args:
        x (Variable): Tensor, its shape specifies the shape of output.
        y (Variable): Tensor, its rank is the same with :attr:`x`, and for each
                      dimension :math:`i`, :math:`y\_shape[i] <= x\_shape[i]`.
                      The data type can be float32 or float64.
        pad_value (float): The constant value used to pad.
        name (str, optional): The default value is None. Normally there is no
                              need for user to set this property. For more
                              information, please refer to :ref:`api_guide_Name`

    Returns:
        The padded tensor, with the same shape as :attr:`x` and the same data
        type as :attr:`y`

    Return Type:
        Variable

    Examples:
        .. code-block:: python

            # x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3)
            # y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3)
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32')
            y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32')
            out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
            # out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3]
    """
    # x only provides the target shape; y carries the data to be padded.
    check_type(x, 'x', (Variable), 'pad_constant_like')
    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
                             "pad_constant_like")

    helper = LayerHelper('pad_constant_like', input=x, **locals())
    out = helper.create_variable_for_type_inference(helper.input_dtype())
    helper.append_op(
        type='pad_constant_like',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'pad_value': float(pad_value)})
    return out
def label_smooth(label,
                 prior_dist=None,
                 epsilon=0.1,
                 dtype="float32",
                 name=None):
    """
    :alias_main: paddle.nn.functional.label_smooth
    :alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth
    :old_api: paddle.fluid.layers.label_smooth

    Label smoothing is a mechanism to regularize the classifier layer and is
    called label-smoothing regularization (LSR). It encourages the model to be
    less confident, since optimizing the log-likelihood of the correct label
    directly may cause overfitting and reduce the ability of the model to
    adapt. Label smoothing replaces the ground-truth label :math:`y` with the
    weighted sum of itself and some fixed distribution :math:`\mu`. For class
    :math:`k`, i.e.

    .. math::

        \\tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k,

    where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
    respectively, and :math:`\\tilde{y}_k` is the smoothed label. Usually
    uniform distribution is used for :math:`\mu`.

    See more details about label smoothing in https://arxiv.org/abs/1512.00567.

    Parameters:
        label(Variable): The input variable containing the label data. The
                         label data should use one-hot representation. It's
                         a multidimensional tensor with a shape of
                         :math:`[N_1, ..., Depth]`, where Depth is class
                         number. The dtype can be "float32" and "float64".
        prior_dist(Variable, optional): The prior distribution to be used to
                         smooth labels. If not provided, an uniform
                         distribution is used. It's a multidimensional tensor
                         with a shape of :math:`[1, class\_num]`.
                         The default value is None.
        epsilon(float, optional): The weight used to mix up the original
                         ground-truth distribution and the fixed distribution.
                         The default value is 0.1.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can
                         be set as 'float32', 'float64'. The default value is
                         'float32'.
        name(str, optional): The default value is None. Normally there is no
                         need for user to set this property. For more
                         information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: The tensor variable containing the smoothed labels.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            label = layers.data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
            smooth_label = layers.label_smooth(
                label=one_hot_label, epsilon=0.1, dtype="float32")
    """
    if epsilon < 0. or epsilon > 1.:
        raise ValueError("The value of epsilon must be between 0 and 1.")

    # Fast path: in dygraph mode dispatch straight to the C++ op.
    if in_dygraph_mode():
        return core.ops.label_smooth(label, prior_dist, 'epsilon',
                                     float(epsilon))

    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'label_smooth')

    helper = LayerHelper("label_smooth", **locals())
    # Gradients never flow back into the raw labels.
    label.stop_gradient = True
    smooth_label = helper.create_variable_for_type_inference(dtype)

    op_inputs = {"X": label}
    if prior_dist:
        op_inputs["PriorDist"] = prior_dist
    helper.append_op(
        type="label_smooth",
        inputs=op_inputs,
        outputs={"Out": smooth_label},
        attrs={"epsilon": float(epsilon)})
    return smooth_label
@templatedoc()
def roi_pool(input,
             rois,
             pooled_height=1,
             pooled_width=1,
             spatial_scale=1.0,
             rois_num=None,
             name=None):
    """
    :alias_main: paddle.nn.functional.roi_pool
    :alias: paddle.nn.functional.roi_pool,paddle.nn.functional.vision.roi_pool
    :old_api: paddle.fluid.layers.roi_pool

    This operator implements the roi_pooling layer.
    Region of interest pooling (also known as RoI pooling) performs max pooling
    on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).

    The operator has three steps:

    1. Dividing each region proposal into equal-sized sections with the
       pooled_width and pooled_height;
    2. Finding the largest value in each section;
    3. Copying these max values to the output buffer.

    For more information, please refer to
    https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn

    Args:
        input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W],
            where N is the batch size, C is the input channel, H is Height, W is
            weight. The data type is float32 or float64.
        rois (Variable): ROIs (Regions of Interest) to pool over. 2D-LoDTensor
            with the shape of [num_rois,4], the lod level is 1. Given as
            [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and
            (x2, y2) is the bottom right coordinates.
        pooled_height (int, optional): The pooled output height, data type is int32. Default: 1
        pooled_width (int, optional): The pooled output width, data type is int32. Default: 1
        spatial_scale (float, optional): Multiplicative spatial scale factor to
            translate ROI coords from their input scale to the scale used when
            pooling. Default: 1.0
        rois_num (Tensor): The number of RoIs in each image. Default: None
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Variable: The pooled feature, 4D-Tensor with the shape of
        [num_rois, C, pooled_height, pooled_width].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            DATATYPE='float32'

            place = fluid.CPUPlace()
            #place = fluid.CUDAPlace(0)

            input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE)
            roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place)
            rois_num_data = np.array([2]).astype('int32')

            x = fluid.data(name='input', shape=[None,1,4,4], dtype=DATATYPE)
            rois = fluid.data(name='roi', shape=[None,4], dtype=DATATYPE)
            rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')

            pool_out = fluid.layers.roi_pool(
                    input=x,
                    rois=rois,
                    pooled_height=1,
                    pooled_width=1,
                    spatial_scale=1.0,
                    rois_num=rois_num)

            exe = fluid.Executor(place)
            out, = exe.run(feed={'input':input_data ,'roi':roi_data, 'rois_num': rois_num_data}, fetch_list=[pool_out.name])
            print(out)   #array([[[[11.]]], [[[16.]]]], dtype=float32)
            print(np.array(out).shape)  # (2, 1, 1, 1)
    """
    # Dygraph fast path: call the C++ op directly.
    # NOTE(review): in dygraph mode this returns (pool_out, argmaxes) while
    # the static-graph path returns only pool_out — kept for compatibility.
    if in_dygraph_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        pool_out, argmaxes = core.ops.roi_pool(
            input, rois, rois_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale)
        return pool_out, argmaxes

    check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
    check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')

    helper = LayerHelper('roi_pool', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)
    # Argmax indices are produced for the backward pass.
    argmaxes = helper.create_variable_for_type_inference(dtype='int32')

    op_inputs = {"X": input, "ROIs": rois}
    if rois_num is not None:
        op_inputs['RoisNum'] = rois_num

    helper.append_op(
        type="roi_pool",
        inputs=op_inputs,
        outputs={"Out": pool_out,
                 "Argmax": argmaxes},
        attrs={
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale,
        })
    return pool_out
@templatedoc()
def roi_align(input,
              rois,
              pooled_height=1,
              pooled_width=1,
              spatial_scale=1.0,
              sampling_ratio=-1,
              rois_num=None,
              name=None):
    """
    :alias_main: paddle.nn.functional.roi_align
    :alias: paddle.nn.functional.roi_align,paddle.nn.functional.vision.roi_align
    :old_api: paddle.fluid.layers.roi_align

    ${comment}

    Args:
        input (Variable): ${x_comment}
        rois (Variable): ROIs (Regions of Interest) to pool over. It should be
            a 2-D LoDTensor of shape (num_rois, 4), the lod level is 1. The
            data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],
            (x1, y1) is the top left coordinates, and (x2, y2) is the bottom
            right coordinates.
        pooled_height (int32, optional): ${pooled_height_comment} Default: 1
        pooled_width (int32, optional): ${pooled_width_comment} Default: 1
        spatial_scale (float32, optional): ${spatial_scale_comment} Default: 1.0
        sampling_ratio(int32, optional): ${sampling_ratio_comment} Default: -1
        rois_num (Tensor): The number of RoIs in each image. Default: None
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Variable:

        Output: ${out_comment}.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(
                name='data', shape=[None, 256, 32, 32], dtype='float32')
            rois = fluid.data(
                name='rois', shape=[None, 4], dtype='float32')
            rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')
            align_out = fluid.layers.roi_align(input=x,
                                               rois=rois,
                                               pooled_height=7,
                                               pooled_width=7,
                                               spatial_scale=0.5,
                                               sampling_ratio=-1,
                                               rois_num=rois_num)
    """
    # Dygraph fast path: call the C++ op directly.
    if in_dygraph_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        align_out = core.ops.roi_align(
            input, rois, rois_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale,
            "sampling_ratio", sampling_ratio)
        return align_out

    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'roi_align')
    check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align')

    helper = LayerHelper('roi_align', **locals())
    align_out = helper.create_variable_for_type_inference(
        helper.input_dtype())

    op_inputs = {"X": input, "ROIs": rois}
    if rois_num is not None:
        op_inputs['RoisNum'] = rois_num

    helper.append_op(
        type="roi_align",
        inputs=op_inputs,
        outputs={"Out": align_out},
        attrs={
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale,
            "sampling_ratio": sampling_ratio,
        })
    return align_out
def dice_loss(input, label, epsilon=0.00001, name=None):
    """
    :alias_main: paddle.nn.functional.dice_loss
    :alias: paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss
    :old_api: paddle.fluid.layers.dice_loss

    Dice loss for comparing the similarity between the input predictions and
    the label. This implementation is for binary classification, where the
    input is sigmoid predictions of each pixel, usually used for segmentation
    task. The dice loss can be defined as the following equation:

    .. math::

        dice\_loss &= 1 - \\frac{2 * intersection\_area}{total\_area} \\\\
                  &= \\frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\\\
                  &= \\frac{(union\_area - intersection\_area)}{total\_area}

    Parameters:
        input (Variable): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_D]`,
            where :math:`N_1` is the batch_size, :math:`N_D` is 1. It is usually
            the output predictions of sigmoid activation. The data type can be
            float32 or float64.
        label (Variable): Tensor, the groud truth with the same rank as input,
            shape is :math:`[N_1, N_2, ..., N_D]`, where :math:`N_1` is the
            batch_size, :math:`N_D` is 1. The data type can be float32 or
            float64.
        epsilon (float): The epsilon will be added to the numerator and
            denominator. If both input and label are empty, it makes sure dice
            is 1. Default: 0.00001
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        The dice loss with shape [1], data type is the same as `input` .

    Return Type:
        Varaible

    Example:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='data', shape = [3, 224, 224, 1], dtype='float32')
            label = fluid.data(name='label', shape=[3, 224, 224, 1], dtype='float32')
            predictions = fluid.layers.sigmoid(x)
            loss = fluid.layers.dice_loss(input=predictions, label=label)
    """
    # Expand labels to one-hot over the class (last) dimension of `input`.
    label = one_hot(label, depth=input.shape[-1])
    # Sum over every axis except the batch axis.
    sum_axes = list(range(1, len(input.shape)))
    intersection = reduce_sum(input * label, dim=sum_axes)
    total_area = reduce_sum(input, dim=sum_axes) + reduce_sum(
        label, dim=sum_axes)
    # epsilon keeps the ratio well-defined when both areas are empty.
    per_sample_loss = 1 - intersection * 2 / (total_area + epsilon)
    return reduce_mean(per_sample_loss)
def image_resize(input,
out_shape=None,
scale=None,
name=None,
resample='BILINEAR',
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
:alias_main: paddle.nn.functional.image_resize
:alias: paddle.nn.functional.image_resize,paddle.nn.functional.vision.image_resize
:old_api: paddle.fluid.layers.image_resize
This op resizes a batch of images.
The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
or a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
and the resizing only applies on the three dimensions(depth, height and width).
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future and only use :attr:`out_shape` instead.
Supporting resample methods:
'LINEAR' : Linear interpolation
'BILINEAR' : Bilinear interpolation
'TRILINEAR' : Trilinear interpolation
'NEAREST' : Nearest neighbor interpolation
'BICUBIC' : Bicubic interpolation
Linear interpolation is the method of using a line connecting two known quantities
to determine the value of an unknown quantity between the two known quantities.
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension(in height direction) and the 4th dimension(in width
direction) on input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Align_corners and align_mode are optional parameters,the calculation method
of interpolation can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,H_out,W_out) where:
W_out = W_{in} * scale_{factor}
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of linear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Linear_interpolation.
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
input (Variable): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape (list|tuple|Variable|None): Output shape of image resize
layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimensions size should be a 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
resample(str): The resample method. It supports 'LINEAR', 'BICUBIC', 'BILINEAR', 'TRILINEAR'
and 'NEAREST' currently. Default: 'BILINEAR'
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would be occurred in graph constructing stage.
Default: None
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: True
align_mode(int) : An optional for linear/bilinear/trilinear interpolation. Refer to the fomula in the
the example code above, it can be \'0\' for src_idx = scale*(dst_indx+0.5)-0.5 ,
can be \'1\' for src_idx = scale*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: out_shape should be a list or tuple or Variable.
TypeError: actual_shape should either be Variable or None.
ValueError: The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR',
'TRILINEAR', 'BICUBIC' or 'NEAREST' currently.
ValueError: 'LINEAR' only support 3-D tensor.
ValueError: 'BICUBIC', 'BILINEAR' and 'NEAREST' only support 4-D tensor.
ValueError: 'TRILINEAR' only support 5-D tensor.
ValueError: One of out_shape and scale must not be None.
ValueError: out_shape length should be 1 for input 3-D tensor.
ValueError: out_shape length should be 2 for input 4-D tensor.
ValueError: out_shape length should be 3 for input 5-D tensor.
ValueError: scale should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.image_resize(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.image_resize(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.image_resize(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.image_resize(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.image_resize(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
resample_methods = {
'LINEAR': 'linear',
'BILINEAR': 'bilinear',
'TRILINEAR': 'trilinear',
'NEAREST': 'nearest',
'LINEAR': 'linear',
}
resample = resample.upper()
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', 'TRILINEAR' "
"or 'NEAREST' currently.")
resample_type = resample_methods[resample]
if resample == 'LINEAR' and len(input.shape) != 3:
raise ValueError("'LINER only support 3-D tensor.")
elif resample in ['BILINEAR', 'NEAREST'] and len(input.shape) != 4:
raise ValueError("'BILINEAR' and 'NEAREST' only support 4-D tensor.")
elif resample == 'TRILINEAR' and len(input.shape) != 5:
raise ValueError("'TRILINEAR'only support 5-D tensor.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if out_shape is None and scale is None:
raise ValueError("One of out_shape and scale must not be None.")
helper = LayerHelper('{}_interp'.format(resample_type), **locals())
dtype = helper.input_dtype()
if len(input.shape) == 3 and data_format not in ['NCW', 'NWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCW` or `NWC` supported for 3-D input.")
elif len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_turple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
data_layout = 'NHWC'
inputs = {"X": input}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not (_is_list_or_turple_(out_shape)):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(input.shape) == 3:
if len(out_shape) != 1:
raise ValueError("out_shape length should be 1 for "
"input 3-D tensor.")
if contain_var:
attrs['out_w'] = size_list[0]
else:
out_shape = list(map(int, out_shape))
attrs['out_w'] = out_shape[0]
elif len(input.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(input.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = float(scale)
else:
raise TypeError(
"Attr(scale)'s type should be float, int or Variable.")
if isinstance(actual_shape, Variable):
warnings.warn(
"actual_shape will be deprecated, it is recommended to use "
"out_shape instead of actual_shape to specify output shape dynamically."
)
actual_shape.stop_gradient = True
inputs["OutSize"] = actual_shape
elif actual_shape is not None:
raise TypeError("actual_shape should either be Variable or None.")
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
@templatedoc(op_type="linear_interp")
def resize_linear(input,
                  out_shape=None,
                  scale=None,
                  name=None,
                  actual_shape=None,
                  align_corners=True,
                  align_mode=1,
                  data_format='NCW'):
    """
    Resize a 3-D tensor along its width dimension using linear interpolation.

    The target width is taken from, in decreasing priority, ``actual_shape``,
    ``out_shape`` and ``scale``.

    **Warning:** the parameter :attr:`actual_shape` will be deprecated in
    the future and only use :attr:`out_shape` instead.

    ``align_corners`` and ``align_mode`` select how input and output
    coordinates are mapped onto each other:

    .. code-block:: text

        For scale:

            if align_corners = True && out_size > 1 :
                scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
                scale_factor = float(in_size/out_size)

        Linear interpolation:

            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}

    Parameters:
        input(Variable): 3-D Tensor(NCW), its data type is float32, float64,
            or uint8; its data layout is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): Output shape ``(out_w,)``. Each
            element may be an integer or a Tensor Variable with shape [1].
            Default: None.
        scale(float|Variable|None): Multiplier for the input width. At least
            one of :attr:`out_shape` and :attr:`scale` must be set;
            :attr:`out_shape` has the higher priority. Default: None.
        actual_shape(Variable): Optional tensor specifying the output shape
            dynamically; it has the highest priority of the three. It is
            recommended to use :attr:`out_shape` instead, because
            :attr:`actual_shape` will be deprecated. Even when it is used, one
            of :attr:`out_shape` and :attr:`scale` must still be set or graph
            construction fails. Default: None.
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
        data_format (str, optional): ``"NCW"`` (default) or ``"NWC"``; the
            output uses the same layout as the input.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        Variable: 3-D tensor(NCW or NWC).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            input = fluid.data(name="input", shape=[None,3,100])
            output = fluid.layers.resize_linear(input=input,out_shape=[50,])

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(1,3,100).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            # (1, 3, 50)
    """
    # Delegate to the generic resizer with the linear resample method.
    return image_resize(
        input=input,
        out_shape=out_shape,
        scale=scale,
        name=name,
        resample='LINEAR',
        actual_shape=actual_shape,
        align_corners=align_corners,
        align_mode=align_mode,
        data_format=data_format)
@templatedoc(op_type="bilinear_interp")
def resize_bilinear(input,
                    out_shape=None,
                    scale=None,
                    name=None,
                    actual_shape=None,
                    align_corners=True,
                    align_mode=1,
                    data_format='NCHW'):
    """
    :alias_main: paddle.nn.functional.resize_bilinear
    :alias: paddle.nn.functional.resize_bilinear,paddle.nn.functional.vision.resize_bilinear
    :old_api: paddle.fluid.layers.resize_bilinear

    Resize a 4-D tensor in the H and W dimensions using bilinear
    interpolation. The target size is taken from, in decreasing priority,
    ``actual_shape``, ``out_shape`` and ``scale``.

    **Warning:** the parameter :attr:`actual_shape` will be deprecated in
    the future and only use :attr:`out_shape` instead.

    Bilinear interpolation extends linear interpolation to two variables by
    interpolating linearly along one axis first and then along the other.
    For details, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation

    ``align_corners`` and ``align_mode`` select the coordinate mapping:

    .. code-block:: text

        For scale:

            if align_corners = True && out_size > 1 :
                scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
                scale_factor = float(in_size/out_size)

        Bilinear interpolation:

            if:
                align_corners = False , align_mode = 0
                input : (N,C,H_in,W_in)
                output: (N,C,H_out,W_out) where:
                H_out = (H_{in}+0.5) * scale_{factor} - 0.5
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,H_in,W_in)
                output: (N,C,H_out,W_out) where:
                H_out = H_{in} * scale_{factor}
                W_out = W_{in} * scale_{factor}

    Parameters:
        input(Variable): 4-D Tensor(NCHW), its data type is float32, float64,
            or uint8; its data layout is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): Output shape ``(out_h, out_w)``.
            Each element may be an integer or a Tensor Variable with
            shape [1]. Default: None.
        scale(float|Variable|None): Multiplier for the input height and
            width. At least one of :attr:`out_shape` and :attr:`scale` must
            be set; :attr:`out_shape` has the higher priority. Default: None.
        actual_shape(Variable): Optional tensor specifying the output shape
            dynamically; it has the highest priority of the three. It is
            recommended to use :attr:`out_shape` instead, because
            :attr:`actual_shape` will be deprecated. Even when it is used, one
            of :attr:`out_shape` and :attr:`scale` must still be set or graph
            construction fails. Default: None.
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
        data_format (str, optional): ``"NCHW"`` (default) or ``"NHWC"``; the
            output uses the same layout as the input.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        Variable: 4-D tensor(NCHW or NHWC).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            input = fluid.data(name="input", shape=[None,3,6,10])
            output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12])

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,3,6,10).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            # (2, 3, 12, 12)
    """
    # Delegate to the generic resizer with the bilinear resample method.
    return image_resize(
        input=input,
        out_shape=out_shape,
        scale=scale,
        name=name,
        resample='BILINEAR',
        actual_shape=actual_shape,
        align_corners=align_corners,
        align_mode=align_mode,
        data_format=data_format)
@templatedoc(op_type="trilinear_interp")
def resize_trilinear(input,
                     out_shape=None,
                     scale=None,
                     name=None,
                     actual_shape=None,
                     align_corners=True,
                     align_mode=1,
                     data_format='NCDHW'):
    """
    :alias_main: paddle.nn.functional.resize_trilinear
    :alias: paddle.nn.functional.resize_trilinear,paddle.nn.functional.vision.resize_trilinear
    :old_api: paddle.fluid.layers.resize_trilinear

    Resize a 5-D tensor in the D, H and W dimensions using trilinear
    interpolation. The target size is taken from, in decreasing priority,
    ``actual_shape``, ``out_shape`` and ``scale``.

    **Warning:** the parameter :attr:`actual_shape` will be deprecated
    in the future and only use :attr:`out_shape` instead.

    Trilinear interpolation extends linear interpolation to three
    variables by interpolating linearly along each of the three axes.
    For details, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation

    ``align_corners`` and ``align_mode`` select the coordinate mapping:

    .. code-block:: text

        For scale:

            if align_corners = True && out_size > 1 :
                scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
                scale_factor = float(in_size/out_size)

        Trilinear interpolation:

            if:
                align_corners = False , align_mode = 0
                input : (N,C,D_in,H_in,W_in)
                output: (N,C,D_out,H_out,W_out) where:
                D_out = (D_{in}+0.5) * scale_{factor} - 0.5
                H_out = (H_{in}+0.5) * scale_{factor} - 0.5
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,D_in,H_in,W_in)
                output: (N,C,D_out,H_out,W_out) where:
                D_out = D_{in} * scale_{factor}
                H_out = H_{in} * scale_{factor}
                W_out = W_{in} * scale_{factor}

    Parameters:
        input(${x_type}): 5-D Tensor, its data type is float32, float64, or
            uint8; its data layout is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): Output shape
            ``(out_d, out_h, out_w)``. Each element may be an integer or a
            Tensor Variable with shape [1]. Default: None.
        scale(float|Variable|None): Multiplier for the input depth, height
            and width. At least one of :attr:`out_shape` and :attr:`scale`
            must be set; :attr:`out_shape` has the higher priority.
            Default: None.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`
        actual_shape(Variable): Optional tensor specifying the output shape
            dynamically; it has the highest priority of the three. It is
            recommended to use :attr:`out_shape` instead, because
            :attr:`actual_shape` will be deprecated. Even when it is used, one
            of :attr:`out_shape` and :attr:`scale` must still be set or graph
            construction fails. Default: None.
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
        data_format (str, optional): ``"NCDHW"`` (default) or ``"NDHWC"``;
            the output uses the same layout as the input.

    Returns:
        Variable: A 5-D Tensor(NCDHW or NDHWC)

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            input = fluid.data(name="input", shape=[None,3,6,8,10])
            output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12])

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,3,6,8,10).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            # (2, 3, 12, 12, 12)
    """
    # Delegate to the generic resizer with the trilinear resample method.
    return image_resize(
        input=input,
        out_shape=out_shape,
        scale=scale,
        name=name,
        resample='TRILINEAR',
        actual_shape=actual_shape,
        align_corners=align_corners,
        align_mode=align_mode,
        data_format=data_format)
@templatedoc(op_type="nearest_interp")
def resize_nearest(input,
                   out_shape=None,
                   scale=None,
                   name=None,
                   actual_shape=None,
                   align_corners=True,
                   data_format='NCHW'):
    """
    :alias_main: paddle.nn.functional.resize_nearest
    :alias: paddle.nn.functional.resize_nearest,paddle.nn.functional.vision.resize_nearest
    :old_api: paddle.fluid.layers.resize_nearest

    Resize a 4-D tensor in the H and W dimensions using nearest-neighbor
    interpolation. The target size is taken from, in decreasing priority,
    ``actual_shape``, ``out_shape`` and ``scale``.

    **Warning:** the parameter :attr:`actual_shape` will be deprecated in the
    future and only use :attr:`out_shape` instead.

    ``align_corners`` selects the coordinate mapping:

    .. code-block:: text

        For scale:

            if align_corners = True && out_size > 1 :
                scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
                scale_factor = float(in_size/out_size)

        Nearest neighbor interpolation:

            if:
                align_corners = False
                input : (N,C,H_in,W_in)
                output: (N,C,H_out,W_out) where:
                H_out = floor(H_{in} * scale_{factor})
                W_out = floor(W_{in} * scale_{factor})
            else:
                align_corners = True
                input : (N,C,H_in,W_in)
                output: (N,C,H_out,W_out) where:
                H_out = round(H_{in} * scale_{factor})
                W_out = round(W_{in} * scale_{factor})

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation

    Parameters:
        input(${x_type}): 4-D Tensor, its data type is float32, float64, or
            uint8; its data layout is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): Output shape ``(out_h, out_w)``.
            Each element may be an integer or a Tensor Variable with
            shape [1]. Default: None.
        scale(float|Variable|None): Multiplier for the input height and
            width. At least one of :attr:`out_shape` and :attr:`scale` must
            be set; :attr:`out_shape` has the higher priority. Default: None.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`
        actual_shape(Variable): Optional tensor specifying the output shape
            dynamically; it has the highest priority of the three. It is
            recommended to use :attr:`out_shape` instead, because
            :attr:`actual_shape` will be deprecated. Even when it is used, one
            of :attr:`out_shape` and :attr:`scale` must still be set or graph
            construction fails. Default: None.
        align_corners(bool): ${align_corners_comment}
        data_format (str, optional): ``"NCHW"`` (default) or ``"NHWC"``; the
            output uses the same layout as the input.

    Returns:
        Variable: 4-D tensor(NCHW or NHWC).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            input = fluid.data(name="input", shape=[None,3,6,10])
            output = fluid.layers.resize_nearest(input=input,out_shape=[12,12])

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,3,6,10).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            # (2, 3, 12, 12)
    """
    # Delegate to the generic resizer; nearest-neighbor has no align_mode
    # choice, so the default (1) is always forwarded.
    return image_resize(
        input=input,
        out_shape=out_shape,
        scale=scale,
        name=name,
        resample='NEAREST',
        actual_shape=actual_shape,
        align_corners=align_corners,
        align_mode=1,
        data_format=data_format)
def image_resize_short(input, out_short_len, resample='BILINEAR'):
    """
    Resize a batch of images so that the short edge becomes
    ``out_short_len`` while the long edge is scaled proportionally, keeping
    the aspect ratio (up to integer rounding).

    Parameters:
        input (Variable): 4-D tensor(NCHW), the input tensor of image resize layer.
        out_short_len(int): The length of output images' short edge.
        resample (str): resample method, default: BILINEAR.

    Returns:
        Variable: 4-D tensor(NCHW).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
            out = fluid.layers.image_resize_short(input, out_short_len=3)
    """
    shape = input.shape
    if len(shape) != 4:
        raise ValueError(
            "The rank of input must be 4 (num_batches, channels, in_h, in_w).")
    height_width = shape[2:4]
    # Identify which of (H, W) is the short edge; the other is the long edge.
    short_axis = height_width.index(min(height_width))
    long_axis = 1 - short_axis
    ratio = float(out_short_len) / float(height_width[short_axis])
    target = list(height_width)
    target[short_axis] = out_short_len
    # Scale the long edge by the same ratio, rounding half up.
    target[long_axis] = int(float(height_width[long_axis]) * ratio + 0.5)
    return image_resize(input=input, out_shape=target, resample=resample)
@deprecated(since="2.0.0", update_to="paddle.gather")
def gather(input, index, overwrite=True):
    """
    Gather the entries of the outer-most dimension of ``input`` selected by
    ``index`` and concatenate them together.

    .. math::

        Out = X[Index]

    .. code-block:: text

        Given:

        X = [[1, 2],
             [3, 4],
             [5, 6]]

        Index = [1, 2]

        Then:

        Out = [[3, 4],
               [5, 6]]

    Args:
        input (Tensor): Source tensor with rank >= 1. Supported data types
            are int32, int64, float32, float64 and uint8 (only for CPU),
            float16 (only for GPU).
        index (Tensor): 1-D index tensor; dtype is int32 or int64.
        overwrite (bool, optional): How the gradient is updated for repeated
            indices: True overwrites, False accumulates. Default: True.

    Returns:
        output (Tensor): A tensor with the same rank as ``input``.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
            index = fluid.data(name='index', shape=[-1, 1], dtype='int32')
            output = fluid.layers.gather(x, index)
    """
    # Eager-mode fast path: invoke the C++ op directly.
    if in_dygraph_mode():
        return core.ops.gather(input, index, None)

    check_variable_and_dtype(
        input, 'x',
        ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], 'gather')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
    helper = LayerHelper('gather', **locals())
    dtype = helper.input_dtype()
    gathered = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="gather",
        inputs={"X": input,
                "Index": index},
        outputs={"Out": gathered},
        attrs={'overwrite': overwrite})
    return gathered
@deprecated(since="2.0.0", update_to="paddle.gather_nd")
def gather_nd(input, index, name=None):
    """
    **Gather Nd Layer**

    This function is actually a high-dimensional extension of :code:`gather`
    and supports for simultaneous indexing by multiple axes. :attr:`index` is a
    K-dimensional integer tensor, which is regarded as a (K-1)-dimensional
    tensor of :attr:`index` into :attr:`input`, where each element defines
    a slice of params:

    .. math::

        output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]

    Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
    shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .

    .. code-block:: text

        Given:
            input = [[[ 0,  1,  2,  3],
                      [ 4,  5,  6,  7],
                      [ 8,  9, 10, 11]],
                     [[12, 13, 14, 15],
                      [16, 17, 18, 19],
                      [20, 21, 22, 23]]]
            input.shape = (2, 3, 4)

        * Case 1:
            index = [[1]]
            gather_nd(input, index)
                     = [input[1, :, :]]
                     = [[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]]

        * Case 2:
            index = [[0,2]]
            gather_nd(input, index)
                     = [input[0, 2, :]]
                     = [8, 9, 10, 11]

        * Case 3:
            index = [[1, 2, 3]]
            gather_nd(input, index)
                     = [input[1, 2, 3]]
                     = [23]

    Args:
        input (Tensor): The input Tensor which it's data type should be bool, float32, float64, int32, int64.
        index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank.
                        Its dtype should be int32, int64.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        output (Tensor): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32')
            index = fluid.data(name='index', shape=[2, 2], dtype='int32')
            output = fluid.layers.gather_nd(x, index)
    """
    # Eager-mode fast path: invoke the C++ op directly.
    if in_dygraph_mode():
        return core.ops.gather_nd(input, index)
    # Bug fix: the op name reported in type-check error messages was
    # misspelled 'gather_np'; use the actual op name 'gather_nd'.
    check_variable_and_dtype(input, 'input',
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'gather_nd')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_nd')
    helper = LayerHelper('gather_nd', **locals())
    dtype = helper.input_dtype()
    output = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="gather_nd",
        inputs={"X": input,
                "Index": index},
        outputs={"Out": output})
    return output
@deprecated(since="2.0.0", update_to="paddle.scatter")
def scatter(input, index, updates, name=None, overwrite=True):
    """
    :alias_main: paddle.scatter
    :alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter
    :old_api: paddle.fluid.layers.scatter

    **Scatter Layer**

    Produce an output by writing ``updates`` into ``input`` at the rows
    selected by ``index``. With ``overwrite=True`` later updates to the same
    index replace earlier ones; with ``overwrite=False`` they accumulate
    (after the touched rows are first zeroed), equivalent to:

    .. code-block:: python

        import numpy as np

        #input:
        input = np.array([[1, 1], [2, 2], [3, 3]])
        index = np.array([2, 1, 0, 1])
        # shape of updates should be the same as input
        # shape of updates with dim > 1 should be the same as input
        updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
        overwrite = False

        # calculation:
        if not overwrite:
            for i in range(len(index)):
                input[index[i]] = np.zeros((2))
        for i in range(len(index)):
            if (overwrite):
                input[index[i]] = updates[i]
            else:
                input[index[i]] += updates[i]

        # output:
        out = np.array([[3, 3], [6, 6], [1, 1]])
        out.shape # [3, 2]

    Args:
        input (Variable): The input N-D Tensor with rank >= 1. Data type can be float32.
        index (Variable): The 1-D index Tensor; dtype int32 or int64. Its
            length must not exceed the length of ``updates``, and its values
            must not exceed the length of ``input``.
        updates (Variable): The values written into ``input``; same shape as
            ``input`` for dims > 1.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name` .
        overwrite (bool): Update mode for repeated indices: True overwrites,
            False accumulates. Default value is True.

    Returns:
        Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input.

    Examples:

        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False)
            index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False)
            updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False)
            output = fluid.layers.scatter(input, index, updates, overwrite=False)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
            index_data = np.array([2, 1, 0, 1]).astype(np.int64)
            update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'data':in_data, "index":index_data, "update":update_data}, fetch_list=[output])
            print(res)
            # [array([[3., 3.],
            #        [6., 6.],
            #        [1., 1.]], dtype=float32)]
    """
    helper = LayerHelper('scatter', **locals())
    dtype = helper.input_dtype()
    result = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="scatter",
        inputs={"X": input,
                "Ids": index,
                "Updates": updates},
        attrs={'overwrite': overwrite},
        outputs={"Out": result})
    return result
def scatter_nd_add(ref, index, updates, name=None):
    """
    **Scatter_nd_add Layer**

    Apply sparse addition of ``updates`` to single values or slices of
    ``ref``, generalising :code:`scatter` to multi-axis indexing.

    :attr:`ref` is a Tensor with rank :math:`R` and :attr:`index` is a Tensor
    with rank :math:`K` and shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where
    :math:`Q \\leq R`. :attr:`updates` is a Tensor with rank
    :math:`K - 1 + R - Q` and shape
    :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]`. For every
    :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index`, the matching
    :attr:`updates` slice is added onto the :attr:`ref` slice addressed by
    the last dimension of :attr:`index`.

    .. code-block:: text

        Given:

        * Case 1:
            ref = [0, 1, 2, 3, 4, 5]
            index = [[1], [2], [3], [1]]
            updates = [9, 10, 11, 12]

          we get:

            output = [0, 22, 12, 14, 4, 5]

        * Case 2:
            ref = [[65, 17], [-14, -25]]
            index = [[], []]
            updates = [[[-1, -2], [1, 2]],
                       [[3, 4], [-3, -4]]]
            ref.shape = (2, 2)
            index.shape = (2, 0)
            updates.shape = (2, 2, 2)

          we get:

            output = [[67, 19], [-16, -27]]

    Args:
        ref (Variable): The ref input. Its dtype should be float32, float64.
        index (Variable): The index input with rank > 1 and index.shape[-1] <= ref.rank.
                          Its dtype should be int32 or int64 as it is used as indexes.
        updates (Variable): The updated value of scatter_nd_add op; must have
            the same dtype as ``ref`` and the shape
            index.shape[:-1] + ref.shape[index.shape[-1]:].
        name (str|None): The output variable name. If set None, the layer will be named automatically.

    Returns:
        output (Variable): The output is a tensor with the same shape and dtype as ref.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32')
            index = fluid.data(name='index', shape=[3, 2], dtype='int32')
            updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')

            output = fluid.layers.scatter_nd_add(ref, index, updates)
    """
    # The op requires matching dtypes between the target and the updates.
    if ref.dtype != updates.dtype:
        raise ValueError("ref and updates must have same data type.")

    helper = LayerHelper('scatter_nd_add', **locals())
    # The output dtype follows `ref`, not the first positional input.
    dtype = helper.input_dtype(input_param_name='ref')
    result = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="scatter_nd_add",
        inputs={"X": ref,
                "Index": index,
                "Updates": updates},
        outputs={"Out": result})
    return result
def scatter_nd(index, updates, shape, name=None):
    """
    **Scatter_nd Layer**

    Scatters ``updates`` into a zero-initialized tensor of the given
    ``shape`` according to ``index``. It is equivalent to
    ``scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)``.
    Repeated elements in ``index`` accumulate, so (due to floating-point
    rounding) the order of duplicates may affect the exact result — see
    :code:`scatter_nd_add` for the precise calculation. This op is the
    inverse of the :code:`gather_nd` op.

    Args:
        index (Variable): Index tensor with rank > 1 and
            ``index.shape[-1] <= len(shape)``; dtype int32 or int64.
        updates (Variable): Values to scatter; dtype float32 or float64,
            with shape ``index.shape[:-1] + shape[index.shape[-1]:]``.
        shape (tuple|list): Shape of the output tensor.
        name (str|None): Name of the output variable; auto-generated if None.

    Returns:
        output (Variable): Tensor of the given shape with the same dtype as
            ``updates``.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            index = fluid.data(name='index', shape=[3, 2], dtype='int64')
            updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
            shape = [3, 5, 9, 10]

            output = fluid.layers.scatter_nd(index, updates, shape)
    """
    # Delegate to scatter_nd_add over a freshly zeroed tensor of the
    # requested shape, so accumulation semantics come for free.
    zeroed = zeros(shape, updates.dtype)
    return scatter_nd_add(zeroed, index, updates, name)
@templatedoc()
def random_crop(x, shape, seed=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        shape(${shape_type}): ${shape_comment}
        seed(int|${seed_type}|None): ${seed_comment} By default, the seed will
            get from `random.randint(-65536, 65535)`.

    Returns:
        ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            img = fluid.data("img", [None, 3, 256, 256])
            # cropped_img is [-1, 3, 224, 224]
            cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224])

            # cropped_img2 shape: [-1, 2, 224, 224]
            # cropped_img2 = fluid.layers.random_crop(img, shape=[2, 224, 224])

            # cropped_img3 shape: [-1, 3, 128, 224]
            # cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224])
    """
    helper = LayerHelper("random_crop", **locals())
    check_variable_and_dtype(x, 'x',
                             ['float32', 'float64', 'uint8', 'int16', 'int32'],
                             'random_crop')
    check_type(shape, 'shape', (list, Variable), 'random_crop')
    cropped = helper.create_variable_for_type_inference(x.dtype)

    # Draw a random startup seed when the caller did not supply one.
    # np.random.randint's upper bound is exclusive, so the range is
    # [-65536, 65535] as documented.
    if seed is None:
        seed = np.random.randint(-65536, 65536)

    op_attrs = {"shape": shape}
    if isinstance(seed, int):
        # A plain int is only the startup seed; the evolving seed state is
        # kept in a persistable int64 variable fed back through SeedOut.
        op_attrs["startup_seed"] = seed
        seed = helper.create_variable(
            name=unique_name.generate("random_crop_seed"),
            dtype="int64",
            persistable=True)
    elif not isinstance(seed, Variable):
        raise ValueError("'seed' must be a Variable or an int.")

    helper.append_op(
        type="random_crop",
        inputs={"X": x,
                "Seed": seed},
        outputs={"Out": cropped,
                 "SeedOut": seed},
        attrs=op_attrs)
    return cropped
def log(x, name=None):
    """
    :alias_main: paddle.log
    :alias: paddle.log,paddle.tensor.log,paddle.tensor.math.log
    :old_api: paddle.fluid.layers.log

    Calculates the natural log of the given input tensor, element-wise.

    .. math::

        Out = \\ln(x)

    Args:
        x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
        name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: The natural log of the input LoDTensor or Tensor computed element-wise.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            # Graph Organizing
            x = fluid.layers.data(name="x", shape=[1], dtype="float32")
            res = fluid.layers.log(x)

            # Create an executor using CPU as an example
            exe = fluid.Executor(fluid.CPUPlace())

            # Execute
            x_i = np.array([[1], [2]]).astype(np.float32)
            res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
            print(res_val) # [[0.], [0.6931472]]
    """
    # Imperative fast path: invoke the C++ op directly.
    if in_dygraph_mode():
        return core.ops.log(x)

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
    # NOTE: the previous dead local `inputs = {'X': [x]}` was removed; the
    # op's inputs are passed directly to append_op below.
    helper = LayerHelper('log', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
    return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
def relu(x, name=None):
    """
    ${comment}

    Args:
        x(Variable): ${x_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[-1,0],[1,2.6]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.relu(x1)
                print(out1.numpy())
                # [[0.  0. ]
                #  [1.  2.6]]
    """
    # Imperative fast path: invoke the C++ op directly.
    if in_dygraph_mode():
        return core.ops.relu(x)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')

    # NOTE: the previous dead local `inputs = {'X': [x]}` was removed; the
    # op input is resolved through helper.input('x') below.
    helper = LayerHelper('relu', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out})
    return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
def selu(x, scale=None, alpha=None, name=None):
    """
    Selu Operator.

    Computes, element-wise:

    .. math::

        selu= \\lambda*
        \\begin{cases}
            x                      &\\quad \\text{ if } x>0 \n
            \\alpha * e^x - \\alpha  &\\quad \\text{ if } x<=0
        \\end{cases}

    The input `X` may carry LoD (Level of Details) information; the output
    shares the LoD information with the input `X`.

    Args:
        x (Variable): The input N-D Tensor.
        scale(float, optional): lambda in the selu formula. Defaults to
            1.0507009873554804934193349852946; for more information about
            this value, please refer to: https://arxiv.org/abs/1706.02515.
        alpha(float, optional): alpha in the selu formula. Defaults to
            1.6732632423543772848170429916717; for more information about
            this value, please refer to: https://arxiv.org/abs/1706.02515.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name` .

    Returns:
        Variable(Tensor|LoDTensor): Output with the same shape and LoD
        information as the input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
            output = fluid.layers.selu(inputs)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            img = np.array([[0, 1],[2, 3]]).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
            print(res) # [array([[0.      , 1.050701],[2.101402, 3.152103]], dtype=float32)]
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu')

    helper = LayerHelper('selu', **locals())
    result = helper.create_variable_for_type_inference(
        helper.input_dtype(input_param_name='x'))
    # Only forward explicitly-supplied values; the op provides the defaults.
    op_attrs = {}
    if scale is not None:
        op_attrs["scale"] = scale
    if alpha is not None:
        op_attrs["alpha"] = alpha

    helper.append_op(
        type="selu", inputs={"X": x}, outputs={"Out": result}, attrs=op_attrs)
    return result
def mean_iou(input, label, num_classes):
    """
    Mean Intersection-Over-Union is a common evaluation metric for
    semantic image segmentation, which first computes the IOU for each
    semantic class and then computes the average over classes.
    IOU is defined as follows:

    .. math::

        IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}.

    The predictions are accumulated in a confusion matrix and mean-IOU
    is then calculated from it.

    Parameters:
        input (Variable): A n-D Tensor of prediction results for semantic labels with type int32 or int64.
        label (Variable): A Tensor of ground truth labels with type int32 or int64.
                           Its shape should be the same as input.
        num_classes (int32): The possible number of labels.

    Returns:
        Three Variables.

        - mean_iou(Variable) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \
                            Data type is float32.
        - out_wrong(Variable) : A 1-D Tensor with shape [num_classes]. Data type is int32. \
                             The wrong numbers of each class.
        - out_correct(Variable): A 1-D Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            iou_shape = [None, 32, 32]
            num_classes = 5
            predict = fluid.data(name='predict', shape=iou_shape, dtype='int64')
            label = fluid.data(name='label', shape=iou_shape, dtype='int64')
            mean_iou, out_wrong, out_correct = fluid.layers.mean_iou(predict, label,
                                                          num_classes)
    """
    helper = LayerHelper('mean_iou', **locals())
    check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],
                             'mean_iou')
    check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou')
    # NOTE: a dead `dtype = helper.input_dtype()` was removed — the op's
    # outputs have fixed dtypes independent of the (integral) input dtype.
    out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
    out_wrong = helper.create_variable_for_type_inference(dtype='int32')
    out_correct = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="mean_iou",
        inputs={"Predictions": input,
                "Labels": label},
        outputs={
            "OutMeanIou": out_mean_iou,
            "OutWrong": out_wrong,
            "OutCorrect": out_correct
        },
        attrs={"num_classes": num_classes})
    return out_mean_iou, out_wrong, out_correct
def crop(x, shape=None, offsets=None, name=None):
    """
    Crop input into output, as specified by offsets and shape.

    **Warning:** THIS OP IS DEPRECATED. It will be removed in the future version.
    Instructions for updating: Use :ref:`api_fluid_layers_crop_tensor` instead.

    .. code-block:: text

        * Case 1:
            Given
                X = [[0, 1, 2, 0, 0]
                     [0, 3, 4, 0, 0]
                     [0, 0, 0, 0, 0]],
            and
                shape = [2, 2],
                offsets = [0, 1],
            output is:
                Out = [[1, 2],
                       [3, 4]].
        * Case 2:
            Given
                X = [[0, 1, 2, 5, 0]
                     [0, 3, 4, 6, 0]
                     [0, 0, 0, 0, 0]],
            and shape is a tensor
                shape = [[0, 0, 0]
                         [0, 0, 0]]
            and
                offsets = [0, 1],

            output is:
                Out = [[1, 2, 5],
                       [3, 4, 6]].

    Parameters:
        x (Variable): Tensor, data type can be float32 or float64.
        shape (Variable|list/tuple of integers): The output shape. When a
            Tensor, its rank must equal the rank of `x`; only its shape is
            used and its values are ignored, which suits output shapes that
            change each iteration. When a list/tuple of integers, its length
            must be the same as the rank of `x`.
        offsets (Variable|list/tuple of integers|None): The cropping offsets
            at each dimension, with the same rank/length rules as `shape`.
            If None, the offsets are 0 at each dimension.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name` . Usually name is no need to set and
            None by default.

    Returns:
        The cropped Tensor, which has the same rank and data type with `x`

    Return Type:
        Variable

    Raises:
        ValueError: If shape is not a list, tuple or Variable.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32")
            y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32")
            crop = fluid.layers.crop(x, shape=y)

            # or
            z = fluid.data(name="z", shape=[3, 3, 5], dtype="float32")
            crop = fluid.layers.crop(z, shape=[2, 2, 3])
    """
    check_variable_and_dtype(x, 'x', ['float32'], 'crop')
    check_type(shape, 'shape', (list, tuple, Variable), 'crop')
    helper = LayerHelper('crop', **locals())

    # Default: crop from the origin along every axis.
    if offsets is None:
        offsets = [0] * len(x.shape)

    out = helper.create_variable_for_type_inference(x.dtype)
    op_inputs = {'X': x}
    op_attrs = {}
    # A Variable is wired in as an op input; a plain list/tuple becomes an
    # op attribute instead.
    if isinstance(shape, Variable):
        op_inputs['Y'] = shape
    else:
        op_attrs['shape'] = shape
    if isinstance(offsets, Variable):
        op_inputs['Offsets'] = offsets
    else:
        op_attrs['offsets'] = offsets

    helper.append_op(
        type='crop',
        inputs=op_inputs,
        outputs={'Out': out},
        attrs=op_attrs if op_attrs else None)
    return out
def crop_tensor(x, shape=None, offsets=None, name=None):
    """
    :alias_main: paddle.crop_tensor
    :alias: paddle.crop_tensor,paddle.tensor.crop_tensor,paddle.tensor.creation.crop_tensor
    :old_api: paddle.fluid.layers.crop_tensor

    Crop input into output, as specified by offsets and shape.

    .. code-block:: text

        * Case 1 (input is a 2-D Tensor):
            Input:
                X.shape = [3, 5]
                X.data = [[0, 1, 2, 0, 0],
                          [0, 3, 4, 0, 0],
                          [0, 0, 0, 0, 0]]
            Parameters:
                shape = [2, 2]
                offsets = [0, 1]
            Output:
                Out.shape = [2, 2]
                Out.data = [[1, 2],
                            [3, 4]]
        * Case 2 (input is a 3-D Tensor):
            Input:
                X.shape = [2, 3, 4]
                X.data =  [[[0, 1, 2, 3],
                            [0, 5, 6, 7],
                            [0, 0, 0, 0]],
                           [[0, 3, 4, 5],
                            [0, 6, 7, 8],
                            [0, 0, 0, 0]]]
            Parameters:
                shape = [2, 2, -1]
                offsets = [0, 0, 1]
            Output:
                Out.shape = [2, 2, 3]
                Out.data  = [[[1, 2, 3],
                              [5, 6, 7]],
                             [[3, 4, 5],
                              [6, 7, 8]]]

    Parameters:
        x (Variable): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
        shape (list|tuple|Variable): The output shape is specified
            by `shape`. Its data type is int32. If a list/tuple, it's length must be
            the same as the dimension size of `x`. If a Variable, it should be a 1-D Tensor.
            When it is a list, each element can be an integer or a Tensor of shape: [1].
            If Variable contained, it is suitable for the case that the shape may
            be changed each iteration.
        offsets (list|tuple|Variable, optional): Specifies the cropping
            offsets at each dimension. Its data type is int32. If a list/tuple, it's length
            must be the same as the dimension size of `x`. If a Variable, it should be a 1-D
            Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1].
            If Variable contained, it is suitable for the case that the offsets may be changed
            each iteration. Default: None, the offsets are 0 at each dimension.
        name(str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: The cropped Tensor has same data type with `x`.

    Raises:
        TypeError: If the data type of `x` is not in: float32, float64, int32, int64.
        TypeError: If `shape` is not a list, tuple or Variable.
        TypeError: If the data type of `shape` is not int32.
        TypeError: If `offsets` is not None and not a list, tuple or Variable.
        TypeError: If the data type of `offsets` is not int32.
        ValueError: If the element in `offsets` is less than zero.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name="x", shape=[None, 3, 5], dtype="float32")
            # x.shape = [-1, 3, 5], where -1 indicates batch size, and it will get the exact value in runtime.

            # shape is a 1-D Tensor
            crop_shape = fluid.data(name="crop_shape", shape=[3], dtype="int32")
            crop0 = fluid.layers.crop_tensor(x, shape=crop_shape)
            # crop0.shape = [-1, -1, -1], it means crop0.shape[0] = x.shape[0] in runtime.

            # or shape is a list in which each element is a constant
            crop1 = fluid.layers.crop_tensor(x, shape=[-1, -1, 3], offsets=[0, 1, 0])
            # crop1.shape = [-1, 2, 3]

            # or shape is a list in which each element is a constant or Variable
            y = fluid.data(name="y", shape=[3, 8, 8], dtype="float32")
            dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
            crop2 = fluid.layers.crop_tensor(y, shape=[3, dim1, 4])
            # crop2.shape = [3, -1, 4]

            # offsets is a 1-D Tensor
            crop_offsets = fluid.data(name="crop_offsets", shape=[3], dtype="int32")
            crop3 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=crop_offsets)
            # crop3.shape = [-1, 2, 3]

            # offsets is a list in which each element is a constant or Variable
            offsets_var =  fluid.data(name="dim1", shape=[1], dtype="int32")
            crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var])
            # crop4.shape = [-1, 2, 3]
    """
    helper = LayerHelper('crop_tensor', **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'crop_tensor')
    check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
    check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
               'crop_tensor')

    # Default: crop from the origin along every axis.
    if offsets is None:
        offsets = [0] * len(x.shape)

    out = helper.create_variable_for_type_inference(x.dtype)
    # `ipts`/`attrs` are built up branch-by-branch below: Variables become op
    # inputs, plain ints become op attributes, and mixed lists become both.
    ipts = {'X': x}
    attrs = {}

    def _attr_shape_check(shape_val):
        # Validate a single plain-int element of `shape`: it must be a
        # non-zero int, and the only allowed negative value is -1
        # (meaning "keep the remaining extent of this axis").
        if not isinstance(shape_val, int):
            raise TypeError(
                "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(shape_val))
        if shape_val == 0:
            raise ValueError(
                "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
                % str(shape_val))
        if shape_val < -1:
            raise ValueError(
                "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
                % str(shape_val))

    def _attr_offsets_check(offset_val):
        # Validate a single plain-int element of `offsets`: a non-negative int.
        if not isinstance(offset_val, int):
            raise TypeError(
                "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(offset_val))
        if offset_val < 0:
            raise ValueError(
                "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
                % str(offset_val))

    # Three-way dispatch on `offsets`:
    #  1) a single 1-D Tensor -> op input `Offsets`, attr filled with -1
    #     placeholders (meaning "read from the tensor at runtime");
    #  2) a list mixing Tensors and ints -> per-element tensor list
    #     `OffsetsTensor` (ints are materialized via fill_constant), with -1
    #     placeholders in the attr for the Tensor elements;
    #  3) a list of plain ints -> pure attribute.
    if isinstance(offsets, Variable):
        offsets.stop_gradient = True
        ipts['Offsets'] = offsets
        attrs['offsets'] = [-1] * len(x.shape)
    elif utils._contain_var(offsets):
        new_offsets_tensor = []
        offsets_attr = []
        for dim in offsets:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_offsets_tensor.append(dim)
                offsets_attr.append(-1)
            else:
                _attr_offsets_check(dim)
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_offsets_tensor.append(temp_out)
                offsets_attr.append(dim)
        ipts['OffsetsTensor'] = new_offsets_tensor
        attrs['offsets'] = offsets_attr
    else:
        for offset in offsets:
            _attr_offsets_check(offset)
        attrs['offsets'] = offsets

    # Same three-way dispatch for `shape`; the runtime placeholder for a
    # Tensor element here is 0 rather than -1 (since -1 is a legal value).
    if isinstance(shape, Variable):
        shape.stop_gradient = True
        ipts['Shape'] = shape
    elif utils._contain_var(shape):
        new_shape_tensor = []
        shape_attr = []
        for dim_size in shape:
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                new_shape_tensor.append(dim_size)
                shape_attr.append(0)
            else:
                _attr_shape_check(dim_size)
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out)
                new_shape_tensor.append(temp_out)
                shape_attr.append(dim_size)
        ipts['ShapeTensor'] = new_shape_tensor
        attrs['shape'] = shape_attr
    else:
        for dim_size in shape:
            _attr_shape_check(dim_size)
        attrs['shape'] = shape

    helper.append_op(
        type='crop_tensor',
        inputs=ipts,
        outputs={'Out': out},
        attrs=None if len(attrs) == 0 else attrs)
    return out
def affine_grid(theta, out_shape, name=None):
    """
    :alias_main: paddle.nn.functional.affine_grid
    :alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid
    :old_api: paddle.fluid.layers.affine_grid

    It generates a grid of (x,y) coordinates using the parameters of
    the affine transformation that correspond to a set of points where
    the input feature map should be sampled to produce the transformed
    output feature map.

    Args:
        theta (Variable) - A Tensor with shape [N, 2, 3]. It contains a batch of affine transform parameters.
                           The data type can be float32 or float64.
        out_shape (Variable | list | tuple): The shape of target output with format [batch_size, channel, height, width].
                                             ``out_shape`` can be a Tensor or a list or tuple. The data
                                             type must be int32.
        name(str|None): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: A Tensor with shape [batch_size, H, W, 2] while 'H' and 'W' are the height and width of feature map in affine transformation. The data type is the same as `theta`.

    Raises:
        ValueError: If the type of arguments is not supported.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            place = fluid.CPUPlace()
            theta = fluid.data(name="x", shape=[None, 2, 3], dtype="float32")
            out_shape = fluid.data(name="y", shape=[4], dtype="int32")
            grid_0 = fluid.layers.affine_grid(theta, out_shape)
            grid_1 = fluid.layers.affine_grid(theta, [5, 3, 28, 28])
            batch_size=2
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            output= exe.run(feed={"x": np.random.rand(batch_size,2,3).astype("float32"),
                                  "y": np.array([5, 3, 28, 28]).astype("int32")},
                                  fetch_list=[grid_0.name, grid_1.name])
            print(output[0])
            print(output[1])
    """
    # Pass **locals() so a user-supplied `name` is honoured, consistent with
    # every other layer in this file (the old bare LayerHelper call silently
    # ignored it).
    helper = LayerHelper('affine_grid', **locals())

    check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
                             'affine_grid')
    if not isinstance(out_shape, (list, tuple, Variable)):
        raise ValueError("The out_shape should be a list, tuple or Variable.")
    # NOTE: the old `if not isinstance(theta, Variable): raise ValueError`
    # branch was unreachable — check_variable_and_dtype above already raises
    # for a non-Variable theta — and has been removed as dead code.

    out = helper.create_variable_for_type_inference(theta.dtype)
    ipts = {'Theta': theta}
    attrs = {}
    # A Tensor shape is wired in as an op input (validated as int32);
    # a plain list/tuple becomes an op attribute.
    if isinstance(out_shape, Variable):
        ipts['OutputShape'] = out_shape
        check_variable_and_dtype(out_shape, 'out_shape', ['int32'],
                                 'affine_grid')
    else:
        attrs['output_shape'] = out_shape

    helper.append_op(
        type='affine_grid',
        inputs=ipts,
        outputs={'Output': out},
        attrs=None if len(attrs) == 0 else attrs)
    return out
def pad2d(input,
          paddings=[0, 0, 0, 0],
          mode='constant',
          pad_value=0.0,
          data_format="NCHW",
          name=None):
    """
    :alias_main: paddle.nn.functional.pad2d
    :alias: paddle.nn.functional.pad2d,paddle.nn.functional.common.pad2d
    :old_api: paddle.fluid.layers.pad2d

    Pad 2-d images according to 'paddings' and 'mode'.
    If mode is 'reflect', paddings[0] and paddings[1] must be no greater
    than height-1. And the width dimension has the same condition.

    Parameters:
        input (Tensor): The input image with [N, C, H, W] format or [N, H, W, C] format, which is a 4-D Tensor with data type float32.
        paddings (Tensor | List[int32]): The padding size
            (padding_top, padding_bottom, padding_left, padding_right):
            either a list of four int32 or a 1-D int32 Tensor with shape [4].
            Default is [0, 0, 0, 0].
        mode (str): One of 'constant' (pad with pad_value), 'reflect'
            (reflection of the input boundaries) or 'edge' (replicate the
            input boundaries). Default is 'constant'.
        pad_value (float32): The value to fill the padded areas in 'constant'
            mode. Default is 0.0.
        data_format (str): An string from: "NHWC", "NCHW". Specify the data
            format of the input data. Default is "NCHW".
        name (str, optional) : The default value is None.  Normally there is
            no need for user to set this property.  For more information,
            please refer to :ref:`api_guide_Name` .

    Returns: Tensor, a 4-D Tensor padded according to paddings and mode and data type is same as input.

    Examples:
        .. code-block:: text

            Input = [[[[1., 2., 3.],
                       [4., 5., 6.]]]]

            Case 0:
                paddings = [0, 1, 2, 3],
                mode = 'constant'
                pad_value = 0
                Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
                         [0., 0., 4., 5., 6., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0., 0.]]]]

            Case 1:
                paddings = [0, 1, 2, 1],
                mode = 'reflect'
                Out = [[[[3., 2., 1., 2., 3., 2.],
                         [6., 5., 4., 5., 6., 5.],
                         [3., 2., 1., 2., 3., 2.]]]]

            Case 2:
                paddings = [0, 1, 2, 1],
                mode = 'edge'
                Out = [[[[1., 1., 1., 2., 3., 3.],
                         [4., 4., 4., 5., 6., 6.],
                         [4., 4., 4., 5., 6., 6.]]]]

    Code Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F

            x_shape = (1, 1, 3, 4)
            x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
            tensor_x = paddle.to_tensor(x)
            y = F.pad2d(tensor_x, paddings=[1, 2, 2, 1], pad_value=1, mode='constant')
            print(y.numpy())
    """
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        "pad2d")

    if in_dygraph_mode():
        # Imperative mode needs plain Python values for attributes, so a
        # paddings Tensor is converted to a list first.
        if isinstance(paddings, Variable):
            pad_list = paddings.numpy().tolist()
        else:
            pad_list = paddings
        return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
                              'data_format', data_format, 'paddings', pad_list)

    attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
    inputs = {'X': [input]}
    # A paddings Tensor is wired in as an op input, with an empty attr as
    # placeholder; a plain list stays an attribute.
    if isinstance(paddings, Variable):
        inputs['Paddings'] = [paddings]
        attrs['paddings'] = []
    else:
        attrs['paddings'] = paddings

    helper = LayerHelper('pad2d', **locals())

    assert mode in ['reflect', 'edge', 'constant'
                    ], "mode should be one of constant, reflect, edge."

    dtype = helper.input_dtype(input_param_name='input')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
    return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.elu")
def elu(x, alpha=1.0, name=None):
    """
    :alias_main: paddle.nn.functional.elu
    :alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu
    :old_api: paddle.fluid.layers.elu

    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        alpha(${alpha_type}|1.0): ${alpha_comment}
        name(str|None): The default value is None. Normally there is no need
            for user to set this property. For more information, please refer
            to :ref:`api_guide_Name`.

    Returns:
        ${out_type}: ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            input_elu = np.array([[-1,6],[1,15.6]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(input_elu)
                y = fluid.layers.elu(x, alpha=0.2)
                print(y.numpy())
                # [[-0.12642411  6.        ]
                # [ 1.          15.6      ]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')

    helper = LayerHelper('elu', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='elu',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={'alpha': alpha})
    return result
@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
def relu6(x, threshold=6.0, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        threshold(float, optional): ${threshold_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        output(${out_type}): ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[-1,0],[2.5,7.8]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.relu6(x=x1, threshold=6.0)
                print(out1.numpy())
                # [[0.  0. ]
                #  [2.5 6. ]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')

    helper = LayerHelper('relu6', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='relu6',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={
            'threshold': threshold,
            # Honour the process-wide MKL-DNN flag when it is enabled.
            'use_mkldnn': core.globals()["FLAGS_use_mkldnn"]
        })
    return result
@templatedoc()
def pow(x, factor=1.0, name=None):
    """
    This is Pow Activation Operator.

    :math:`out = x^{factor}`

    Args:
        x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
        factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``.  The exponential factor of Pow. Default 1.0.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name="x", shape=[32,32], dtype="float32")

            # example 1: argument factor is float
            y_1 = fluid.layers.pow(x, factor=2.0)
            # y_1 is x^{2.0}

            # example 2: argument factor is Variable
            factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
            y_2 = fluid.layers.pow(x, factor=factor_tensor)
            # y_2 is x^{3.0}
    """
    check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'],
                             'pow')

    helper = LayerHelper('pow', **locals())
    op_inputs = {'X': x}
    op_attrs = {}
    # A Tensor exponent is wired in as an extra op input (its gradient is
    # not propagated); a plain number becomes an op attribute.
    if isinstance(factor, Variable):
        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
        factor.stop_gradient = True
        op_inputs['FactorTensor'] = factor
    else:
        op_attrs['factor'] = factor

    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='pow', inputs=op_inputs, outputs={'Out': result}, attrs=op_attrs)
    return result
@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
    """
    :alias_main: paddle.stanh
    :alias: paddle.stanh,paddle.tensor.stanh,paddle.tensor.math.stanh
    :old_api: paddle.fluid.layers.stanh

    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        scale_a(${scale_a_type}|2.0 / 3.0): ${scale_a_comment}
        scale_b(${scale_b_type}|1.7159): ${scale_b_comment}
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        output(${out_type}): ${out_comment}.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            data = fluid.data(name="input", shape=[-1, 3])
            result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            x = np.random.random(size=(3, 3)).astype('float32')
            output= exe.run(feed={"input": x},
                         fetch_list=[result])
            print(output)
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')

    helper = LayerHelper('stanh', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='stanh',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={'scale_a': scale_a,
               'scale_b': scale_b})
    return result
@templatedoc()
def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
    """
    :alias_main: paddle.nn.functional.hard_sigmoid
    :alias: paddle.nn.functional.hard_sigmoid,paddle.nn.functional.activation.hard_sigmoid
    :old_api: paddle.fluid.layers.hard_sigmoid

    ${comment}

    Parameters:
        x (${x_type}): ${x_comment}
        slope (float, optional): ${slope_comment}
        offset (float, optional): ${offset_comment}
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        ${out_type}: ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.fill_constant(shape=[3, 2], value=0.5, dtype='float32') # [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
            result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_sigmoid')

    helper = LayerHelper('hard_sigmoid', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='hard_sigmoid',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={'slope': slope,
               'offset': offset})
    return result
@templatedoc()
def swish(x, beta=1.0, name=None):
    """
    :alias_main: paddle.nn.functional.swish
    :alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish
    :old_api: paddle.fluid.layers.swish
    Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
    Equation:
        .. math::
            out = \\frac{x}{1 + e^{- beta * x}}
    Args:
        x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
        beta(float): Constant beta of swish operator, default 1.0.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape with the input x.
    Examples:
        .. code-block:: python
            # declarative mode
            import numpy as np
            from paddle import fluid
            x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
            y = fluid.layers.swish(x, beta=2.0)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            start = fluid.default_startup_program()
            main = fluid.default_main_program()
            data = np.random.randn(2, 3).astype("float32")
            exe.run(start)
            y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
            data
            # array([[-1.1239197 , 1.3391294 , 0.03921051],
            #        [ 1.1970421 , 0.02440812, 1.2055548 ]], dtype=float32)
            y_np
            # array([[-0.2756806 , 1.0610548 , 0.01998957],
            #        [ 0.9193261 , 0.01235299, 0.9276883 ]], dtype=float32)
        .. code-block:: python
            # imperative mode
            import numpy as np
            from paddle import fluid
            import paddle.fluid.dygraph as dg
            data = np.random.randn(2, 3).astype("float32")
            place = fluid.CPUPlace()
            with dg.guard(place) as g:
                x = dg.to_variable(data)
                y = fluid.layers.swish(x)
                y_np = y.numpy()
            data
            # array([[-0.0816701 , 1.1603649 , -0.88325626],
            #        [ 0.7522361 , 1.0978601 , 0.12987892]], dtype=float32)
            y_np
            # array([[-0.03916847, 0.8835007 , -0.25835553],
            #        [ 0.51126915, 0.82324016, 0.06915068]], dtype=float32)
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
    helper = LayerHelper('swish', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='swish',
        inputs={'X': x},
        outputs={'Out': out},
        # BUGFIX: the C++ swish operator registers its coefficient under the
        # attribute name 'beta'. The previous key 'slope' was not recognized,
        # so the op silently ran with its default beta instead of the value
        # the caller passed in.
        attrs={'beta': beta})
    return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.prelu")
def prelu(x, mode, param_attr=None, name=None):
    """
    :api_attr: Static Graph
    Equation:
    .. math::
        y = \max(0, x) + \\alpha * \min(0, x)
    There are three modes for the activation:
    .. code-block:: text
        all: All elements share same alpha.
        channel: Elements in same channel share same alpha.
        element: All elements do not share alpha. Each element has its own alpha.
    Args:
        x (Variable): The input Tensor or LoDTensor with data type float32.
        mode (str): The mode for weight sharing.
        param_attr(ParamAttr|None): The parameter attribute for the learnable
            weight (alpha), it can be create by ParamAttr. None by default.
            For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
        name(str|None): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        Variable:
        output(Variable): The tensor or LoDTensor with the same shape as input.
        The data type is float32.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            from paddle.fluid.param_attr import ParamAttr
            x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
            mode = 'channel'
            output = fluid.layers.prelu(
                x,mode,param_attr=ParamAttr(name='alpha'))
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'prelu')
    helper = LayerHelper('prelu', **locals())
    if mode not in ['all', 'channel', 'element']:
        raise ValueError('mode should be one of all, channel, element.')
    # Default ('all' mode): a single scalar alpha shared by every element.
    alpha_shape = [1]
    # NOTE(): The input of this API should be ``N,C,...`` format,
    # which means x.shape[0] is batch_size and x.shape[1] is channel.
    if mode == 'channel':
        assert len(
            x.shape
        ) >= 2, "The size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'"
        #NOTE(zhiqiu): The alpha_shape should be [1, channel] + [1] * len(x.shape[2:]).
        # To be consistent with Prelu, it is simplified.
        #NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version.
        alpha_shape = [1, x.shape[1], 1, 1]
    elif mode == 'element':
        assert len(
            x.shape
        ) >= 1, "The size of input shape should be equal or larger than 1 in prelu() when mode is 'element'"
        # One alpha per element, excluding the batch dimension.
        alpha_shape = [1] + list(x.shape)[1:]
    dtype = helper.input_dtype(input_param_name='x')
    # Alpha is a learnable parameter, initialized to the constant 0.25.
    alpha = helper.create_parameter(
        attr=helper.param_attr,
        shape=alpha_shape,
        dtype='float32',
        is_bias=False,
        default_initializer=Constant(0.25))
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="prelu",
        inputs={"X": x,
                'Alpha': alpha},
        attrs={"mode": mode},
        outputs={"Out": out})
    return out
@templatedoc()
def brelu(x, t_min=0.0, t_max=24.0, name=None):
    """
    :alias_main: paddle.nn.functional.brelu
    :alias: paddle.nn.functional.brelu,paddle.nn.functional.activation.brelu
    :old_api: paddle.fluid.layers.brelu
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        t_min(${t_min_type}|0.0): ${t_min_comment}
        t_max(${t_max_type}|24.0): ${t_max_comment}
        name(str|None): The default value is None. Normally there is no need for user to set this property.
                        For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        ${out_type}: ${out_comment}
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            input_brelu = np.array([[-1,6],[1,15.6]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(input_brelu)
                y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
                print(y.numpy())
                #[[ 1.  6.]
                #[ 1. 10.]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')
    helper = LayerHelper('brelu', **locals())
    # The op clips values into the [t_min, t_max] window; keep input dtype.
    clipped = helper.create_variable_for_type_inference(dtype=x.dtype)
    bound_attrs = {'t_min': t_min, 't_max': t_max}
    helper.append_op(
        type='brelu',
        inputs={'X': x},
        outputs={'Out': clipped},
        attrs=bound_attrs)
    return clipped
@deprecated(since="2.0.0", update_to="paddle.nn.functional.leaky_relu")
@templatedoc()
def leaky_relu(x, alpha=0.02, name=None):
    """
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        alpha(${alpha_type}|0.02): ${alpha_comment}
        name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
    Returns:
        output(${out_type}): ${out_comment}
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            # Graph Organizing
            x = fluid.layers.data(name="x", shape=[2], dtype="float32")
            res = fluid.layers.leaky_relu(x, alpha=0.1)
            # Create an executor using CPU as an example
            exe = fluid.Executor(fluid.CPUPlace())
            # Execute
            x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32)
            res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
            print(res_val) # [[-0.1, 2], [3, -0.4]]
    """
    # Thin compatibility shim: forward everything to the 2.0 functional API.
    return paddle.nn.functional.leaky_relu(x, alpha=alpha, name=name)
def soft_relu(x, threshold=40.0, name=None):
    """
    :alias_main: paddle.nn.functional.soft_relu
    :alias: paddle.nn.functional.soft_relu,paddle.nn.functional.activation.soft_relu
    :old_api: paddle.fluid.layers.soft_relu
    SoftRelu Activation Operator.
    $out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$
    Args:
        x(Variable): Input of soft_relu operator. Data type can be float32, float64.
        threshold(float, optional): The threshold value of soft_relu, default value being 40.0.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
    Returns:
        Variable(Tensor|LoDTensor)): Output of soft_relu operator, shape and LoD same as input.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
            output = fluid.layers.soft_relu(inputs, threshold=20.0)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            img = np.array([[0, 1],[2, 3]]).astype(np.float32)
            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
            print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'soft_relu')
    helper = LayerHelper('soft_relu', **locals())
    # Result tensor mirrors the input's dtype (and LoD, per the op contract).
    softened = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='soft_relu',
        inputs={'X': x},
        outputs={'Out': softened},
        attrs={'threshold': threshold})
    return softened
def flatten(x, axis=1, name=None):
    """
    **Flatten op**
    Flatten the input tensor into a 2D matrix.
    For Example:
    .. code-block:: text
        Case 1:
          Given
            X.shape = (3, 100, 100, 4)
          and
            axis = 2
          We get:
            Out.shape = (3 * 100, 4 * 100)
        Case 2:
          Given
            X.shape = (3, 100, 100, 4)
          and
            axis = 0
          We get:
            Out.shape = (1, 3 * 100 * 100 * 4)
    Args:
        x (Variable): A tensor of rank >= axis. A tensor with type float32,
                      float64, int8, int32, int64.
        axis (int): Indicate up to which input dimensions (exclusive) should
                    be flattened to the outer dimension of the output.
                    The value for axis must be in the range [0, R], where R
                    is the rank of the input tensor. Default: 1.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.
    Returns:
        Variable: A 2D tensor with the contents of the input tensor, with input \
                  dimensions up to axis flattened to the outer dimension of \
                  the output and remaining input dimensions flattened into the \
                  inner dimension of the output. A Tensor with type same as input x.
    Raises:
        ValueError: If x is not a variable.
        ValueError: If axis is not in range [0, rank(x)].
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
            # x shape is [4, 4, 3]
            out = fluid.layers.flatten(x=x, axis=2)
            # out shape is [16, 3]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
    helper = LayerHelper('flatten', **locals())
    if not isinstance(x, Variable):
        raise ValueError("The input x should be a Variable")
    # axis must be an int within [0, rank(x)] (inclusive on both ends).
    if not (isinstance(axis, int) and 0 <= axis <= len(x.shape)):
        raise ValueError("The axis should be a int, and in range [0, rank(x)]")
    flattened = helper.create_variable_for_type_inference(x.dtype)
    # flatten2 also records the pre-flatten shape ('XShape') so the backward
    # pass can restore it; only the flattened tensor is handed to the caller.
    saved_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='flatten2',
        inputs={"X": x},
        outputs={'Out': flattened,
                 'XShape': saved_shape},
        attrs={"axis": axis})
    return flattened
def stack(x, axis=0, name=None):
    """
    This OP stacks all the inputs :code:`x` along axis.
    .. code-block:: text
        Case 1:
          Input:
            x[0].shape = [1, 2]
            x[0].data = [ [1.0 , 2.0 ] ]
            x[1].shape = [1, 2]
            x[1].data = [ [3.0 , 4.0 ] ]
            x[2].shape = [1, 2]
            x[2].data = [ [5.0 , 6.0 ] ]
          Attrs:
            axis = 0
          Output:
            Out.dims = [3, 1, 2]
            Out.data =[ [ [1.0, 2.0] ],
                        [ [3.0, 4.0] ],
                        [ [5.0, 6.0] ] ]
        Case 2:
          Input:
            x[0].shape = [1, 2]
            x[0].data = [ [1.0 , 2.0 ] ]
            x[1].shape = [1, 2]
            x[1].data = [ [3.0 , 4.0 ] ]
            x[2].shape = [1, 2]
            x[2].data = [ [5.0 , 6.0 ] ]
          Attrs:
            axis = 1 or axis = -2
          Output:
            Out.shape = [1, 3, 2]
            Out.data =[ [ [1.0, 2.0]
                          [3.0, 4.0]
                          [5.0, 6.0] ] ]
    Args:
        x (list(Variable)|tuple(Variable)): Input :code:`x` can be a :code:`list` or :code:`tuple` of Tensors, the shapes of all these Tensors
                                     must be the same. Supposing input is N dims
                                     Tensors :math:`[d_0, d_1, ..., d_{n-1}]`, the output is N+1 dims
                                     Tensor :math:`[d_0, d_1, d_{axis-1}, len(x), d_{axis}, ..., d_{n-1}]`.
                                     Supported data types: float32, float64, int32, int64.
        axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,
                              where ``R`` is the number of dimensions of the first input tensor ``x[0]``.
                              If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0.
        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
    Returns:
        Variable: The stacked Tensor, has same data type with input Tensors. Output dim is :math:`rank(x[0])+1`.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            # set batch size=None
            x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32')
            x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32')
            # stack Tensor list
            data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2]
            data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2]
    """
    axis = 0 if axis is None else axis
    if in_dygraph_mode():
        # Imperative mode: dispatch directly to the C++ op.
        return core.ops.stack(x, 'axis', axis)
    if not isinstance(x, list) and not isinstance(x, tuple):
        # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
        # In that case, Variable is array of tensors indeed.
        if isinstance(x, Variable) and x.desc.type(
        ) == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            # Wrap the single TensorArray so the code below can treat x
            # uniformly as a sequence.
            x = [x]
        else:
            raise TypeError("The type of '%s' in %s must be %s, but received %s"
                            % ('x', 'stack',
                               'list[Tensor], tuple[Tensor] or TensorArray',
                               type(x)))
    helper = LayerHelper('stack', **locals())
    out = helper.create_variable_for_type_inference(x[0].dtype)
    if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # A TensorArray cannot be stacked with the plain 'stack' op; use
        # tensor_array_to_tensor with use_stack=True instead.
        assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
                            "number of the elements must be 1, but received %s." % len(x)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        for i in x:
            check_variable_and_dtype(i, 'x', \
                ['float16', 'float32', 'float64', 'int32', 'int64'], 'stack')
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': x[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': True})
    else:
        helper.append_op(
            type='stack',
            inputs={'X': x},
            outputs={'Y': out},
            attrs={'axis': axis})
    return out
@templatedoc(op_type="filter_by_instag")
def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0):
    """
    **Filter By Instag Layer**
    This function filter a batch of ins by instag,
    There are multiple ins, and every ins belongs to some tags.
    We can specify some tags we want. So the ins which belongs to that tags
    remains in the output, and others removed.
    For example, one batch has 4 ins. Every ins has its tag list.
       | Ins    |   Ins_Tag |
       |:-----:|:------:|
       |  0    |   0, 1 |
       |  1    |   1, 3 |
       |  2    |   0, 3 |
       |  3    |   2, 6 |
    And Lod is [1,1,1,1]
    And the filter tags [1]
    From the definition above, ins which has tag 1 can pass the filter
    So Ins 0 and Ins 1 can pass and be seen in the output,
    Ins 2 and 3 cannot pass because they do not has tag 1.
    Actually, if is_lod is false, it is normal tensor that equals to
    lod_tensor with all 1, similar to the example above.
    Args:
        ins (Variable): Input Variable (LoDTensor), usually it is 2D tensor
                        And first dimension can have lod info or not.
        ins_tag (Variable): Input Variable (LoDTensor), usually it is 1D list
                        And split them by lod info
        filter_tag (Variable): Input Variable (1D Tensor/List), usually it is
                        list that holds the tags.
        is_lod (Bool): Boolean value to indicate ins is lod tensor or not.
        out_val_if_empty(Int64): If the output after filter is empty, this value
                        will be set to Output tensor.
    Returns:
        Variable: filtered ins (LoDTensor) and loss weight (Tensor)
    Examples:
        .. code-block:: python
          import paddle.fluid.layers as layers
          ins = layers.data(name='Ins', shape=[-1,32], lod_level=0, dtype='float64')
          ins_tag = layers.data(name='Ins_tag', shape=[-1,16], lod_level=0, dtype='int64')
          filter_tag = layers.data(name='Filter_tag', shape=[-1,16], dtype='int64')
          out, loss_weight = layers.filter_by_instag(ins, ins_tag, filter_tag, True)
    """
    helper = LayerHelper('filter_by_instag', **locals())
    # Surviving instances keep the dtype of `ins`.
    filtered = helper.create_variable_for_type_inference(dtype=ins.dtype)
    # Per-instance loss weights are always float64.
    loss_weight = helper.create_variable_for_type_inference(dtype=np.float64)
    # Mapping from output rows back to input rows; op-internal only.
    index_map = helper.create_variable_for_type_inference(dtype=ins_tag.dtype)
    helper.append_op(
        type='filter_by_instag',
        inputs={'Ins': ins,
                'Ins_tag': ins_tag,
                'Filter_tag': filter_tag},
        outputs={'Out': filtered,
                 'LossWeight': loss_weight,
                 'IndexMap': index_map},
        attrs={'is_lod': is_lod,
               'out_val_if_empty': out_val_if_empty})
    return [filtered, loss_weight]
def unstack(x, axis=0, num=None):
    """
    :alias_main: paddle.unstack
    :alias: paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack
    :old_api: paddle.fluid.layers.unstack
    **UnStack Layer**
    This layer unstacks input Tensor :code:`x` into several Tensors along :code:`axis`.
    If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`.
    If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`,
    and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is
    raised.
    Args:
        x (Tensor): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64.
        axis (int): The axis along which the input is unstacked.
        num (int|None): The number of output variables.
    Returns:
        list(Tensor): The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64.
    Raises:
        ValueError: If x.shape[axis] <= 0 or axis is not in range [-D, D).
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[2, 3, 5], dtype='float32')  # create a tensor with shape=[2, 3, 5]
            y = fluid.layers.unstack(x, axis=1)  # unstack with second axis, which results 3 tensors with shape=[2, 5]
    """
    helper = LayerHelper('unstack', **locals())
    if num is None:
        # Infer the output count from the static shape; a dynamic (<= 0)
        # extent cannot be unstacked at graph-build time.
        if axis is None or x.shape[axis] <= 0:
            raise ValueError('unknown unstack number')
        num = x.shape[axis]
    slices = [
        helper.create_variable_for_type_inference(x.dtype) for _ in range(num)
    ]
    helper.append_op(
        type='unstack',
        inputs={'X': [x]},
        outputs={'Y': slices},
        attrs={'axis': axis,
               'num': num})
    return slices
@deprecated(since='2.0.0', update_to="paddle.expand")
def expand(x, expand_times, name=None):
    """
    :alias_main: paddle.expand
    :alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand
    :old_api: paddle.fluid.layers.expand
    This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
    The times number for each dimension of ``x`` is set by the parameter ``expand_times``.
    The rank of ``x`` should be less than or equal to 6. Please note that size of ``expand_times`` must be the same
    with X's rank. Following is a using case:
    .. code-block:: text
        Input(X) is a 3-D tensor with shape [2, 3, 1]:
                [
                   [[1], [2], [3]],
                   [[4], [5], [6]]
                ]
        Attr(expand_times):  [1, 2, 2]
        Output(Out) is a 3-D tensor with shape [2, 6, 2]:
                [
                    [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
                    [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
                ]
    Args:
        x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` .
        expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``expand_times`` is an Variable, it should be an 1-D Tensor.
                Expand times number for each dimension of ``x`` .
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
    Returns:
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` .
    Raises:
        TypeError: The type of ``expand_times`` must be list, tuple or Variable.
        ValueError: The elements of ``expand_times`` cannot be negative.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            # example 1:
            data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
            expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
            # the shape of expanded_1 is [2, 6, 2].
            # example 2:
            data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
            expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
            expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
            # the shape of expanded_2 is [48, 56].
    """
    if in_dygraph_mode():
        # Imperative mode: materialize any Variable elements of expand_times
        # into plain python ints before calling straight into the C++ op.
        if isinstance(expand_times, (list, tuple)):
            expand_times = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in expand_times
            ]
        return core.ops.expand(x, 'expand_times', expand_times)
    inputs = {"X": [x]}
    attrs = {}
    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
    check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
        raise ValueError(
            "expand op bool date type must set the stop_gradient to be False")
    helper = LayerHelper('expand', input=x, **locals())
    def get_attr_expand_times(list_expand_times):
        # Variable elements are encoded as -1 in the static attribute; their
        # actual values are supplied at runtime via 'expand_times_tensor'.
        attrs_expand_times = []
        for idx, times in enumerate(list_expand_times):
            if isinstance(times, Variable):
                attrs_expand_times.append(-1)
            else:
                attrs_expand_times.append(times)
                assert times > 0, (
                    "Each element given in expand_times must not be negative.")
        return attrs_expand_times
    if isinstance(expand_times, Variable):
        # A single 1-D Tensor carries every repeat count.
        expand_times.stop_gradient = True
        inputs['ExpandTimes'] = expand_times
    elif isinstance(expand_times, (list, tuple)):
        attrs['expand_times'] = get_attr_expand_times(expand_times)
        if utils._contain_var(expand_times):
            # Mixed list of ints and Variables: ship the Variables separately.
            inputs['expand_times_tensor'] = utils._convert_to_tensor_list(
                expand_times)
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
@deprecated(since='2.0.0', update_to="paddle.expand_as")
def expand_as(x, target_tensor, name=None):
    """
    :alias_main: paddle.expand_as
    :alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as
    :old_api: paddle.fluid.layers.expand_as
    expand_as operator tiles to the input by given expand tensor. You should set expand tensor
    for each dimension by providing tensor 'target_tensor'. The rank of X
    should be in [1, 6]. Please note that size of 'target_tensor' must be the same
    with X's rank. Following is a using case:
    .. code-block:: text
        Input(X) is a 3-D tensor with shape [2, 3, 1]:
                [
                   [[1], [2], [3]],
                   [[4], [5], [6]]
                ]
        target_tensor's shape:  [2, 6, 2]
        Output(Out) is a 3-D tensor with shape [2, 6, 2]:
                [
                    [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
                    [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
                ]
    Args:
        x (Variable): A Tensor with dtype float64, float32, int32.
        A tensor with rank in [1, 6].
        target_tensor (Variable): A Tensor with dtype float64, float32, int32.
        target_tensor for expanding to Input(X). Only use target_tensor'shape.
    Returns:
        Variable: A Tensor with dtype float64, float32, int32.
        After expanding, size of each dimension of Output(Out) is equal to the size
        of the corresponding dimension of target_tensor multiplying the corresponding
        value given by target_tensor.
    Examples:
        .. code-block:: python
        import paddle.fluid as fluid
        import numpy as np
        data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
        target_tensor = fluid.layers.data(
          name="target_tensor", shape=[-1,20], dtype='float64')
        result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
        use_cuda = False
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        x = np.random.rand(3,10)
        y = np.random.rand(3,20)
        output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
        print(output[0].shape)
        #(3,20)
    """
    if in_dygraph_mode():
        # Imperative mode dispatches straight to the C++ op.
        return core.ops.expand_as(x, target_tensor)
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
    check_variable_and_dtype(target_tensor, 'target_tensor',
                             ['float32', 'float64', 'int32', 'int64', 'bool'],
                             'expand_as')
    helper = LayerHelper('expand_as', input=x, **locals())
    result_dtype = helper.input_dtype(input_param_name='x')
    result = helper.create_variable_for_type_inference(result_dtype)
    # Only target_tensor's shape is consumed by the op, never its values.
    helper.append_op(
        type='expand_as',
        inputs={'X': x,
                'target_tensor': target_tensor},
        outputs={'Out': result})
    return result
from paddle.fluid.framework import convert_np_dtype_to_dtype_
@deprecated(since='1.8.0', update_to="paddle.uniform")
@templatedoc()
def uniform_random_batch_size_like(input,
                                   shape,
                                   dtype='float32',
                                   input_dim_idx=0,
                                   output_dim_idx=0,
                                   min=-1.0,
                                   max=1.0,
                                   seed=0):
    """
    This OP initializes a variable with random values sampled from a
    uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension.
    .. code-block:: text
        *Case 1:
            Given:
                input =[[0.946741  , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
                shape=[2,4]
            result.shape[output_dim_idx] = input.shape[input_dim_idx],
            output_dim_idx = 0,
            input_dim_idx = 0,
            result.shape[0] = input.shape[0],
            then:
                result=[[ 0.3443427 , -0.23056602,  0.3477049 ,  0.06139076]]    # result.shape=[1,4]
       *Case 2:
           Given:
               input =[[0.946741  , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
               shape=[2,4]
               input_dim_idx=1
               output_dim_idx=1
           result.shape[output_dim_idx] = input.shape[input_dim_idx],
           output_dim_idx = 1,
           input_dim_idx = 1,
           result.shape[1] = input.shape[1],
           then:
               result=[[-0.23133647, -0.84195036,  0.21441269],
                       [-0.08774924,  0.25605237, -0.09403259]]    # result.shape=[2,3]
    Args:
        input (Variable): A Tensor. Supported data types: float32, float64.
        shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int.
        input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default  0.
        output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
        min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
        max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
        seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system.Note that if seed is not 0, this operator will always generate the same random numbers every time.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
    Returns:
        Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            # example 1:
            input = fluid.data(name="input", shape=[1, 3], dtype='float32')
            out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
            # example 2:
            out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
    """
    check_variable_and_dtype(input, 'Input', ("float32", 'float64'),
                             'uniform_random_batch_size_like')
    check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like')
    check_dtype(dtype, 'dtype', ('float32', 'float64'),
                'uniform_random_batch_size_like')
    helper = LayerHelper('uniform_random_batch_size_like', **locals())
    sampled = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='uniform_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': sampled},
        attrs={
            'shape': shape,
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'min': min,
            'max': max,
            'seed': seed,
            # The op attribute expects the framework's dtype enum, not a str.
            'dtype': convert_np_dtype_to_dtype_(dtype)
        })
    return sampled
@deprecated(since="2.0.0", update_to="paddle.normal")
@templatedoc()
def gaussian_random(shape,
                    mean=0.0,
                    std=1.0,
                    seed=0,
                    dtype='float32',
                    name=None):
    """
    This OP returns a Tensor filled with random values sampled from a Gaussian
    distribution, with ``shape`` and ``dtype``.
    Args:
        shape(list|tuple|Tensor): The shape of the output Tensor. If ``shape``
            is a list or tuple, the elements of it should be integers or Tensors
            (with the shape [1], and the data type int32 or int64). If ``shape``
            is a Tensor, it should be a 1-D Tensor(with the data type int32 or
            int64).
        mean(float|int, optional): Mean of the output tensor, default is 0.0.
        std(float|int, optional): Standard deviation of the output tensor, default
            is 1.0.
        seed(int, optional): ${seed_comment}
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of
            the output Tensor. Supported data types: float32, float64.
            Default is float32.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: A Tensor filled with random values sampled from a Gaussian
        distribution, with ``shape`` and ``dtype``.
    Examples:
       .. code-block:: python
            import paddle.fluid as fluid
            # example 1:
            # attr shape is a list which doesn't contain Tensor.
            result_1 = fluid.layers.gaussian_random(shape=[3, 4])
            # [[-0.31261674,  1.8736548,  -0.6274357,   0.96988016],
            #  [-0.12294637,  0.9554768,   1.5690808,  -1.2894802 ],
            #  [-0.60082096, -0.61138713,  1.5345167,  -0.21834975]]
            # example 2:
            # attr shape is a list which contains Tensor.
            dim_1 = fluid.layers.fill_constant([1], "int64", 2)
            dim_2 = fluid.layers.fill_constant([1], "int32", 3)
            result_2 = fluid.layers.gaussian_random(shape=[dim_1, dim_2])
            # [[ 0.51398206, -0.3389769,   0.23597084],
            #  [ 1.0388143,  -1.2015356,  -1.0499583 ]]
            # example 3:
            # attr shape is a Tensor, the data type must be int64 or int32.
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = fluid.layers.gaussian_random(var_shape)
            # if var_shape's value is [2, 3]
            # result_3 is:
            # [[-0.12310527,  0.8187662,   1.923219  ]
            #  [ 0.70721835,  0.5210541,  -0.03214082]]
       .. code-block:: python
           # declarative mode
           import numpy as np
           from paddle import fluid
           x = fluid.layers.gaussian_random((2, 3), std=2., seed=10)
           place = fluid.CPUPlace()
           exe = fluid.Executor(place)
           start = fluid.default_startup_program()
           main = fluid.default_main_program()
           exe.run(start)
           x_np, = exe.run(main, feed={}, fetch_list=[x])
           x_np
           # array([[2.3060477, 2.676496 , 3.9911983],
           #        [0.9990833, 2.8675377, 2.2279181]], dtype=float32)
       .. code-block:: python
           # imperative mode
           import numpy as np
           from paddle import fluid
           import paddle.fluid.dygraph as dg
           place = fluid.CPUPlace()
           with dg.guard(place) as g:
               x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10)
               x_np = x.numpy()
           x_np
           # array([[2.3060477 , 2.676496  , 3.9911983 , 0.9990833 ],
           #        [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32)
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        # Normalize str / numpy dtypes to the framework's internal dtype enum.
        dtype = convert_np_dtype_to_dtype_(dtype)
    if in_dygraph_mode():
        # Imperative mode: flatten `shape` to a python list and call the
        # C++ op directly.
        shape = utils.convert_shape_to_list(shape)
        return core.ops.gaussian_random('shape', shape, 'mean',
                                        float(mean), 'std',
                                        float(std), 'seed', seed, 'dtype',
                                        dtype)
    check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn')
    check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn')
    inputs = {}
    attrs = {
        'mean': mean,
        'std': std,
        'seed': seed,
        'dtype': dtype,
        'use_mkldnn': False
    }
    # Splits `shape` into static attrs and/or runtime tensor inputs,
    # populating `inputs` and `attrs` in place.
    utils.get_shape_tensor_inputs(
        inputs=inputs,
        attrs=attrs,
        shape=shape,
        op_type='gaussian_random/randn')
    helper = LayerHelper('gaussian_random', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='gaussian_random',
        inputs=inputs,
        outputs={'Out': out},
        attrs=attrs)
    return out
@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
    """
    This op is used for sampling id from multinomial distribution from the input, sampling one id for one sample.
    Parameters:
        x (Variable): 2-D tensor, [batch_size, input_feature_dimensions]
        min (Float): minimum , default 0.0.
        max (Float): maximum, default 1.0.
        seed (Float): Random seed, default 0. if seed is not 0, will generate same number every time.
        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data : float32, float_16, int etc
    Returns:
        Variable: sampling tensor.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.data(
                name="X",
                shape=[13, 11],
                dtype='float32')
            out = fluid.layers.sampling_id(x)
    """
    helper = LayerHelper('sampling_id', **locals())
    sampled_ids = helper.create_variable_for_type_inference(dtype)
    range_attrs = {'min': min, 'max': max, 'seed': seed}
    helper.append_op(
        type='sampling_id',
        inputs={'X': x},
        outputs={'Out': sampled_ids},
        attrs=range_attrs)
    return sampled_ids
@deprecated(since='1.8.0', update_to="paddle.normal")
@templatedoc()
def gaussian_random_batch_size_like(input,
                                    shape,
                                    input_dim_idx=0,
                                    output_dim_idx=0,
                                    mean=0.0,
                                    std=1.0,
                                    seed=0,
                                    dtype='float32'):
    """
    ${comment}

    Args:
        input (Variable): ${input_comment}
        shape (tuple|list): ${shape_comment}
        input_dim_idx (int): ${input_dim_idx_comment}
        output_dim_idx (int): ${output_dim_idx_comment}
        mean (float): ${mean_comment}
        std (float): ${std_comment}
        seed (int): ${seed_comment}
        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data,
            float32 or float_64.

    Returns:
        out (Variable): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(name="input", shape=[13, 11], dtype='float32')

            out = fluid.layers.gaussian_random_batch_size_like(
                input, shape=[-1, 11], mean=1.0, std=2.0)
    """
    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
    # Validate user arguments so errors point at the call site.
    check_type(input, 'input', (Variable),
               'fluid.layers.gaussian_random_batch_size_like')
    check_type(shape, 'shape', (list, tuple),
               'fluid.layers.gaussian_random_batch_size_like')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
                'fluid.layers.gaussian_random_batch_size_like')

    sample = helper.create_variable_for_type_inference(dtype)
    # The op attribute takes the framework dtype enum, not the string form.
    converted_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='gaussian_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': sample},
        attrs={
            'shape': shape,
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'mean': mean,
            'std': std,
            'seed': seed,
            'dtype': converted_dtype
        })

    return sample
@templatedoc()
def sum(x):
    """
    ${comment}

    Case 1:
    ::
        Input:
            Input. Shape = [2, 3]
            Input = [[1, 2, 3],
                     [4, 5, 6]]

        Output:
            The output. Shape = [2, 3]
            Output = [[1, 2, 3],
                      [4, 5, 6]]

    Case 2:
    ::
        Input:
            First input:
            Input1. Shape = [2, 3]
            Input1 = [[1, 2, 3],
                      [4, 5, 6]]

            The second input:
            Input2. Shape = [2, 3]
            Input2 = [[7, 8, 9],
                      [10, 11, 12]]

        Output:
            The output. Shape = [2, 3]
            Output = [[8, 10, 12],
                      [14, 16, 18]]

    Args:
        x (Variable|list(Variable)): ${x_comment}

    Returns:
        Variable: ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
            input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
            sum = fluid.layers.sum([input0, input1])

            # You can print out 'sum' via executor.
            out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_main_program())

            # The printed result is:
            # 1570701754    the sum of input0 and input1:   The place is:CPUPlace
            # Tensor[sum_0.tmp_0]
            #    shape: [2,3,]
            #    dtype: l
            #    data: 8,8,8,8,8,8,

            # the sum of input0 and input1 is 2-D Tensor with shape [2,3].
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They both represent 64-bit integer variables.
    """
    # Delegate to the unified 2.0 implementation; semantics are unchanged.
    summed = paddle.elementwise_sum(x)
    return summed
@templatedoc()
def slice(input, axes, starts, ends):
    """
    :alias_main: paddle.slice
    :alias: paddle.slice,paddle.tensor.slice,paddle.tensor.manipulation.slice
    :old_api: paddle.fluid.layers.slice

    This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
    https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
    Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
    end dimension for each axis in the list of axes and Slice uses this information
    to slice the input data tensor. If a negative value is passed to
    ``starts`` or ``ends`` such as :math:`-i`,  it represents the reverse position of the
    axis :math:`i-1` (here 0 is the initial position).
    If the value passed to ``starts`` or ``ends`` is greater than n
    (the number of elements in this dimension), it represents n.
    For slicing to the end of a dimension with unknown size, it is recommended
    to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` and ``ends``.

    Following examples will explain how slice works:

    .. code-block:: text

        Case1:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [1, 0]
                ends = [2, 3]
            Then:
                result = [ [5, 6, 7], ]

        Case2:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [0, 1]
                ends = [-1, 1000]       # -1 denotes the reverse 0th position of dimension 0.
            Then:
                result = [ [2, 3, 4], ] # result = data[0:1, 1:4]

    Args:
        input (Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
        axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to .
        starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``starts`` is an Variable, it should be an 1-D Tensor.
                It represents starting indices of corresponding axis in ``axes``.
        ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``ends`` is an Variable, it should be an 1-D Tensor .
                It represents ending indices of corresponding axis in ``axes``.

    Returns:
        Variable:  A ``Tensor`` or ``LoDTensor``. The data type is same as ``input``.

    Raises:
        TypeError: The type of ``starts`` must be list, tuple or Variable.
        TypeError: The type of ``ends`` must be list, tuple or Variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input = fluid.data(
                name="input", shape=[4, 5, 6], dtype='float32')

            # example 1:
            # attr starts is a list which doesn't contain tensor Variable.
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            sliced_1 = fluid.layers.slice(input, axes=axes, starts=starts, ends=ends)
            # sliced_1 is input[0:3, 0:2, 2:4].

            # example 2:
            # attr starts is a list which contain tensor Variable.
            minus_3 = fluid.layers.fill_constant([1], "int32", -3)
            sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
            # sliced_2 is input[0:3, 0:2, 2:4].
    """
    if in_dygraph_mode():
        # Eager fast path: all flags set to 1 means every start/end is a
        # compile-time-known value for the C++ op.
        infer_flags = list(1 for i in range(len(axes)))
        if isinstance(starts, (list, tuple)) and isinstance(ends,
                                                            (list, tuple)):
            # Unwrap scalar Variables into plain Python ints so they can be
            # passed as op attributes.
            starts = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in starts
            ]
            ends = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in ends
            ]

            return core.ops.slice(input, 'axes', axes, 'starts', starts, 'ends',
                                  ends, 'infer_flags', infer_flags)

    if not isinstance(starts, (list, tuple, Variable)):
        raise ValueError(
            "Input starts must be an Variable, python list or tuple.")
    if not isinstance(ends, (list, tuple, Variable)):
        raise ValueError(
            "Input ends must be an Variable, python list or tuple.")

    helper = LayerHelper('slice', **locals())

    inputs = {'Input': input}
    attrs = {'axes': axes}
    # infer_flags[i] == -1 marks an axis whose start/end is only known at
    # runtime (a tensor was supplied instead of a constant).
    infer_flags = list(1 for i in range(len(axes)))

    # starts: either a single 1-D tensor, a list possibly mixing ints and
    # scalar tensors, or a plain list of ints (pure attribute case).
    if isinstance(starts, Variable):
        starts.stop_gradient = True
        inputs['StartsTensor'] = starts
        infer_flags = list(-1 for i in range(len(axes)))
    elif isinstance(starts, (list, tuple)):
        attrs['starts'] = []
        if utils._contain_var(starts):
            inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
            for i, dim in enumerate(starts):
                if isinstance(dim, Variable):
                    # Placeholder -1 in the attribute; real value comes from
                    # the tensor list at runtime.
                    attrs['starts'].append(-1)
                    infer_flags[i] = -1
                else:
                    attrs['starts'].append(dim)
        else:
            attrs['starts'] = starts

    # ends: handled symmetrically to starts.
    if isinstance(ends, Variable):
        ends.stop_gradient = True
        inputs['EndsTensor'] = ends
        infer_flags = list(-1 for i in range(len(axes)))
    elif isinstance(ends, (list, tuple)):
        attrs['ends'] = []
        if utils._contain_var(ends):
            inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
            for i, dim in enumerate(ends):
                if isinstance(dim, Variable):
                    attrs['ends'].append(-1)
                    infer_flags[i] = -1
                else:
                    attrs['ends'].append(dim)
        else:
            attrs['ends'] = ends

    # infer_flags
    attrs['infer_flags'] = infer_flags
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('input'))
    helper.append_op(
        type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out})

    return out
@templatedoc()
def strided_slice(input, axes, starts, ends, strides):
    """
    :alias_main: paddle.strided_slice
    :alias: paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice
    :old_api: paddle.fluid.layers.strided_slice

    This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
    https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
    Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
    end dimension for each axis in the list of axes and Slice uses this information
    to slice the input data tensor. If a negative value is passed to
    ``starts`` or ``ends`` such as :math:`-i`,  it represents the reverse position of the
    axis :math:`i-1` th(here 0 is the initial position). The ``strides`` represents steps of
    slicing and if the ``strides`` is negative, slice operation is in the opposite direction.
    If the value passed to ``starts`` or ``ends`` is greater than n
    (the number of elements in this dimension), it represents n.
    For slicing to the end of a dimension with unknown size, it is recommended
    to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` , ``ends`` and ``strides``.

    Following examples will explain how strided_slice works:

    .. code-block:: text

        Case1:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [1, 0]
                ends = [2, 3]
                strides = [1, 1]
            Then:
                result = [ [5, 6, 7], ]

        Case2:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [0, 1]
                ends = [2, 0]
                strides = [1, -1]
            Then:
                result = [ [8, 7, 6], ]

        Case3:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [0, 1]
                ends = [-1, 1000]
                strides = [1, 3]
            Then:
                result = [ [2], ]

    Args:
        input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
        axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
                            It's optional. If it is not provides, it will be treated as :math:`[0,1,...,len(starts)-1]`.
        starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``starts`` is an Variable, it should be an 1-D Tensor.
                It represents starting indices of corresponding axis in ``axes``.
        ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``ends`` is an Variable, it should be an 1-D Tensor .
                It represents ending indices of corresponding axis in ``axes``.
        strides (list|tuple|Variable): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``strides`` is an Variable, it should be an 1-D Tensor .
                It represents slice step of corresponding axis in ``axes``.

    Returns:
        Variable:  A ``Tensor`` or ``LoDTensor`` with the same dimension as ``input``. The data type is same as ``input``.

    Raises:
        TypeError: The type of ``starts`` must be list, tuple or Variable.
        TypeError: The type of ``ends`` must be list, tuple or Variable.
        TypeError: The type of ``strides`` must be list, tuple or Variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input = fluid.data(
                name="input", shape=[3, 4, 5, 6], dtype='float32')

            # example 1:
            # attr starts is a list which doesn't contain tensor Variable.
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            strides_1 = [1, 1, 1]
            strides_2 = [1, 1, 2]
            sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1)
            # sliced_1 is input[:, 0:3:1, 0:2:1, 2:4:1].

            # example 2:
            # attr starts is a list which contain tensor Variable.
            minus_3 = fluid.layers.fill_constant([1], "int32", -3)
            sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
            # sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2].
    """
    helper = LayerHelper('strided_slice', **locals())

    # Validate argument types first so failures point at the user's call.
    check_variable_and_dtype(input, 'input',
                             ['float32', 'float64', 'int32', 'int64'],
                             'strided_slice')
    check_type(axes, 'axes', (list, tuple), 'strided_slice')
    check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice')
    check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice')
    check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice')

    def check_list_elements_dtype(list_input, input_name):
        # Any Variable supplied (whole list or an element) must be int32.
        if isinstance(list_input, Variable):
            check_dtype(list_input.dtype, input_name, ['int32'],
                        'strided_slice')
        else:
            for i, var in enumerate(list_input):
                var_name = input_name + '[' + str(i) + ']'
                if isinstance(var, Variable):
                    check_dtype(var.dtype, var_name, ['int32'], 'strided_slice')

    check_list_elements_dtype(axes, 'axes')
    check_list_elements_dtype(starts, 'starts')
    check_list_elements_dtype(ends, 'ends')
    check_list_elements_dtype(strides, 'strides')

    def get_new_list_tensor(old_list):
        # Normalize a mixed list of ints/Variables into a list of tensors;
        # int entries are materialized with fill_constant on CPU.
        new_list_tensor = []
        for dim in old_list:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_list_tensor.append(dim)
            else:
                assert (isinstance(dim, int))
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_list_tensor.append(temp_out)
        return new_list_tensor

    inputs = {'Input': input}
    attrs = {'axes': axes}
    # infer_flags[i] == -1 marks an axis whose bounds are runtime tensors.
    infer_flags = list(1 for i in range(len(axes)))

    if in_dygraph_mode():
        # Eager mode: pass everything as plain attributes; the op below is
        # executed immediately by the tracer.
        inputs = {'Input': input}
        attrs = {
            'axes': axes,
            'starts': starts,
            'ends': ends,
            'strides': strides,
            'infer_flags': infer_flags
        }
    else:
        # starts
        if isinstance(starts, Variable):
            starts.stop_gradient = True
            inputs['StartsTensor'] = starts
        elif isinstance(starts, (list, tuple)):
            attrs['starts'] = []
            if utils._contain_var(starts):
                inputs['StartsTensorList'] = get_new_list_tensor(starts)
                for i, dim in enumerate(starts):
                    if isinstance(dim, Variable):
                        # -1 is a placeholder; the tensor list supplies the
                        # real value at runtime.
                        attrs['starts'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['starts'].append(dim)
            else:
                attrs['starts'] = starts

        # ends: symmetric with starts.
        if isinstance(ends, Variable):
            ends.stop_gradient = True
            inputs['EndsTensor'] = ends
        elif isinstance(ends, (list, tuple)):
            attrs['ends'] = []
            if utils._contain_var(ends):
                inputs['EndsTensorList'] = get_new_list_tensor(ends)
                for i, dim in enumerate(ends):
                    if isinstance(dim, Variable):
                        attrs['ends'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['ends'].append(dim)
            else:
                attrs['ends'] = ends

        # strides: symmetric with starts/ends.
        if isinstance(strides, Variable):
            strides.stop_gradient = True
            inputs['StridesTensor'] = strides
        elif isinstance(strides, (list, tuple)):
            attrs['strides'] = []
            if utils._contain_var(strides):
                inputs['StridesTensorList'] = get_new_list_tensor(strides)
                for i, dim in enumerate(strides):
                    if isinstance(dim, Variable):
                        attrs['strides'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['strides'].append(dim)
            else:
                attrs['strides'] = strides

    attrs['infer_flags'] = infer_flags
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('input'))
    helper.append_op(
        type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out})

    return out
def shape(input):
    """
    :alias_main: paddle.shape
    :alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape
    :old_api: paddle.fluid.layers.shape

    **Shape Layer**

    Get the shape of the input.

    .. code-block:: text

        Case1:
            Given N-D Tensor:
                input = [ [1, 2, 3, 4], [5, 6, 7, 8] ]
            Then:
                input.shape = [2, 4]

        Case2:
            Given SelectedRows:
                input.rows = [0, 4, 19]
                input.height = 20
                input.value = [ [1, 2], [3, 4], [5, 6] ]  # inner tensor
            Then:
                input.shape = [3, 2]

    Args:
        input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64.
                          If input variable is type of SelectedRows, returns the shape of it's inner tensor.

    Returns:
        Variable (Tensor): The shape of the input variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
            output = fluid.layers.shape(inputs)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            img = np.ones((3, 100, 100)).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
            print(res) # [array([  3, 100, 100], dtype=int32)]
    """
    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'shape')
    helper = LayerHelper('shape', **locals())
    # The shape op always emits an int32 tensor regardless of input dtype.
    shape_var = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='shape',
        inputs={'Input': input},
        outputs={'Out': shape_var})
    return shape_var
def rank(input):
    """
    :alias_main: paddle.rank
    :alias: paddle.rank,paddle.tensor.rank,paddle.tensor.attribute.rank
    :old_api: paddle.fluid.layers.rank

    The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.

    Args:
        input (Variable): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.

    Returns:
        Variable, the output data type is int32.: The 0-D tensor with the dimensions of the input variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input = fluid.data(name="input", shape=[3, 100, 100], dtype="float32")
            rank = fluid.layers.rank(input) # rank=(3,)
    """
    check_type(input, 'input', (Variable), 'input')
    # The rank is static metadata, so materialize it as a constant tensor.
    return assign(np.array(len(input.shape), 'int32'))
@deprecated(since="2.0.0", update_to="paddle.numel")
def size(input):
    """
    **Size Layer**

    Returns the number of elements for a tensor, which is a int64 Tensor with shape [1].

    Args:
        input (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.

    Returns:
        Tensor: The number of elements for the input Tensor.

    Raises:
        TypeError: ``input`` must be a Tensor and the data type of ``input`` must be one of bool, float16, float32, float64, int32, int64.

    Examples:
        .. code-block:: python

            import paddle.fluid.layers as layers

            input = layers.data(
                name="input", shape=[3, 100], dtype="float32", append_batch_size=False)
            rank = layers.size(input) # 300
    """
    if in_dygraph_mode():
        # BUG FIX: this branch (and the check below) previously referenced an
        # undefined name ``x``, raising NameError on every call. The actual
        # parameter is ``input``.
        return core.ops.size(input)
    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size")
    helper = LayerHelper('size', **locals())
    out = helper.create_variable_for_type_inference(dtype='int64')
    helper.append_op(type='size', inputs={'Input': input}, outputs={'Out': out})

    return out
def _elementwise_op(helper):
    """Append the static-graph elementwise op described by *helper*.

    Pulls the operands and options out of ``helper.kwargs`` (the keyword
    arguments the public ``elementwise_*`` wrapper was called with), validates
    them, appends the op, and applies the optional activation.
    """
    op_type = helper.layer_type
    lhs = helper.kwargs.get('x', None)
    rhs = helper.kwargs.get('y', None)
    assert lhs is not None, 'x cannot be None in {}'.format(op_type)
    assert rhs is not None, 'y cannot be None in {}'.format(op_type)
    for operand, tag in ((lhs, 'x'), (rhs, 'y')):
        check_variable_and_dtype(
            operand, tag, ['float16', 'float32', 'float64', 'int32', 'int64'],
            op_type)

    out = helper.create_variable_for_type_inference(dtype=lhs.dtype)
    helper.append_op(
        type=op_type,
        inputs={'X': lhs,
                'Y': rhs},
        outputs={'Out': out},
        attrs={
            'axis': helper.kwargs.get('axis', -1),
            'use_mkldnn': helper.kwargs.get('use_mkldnn', False)
        })
    return helper.append_activation(out)
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    """
    :alias_main: paddle.scale
    :alias: paddle.scale,paddle.tensor.scale,paddle.tensor.math.scale
    :old_api: paddle.fluid.layers.scale

    Scale operator.

    Putting scale and bias to the input Tensor as following:

    ``bias_after_scale`` is True:

    .. math::
                            Out=scale*X+bias

    ``bias_after_scale`` is False:

    .. math::
                            Out=scale*(X+bias)

    Args:
        x(Variable): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
        scale(float|Variable): The scale factor of the input, it should be a float number or a Variable with shape [1] and data type as float32.
        bias(float): The bias to be put on the input.
        bias_after_scale(bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances.
        act(str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable(Tensor|LoDTensor): Output tensor of scale operator, with shape and data type same as input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
            output = fluid.layers.scale(inputs, scale = 2.0, bias = 1.0)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
            print(res) # [array([[ 3.,  5.,  7.], [ 9., 11., 13.]], dtype=float32)]

        .. code-block:: python

            # scale with parameter scale as Variable
            import paddle.fluid as fluid
            import numpy as np

            inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
            scale = fluid.layers.data(name="scale", shape=[1], dtype='float32',
                                      append_batch_size=False)
            output = fluid.layers.scale(inputs, scale = scale, bias = 1.0)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
            scale_np = np.array([2.]).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'x':img, 'scale':scale_np}, fetch_list=[output])
            print(res) # [array([[ 3.,  5.,  7.], [ 9., 11., 13.]], dtype=float32)]
    """
    if in_dygraph_mode():
        # A tensor-valued ``scale`` is unwrapped to a Python scalar in eager
        # mode; the op only takes it as an attribute here.
        _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
        out = core.ops.scale(x, 'scale',
                             float(_scale), 'bias',
                             float(bias), 'bias_after_scale', bias_after_scale)
        # BUG FIX: forward ``act`` so the dygraph path applies the requested
        # activation, matching the static-graph path below (it was previously
        # dropped, silently ignoring ``act`` in dygraph mode).
        return dygraph_utils._append_activation_in_dygraph(out, act)

    check_variable_and_dtype(x, "x", [
        'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64',
        'uint8'
    ], "scale")
    inputs = {'X': [x]}
    attrs = {
        'bias': float(bias),
        'bias_after_scale': bias_after_scale,
    }
    # A tensor-valued scale becomes an op input; a Python number stays an attr.
    if isinstance(scale, Variable):
        inputs['ScaleTensor'] = [scale]
    else:
        attrs['scale'] = float(scale)
    helper = LayerHelper('scale', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return helper.append_activation(out)
def elementwise_add(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_add
    :alias: paddle.elementwise_add,paddle.tensor.elementwise_add,paddle.tensor.math.elementwise_add
    :old_api: paddle.fluid.layers.elementwise_add

    Computes ``out = x + y`` element-wise with broadcasting; ``axis`` gives
    the dimension of ``x`` that ``y``'s first dimension is aligned to, and
    ``act`` optionally applies an activation to the result.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_add(x, y)  # z = x + y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # [3., 8., 6.]

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.ones((2, 3, 4, 5)).astype('float32'),
                    "y": np.zeros((3, 4)).astype('float32')
                }

            x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
            y = fluid.data(name="y", shape=[3,4], dtype='float32')
            z = fluid.layers.elementwise_add(x, y, axis=1)  # z = x + y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # z.shape=[2,3,4,5]
    """
    if not in_dygraph_mode():
        # Static graph: build the op through the shared elementwise helper.
        return _elementwise_op(LayerHelper('elementwise_add', **locals()))

    # Dygraph: execute eagerly, honouring the global MKL-DNN flag.
    return _elementwise_op_in_dygraph(
        x,
        y,
        axis=axis,
        act=act,
        op_name='elementwise_add',
        use_mkldnn=core.globals()["FLAGS_use_mkldnn"])
@deprecated(since="2.0.0", update_to="paddle.divide")
def elementwise_div(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_div
    :alias: paddle.elementwise_div,paddle.tensor.elementwise_div,paddle.tensor.math.elementwise_div
    :old_api: paddle.fluid.layers.elementwise_div

    Computes ``out = x / y`` element-wise with broadcasting; ``axis`` gives
    the dimension of ``x`` that ``y``'s first dimension is aligned to, and
    ``act`` optionally applies an activation to the result.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_div(x, y)  # z = x / y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # [2., 0.6, 2.]

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                    "y": np.random.randint(1, 5, size=[5]).astype('float32')
                }

            x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
            y = fluid.data(name="y", shape=[5], dtype='float32')
            z = fluid.layers.elementwise_div(x, y, axis=3)  # z = x / y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # z.shape=[2,3,4,5]
    """
    if not in_dygraph_mode():
        # Static graph: build the op through the shared elementwise helper.
        return _elementwise_op(LayerHelper('elementwise_div', **locals()))

    return _elementwise_op_in_dygraph(
        x, y, axis=axis, act=act, op_name='elementwise_div')
def elementwise_sub(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_sub
    :alias: paddle.elementwise_sub,paddle.tensor.elementwise_sub,paddle.tensor.math.elementwise_sub
    :old_api: paddle.fluid.layers.elementwise_sub

    Computes ``out = x - y`` element-wise with broadcasting; ``axis`` gives
    the dimension of ``x`` that ``y``'s first dimension is aligned to, and
    ``act`` optionally applies an activation to the result.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_sub(x, y)  # z = x - y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # [1., -2., 2.]

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.ones((2, 3, 4, 5)).astype('float32'),
                    "y": np.zeros((3, 4)).astype('float32')
                }

            x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
            y = fluid.data(name="y", shape=[3,4], dtype='float32')
            z = fluid.layers.elementwise_sub(x, y, axis=1)  # z = x - y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # z.shape=[2,3,4,5]
    """
    if not in_dygraph_mode():
        # Static graph: build the op through the shared elementwise helper.
        return _elementwise_op(LayerHelper('elementwise_sub', **locals()))

    return _elementwise_op_in_dygraph(
        x, y, axis=axis, act=act, op_name='elementwise_sub')
@deprecated(since="2.0.0", update_to="paddle.multiply")
def elementwise_mul(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_mul
    :alias: paddle.elementwise_mul,paddle.tensor.elementwise_mul,paddle.tensor.math.elementwise_mul
    :old_api: paddle.fluid.layers.elementwise_mul

    Computes ``out = x * y`` element-wise with broadcasting; ``axis`` gives
    the dimension of ``x`` that ``y``'s first dimension is aligned to, and
    ``act`` optionally applies an activation to the result.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_mul(x, y)  # z = x * y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # [2., 15., 8.]

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.ones((2, 3, 4, 5)).astype('float32'),
                    "y": np.zeros((3, 4)).astype('float32')
                }

            x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
            y = fluid.data(name="y", shape=[3,4], dtype='float32')
            z = fluid.layers.elementwise_mul(x, y, axis=1)  # z = x * y

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # z.shape=[2,3,4,5]
    """
    if not in_dygraph_mode():
        # Static graph: build the op through the shared elementwise helper.
        return _elementwise_op(LayerHelper('elementwise_mul', **locals()))

    return _elementwise_op_in_dygraph(
        x, y, axis=axis, act=act, op_name='elementwise_mul')
def elementwise_max(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_max
    :alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max
    :old_api: paddle.fluid.layers.elementwise_max

    Computes the element-wise maximum of ``x`` and ``y`` with broadcasting;
    ``axis`` gives the dimension of ``x`` that ``y``'s first dimension is
    aligned to, and ``act`` optionally applies an activation to the result.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_max(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # [2, 5, 4]

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data():
                return {
                    "x": np.ones((2, 3, 4, 5)).astype('float32'),
                    "y": np.zeros((3, 4)).astype('float32')
                }

            x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
            y = fluid.data(name="y", shape=[3,4], dtype='float32')
            z = fluid.layers.elementwise_max(x, y, axis=1)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            print(z_value)  # [[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]]
    """
    if not in_dygraph_mode():
        # Static graph: build the op through the shared elementwise helper.
        return _elementwise_op(LayerHelper('elementwise_max', **locals()))

    return _elementwise_op_in_dygraph(
        x, y, axis=axis, act=act, op_name='elementwise_max')
def elementwise_min(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_min
    :alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min
    :old_api: paddle.fluid.layers.elementwise_min
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }
            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_min(x, y)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(),
                                fetch_list=[z.name])
            print(z_value) #[1, 3, 2]
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            def gen_data():
                return {
                    "x": np.ones((2, 3, 4, 5)).astype('float32'),
                    "y": np.zeros((3, 4)).astype('float32')
                }
            x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
            y = fluid.data(name="y", shape=[3,4], dtype='float32')
            z = fluid.layers.elementwise_min(x, y, axis=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(),
                                fetch_list=[z.name])
            print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ kernel.
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_min')
    # Static-graph path. NOTE: LayerHelper(**locals()) forwards every local
    # (x, y, axis, act, name) to the generic elementwise builder, so no new
    # locals may be introduced before this call.
    return _elementwise_op(LayerHelper('elementwise_min', **locals()))
def elementwise_pow(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_pow
    :alias: paddle.elementwise_pow,paddle.tensor.elementwise_pow,paddle.tensor.math.elementwise_pow
    :old_api: paddle.fluid.layers.elementwise_pow
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }
            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_pow(x, y)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(),
                                fetch_list=[z.name])
            print(z_value) #[2, 243, 16]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ kernel.
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_pow')
    # Static-graph path. NOTE: LayerHelper(**locals()) forwards every local
    # (x, y, axis, act, name) to the generic elementwise builder, so no new
    # locals may be introduced before this call.
    return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
@deprecated(since="2.0.0", update_to="paddle.remainder")
def elementwise_mod(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_mod
    :alias: paddle.elementwise_mod,paddle.tensor.elementwise_mod,paddle.tensor.math.elementwise_mod
    :old_api: paddle.fluid.layers.elementwise_mod
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            def gen_data():
                return {
                    "x": np.array([10, 15, 8]).astype('int32'),
                    "y": np.array([3, 6, 5]).astype('int32')
                }
            x = fluid.data(name="x", shape=[3], dtype='int32')
            y = fluid.data(name="y", shape=[3], dtype='int32')
            z = fluid.layers.elementwise_mod(x, y)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(),
                                fetch_list=[z.name])
            print(z_value) #[1, 3, 3]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ kernel.
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_mod')
    # Static-graph path. NOTE: LayerHelper(**locals()) forwards every local
    # (x, y, axis, act, name) to the generic elementwise builder, so no new
    # locals may be introduced before this call.
    return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
@deprecated(since="2.0.0", update_to="paddle.floor_divide")
def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_floordiv
    :alias: paddle.elementwise_floordiv,paddle.tensor.elementwise_floordiv,paddle.tensor.math.elementwise_floordiv
    :old_api: paddle.fluid.layers.elementwise_floordiv
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            def gen_data():
                return {
                    "x": np.array([10, 15, 8]).astype('int32'),
                    "y": np.array([3, 7, 5]).astype('int32')
                }
            x = fluid.data(name="x", shape=[3], dtype='int32')
            y = fluid.data(name="y", shape=[3], dtype='int32')
            z = fluid.layers.elementwise_floordiv(x, y)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(),
                                fetch_list=[z.name])
            print(z_value) #[3, 2, 1]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ kernel.
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_floordiv')
    # Static-graph path. NOTE: LayerHelper(**locals()) forwards every local
    # (x, y, axis, act, name) to the generic elementwise builder, so no new
    # locals may be introduced before this call.
    return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))
# Import-time docstring surgery for the elementwise_* layers: prepend the
# documentation generated from each operator's C++ OpProto so the Python docs
# stay in sync with the registered operator attributes.
for func in [
        elementwise_add,
        elementwise_div,
        elementwise_sub,
        elementwise_mul,
        elementwise_max,
        elementwise_pow,
        elementwise_min,
        elementwise_mod,
        elementwise_floordiv,
]:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
    # insert the c++ doc string on top of python doc string
    func.__doc__ = _generate_doc_string_(
        op_proto,
        additional_args_lines=[
            "axis (int32, optional): If X.dimension != Y.dimension, \
            Y.dimension must be a subsequence of x.dimension. \
            And axis is the start dimension index for broadcasting Y onto X. ",
            "act (string, optional): Activation applied to the output. \
            Default is None. Details: :ref:`api_guide_activations_en` ",
            "name (string, optional): Name of the output. \
            Default is None. It's used to print debug info for developers. Details: \
            :ref:`api_guide_Name` "
        ],
        skip_attrs_set={
            "x_data_format", "y_data_format", "axis", "use_quantizer",
            "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
        }) + """\n""" + str(func.__doc__)
    # If a @deprecated decorator injected a "Warning: ... instead." line into
    # the docstring, hoist that line to the very top of the combined text so
    # the deprecation notice is the first thing rendered.
    doc_list = func.__doc__.splitlines()
    for idx, val in enumerate(doc_list):
        if val.startswith("Warning: ") and val.endswith(
                " instead."
        ) and "and will be removed in future versions." in val:
            doc_list.insert(0, doc_list.pop(idx))
            func.__doc__ = "\n" + "\n".join(i for i in doc_list)
            break
# NOTE(review): the iterable below is intentionally empty, so this loop body
# is dead code -- it never executes. It is kept only as a template for
# appending generated usage examples to elementwise-style layer docstrings.
for func in []:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
    func.__doc__ = _generate_doc_string_(
        op_proto,
        additional_args_lines=[
            "act (basestring|None): Activation applied to the output.",
            "name (basestring|None): Name of the output."
        ])
    func.__doc__ = func.__doc__ + """
Examples:
  .. code-block:: python
    import paddle.fluid as fluid
    # example 1: shape(x) = (2, 3, 4, 5), shape(y) = (2, 3, 4, 5)
    x0 = fluid.layers.data(name="x0", shape=[2, 3, 4, 5], dtype='float32')
    y0 = fluid.layers.data(name="y0", shape=[2, 3, 4, 5], dtype='float32')
    z0 = fluid.layers.%s(x0, y0)
    # example 2: shape(X) = (2, 3, 4, 5), shape(Y) = (5)
    x1 = fluid.layers.data(name="x1", shape=[2, 3, 4, 5], dtype='float32')
    y1 = fluid.layers.data(name="y1", shape=[5], dtype='float32')
    z1 = fluid.layers.%s(x1, y1)
    # example 3: shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    x2 = fluid.layers.data(name="x2", shape=[2, 3, 4, 5], dtype='float32')
    y2 = fluid.layers.data(name="y2", shape=[4, 5], dtype='float32')
    z2 = fluid.layers.%s(x2, y2, axis=2)
    # example 4: shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    x3 = fluid.layers.data(name="x3", shape=[2, 3, 4, 5], dtype='float32')
    y3 = fluid.layers.data(name="y3", shape=[3, 4], dtype='float32')
    z3 = fluid.layers.%s(x3, y3, axis=1)
    # example 5: shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    x4 = fluid.layers.data(name="x4", shape=[2, 3, 4, 5], dtype='float32')
    y4 = fluid.layers.data(name="y4", shape=[2], dtype='float32')
    z4 = fluid.layers.%s(x4, y4, axis=0)
    # example 6: shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
    x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32')
    y5 = fluid.layers.data(name="y5", shape=[2], dtype='float32')
    z5 = fluid.layers.%s(x5, y5, axis=0)
    """ % (func.__name__, func.__name__, func.__name__, func.__name__,
           func.__name__, func.__name__)
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
    """Shared builder for the logical_* layers.

    Dispatches ``op_name`` either to the dygraph C++ op or, in static-graph
    mode, appends the op via LayerHelper. ``y`` is ignored when
    ``binary_op`` is False (unary ops such as logical_not).
    """
    # Imperative fast path: look the op up on core.ops and call it directly.
    if in_dygraph_mode():
        dygraph_op = getattr(core.ops, op_name)
        return dygraph_op(x, y) if binary_op else dygraph_op(x)

    # Static-graph path: validate dtypes/types before wiring the op.
    check_variable_and_dtype(x, "x", ["bool"], op_name)
    if y is not None:
        check_variable_and_dtype(y, "y", ["bool"], op_name)
    if out is not None:
        check_type(out, "out", Variable, op_name)
    # NOTE: **locals() forwards op_name/x/y/out/name/binary_op to the helper;
    # no extra locals exist on this path before this call.
    helper = LayerHelper(op_name, **locals())
    if binary_op:
        assert x.dtype == y.dtype
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

    op_inputs = {"X": x, "Y": y} if binary_op else {"X": x}
    helper.append_op(type=op_name, inputs=op_inputs, outputs={"Out": out})
    return out
def logical_and(x, y, out=None, name=None):
    """
    Compute the element-wise logical AND of ``x`` and ``y`` and return the
    result as ``out``. ``x``, ``y`` and ``out`` are N-D boolean Tensors.

    Each element of ``out`` is ``x and y``. Broadcasting is supported; see
    :ref:`user_guide_broadcasting` for details.

    Args:
        x (Tensor): Input tensor, dtype must be bool.
        y (Tensor): Input tensor, dtype must be bool.
        out (Tensor, optional): A pre-created Tensor that receives the
            result. Defaults to None, in which case a new Tensor is created.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension
        equals that of ``x``.

    Examples:
        .. code-block:: python

            import paddle
            paddle.disable_static()
            x = paddle.to_tensor([True])
            y = paddle.to_tensor([True, False, True, False])
            res = paddle.logical_and(x, y)
            print(res.numpy()) # [True False True False]
    """
    # Delegate to the shared logical-op builder as a binary operation.
    return _logical_op(
        op_name="logical_and", x=x, y=y, out=out, name=name, binary_op=True)
def logical_or(x, y, out=None, name=None):
    """
    Compute the element-wise logical OR of ``x`` and ``y`` and return the
    result as ``out``. ``x``, ``y`` and ``out`` are N-D boolean Tensors.

    Each element of ``out`` is ``x or y``. Broadcasting is supported; see
    :ref:`user_guide_broadcasting` for details.

    Args:
        x (Tensor): Input tensor, dtype must be bool.
        y (Tensor): Input tensor, dtype must be bool.
        out (Tensor, optional): A pre-created Tensor that receives the
            result. Defaults to None, in which case a new Tensor is created.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension
        equals that of ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            paddle.disable_static()
            x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
            y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
            x = paddle.to_tensor(x_data)
            y = paddle.to_tensor(y_data)
            res = paddle.logical_or(x, y)
            print(res.numpy()) # [[ True  True] [ True False]]
    """
    # Delegate to the shared logical-op builder as a binary operation.
    return _logical_op(
        op_name="logical_or", x=x, y=y, out=out, name=name, binary_op=True)
def logical_xor(x, y, out=None, name=None):
    """
    Compute the element-wise logical XOR of ``x`` and ``y`` and return the
    result as ``out``. ``x``, ``y`` and ``out`` are N-D boolean Tensors.

    Each element of ``out`` is ``(x or y) and not (x and y)``. Broadcasting
    is supported; see :ref:`user_guide_broadcasting` for details.

    Args:
        x (Tensor): Input tensor, dtype must be bool.
        y (Tensor): Input tensor, dtype must be bool.
        out (Tensor, optional): A pre-created Tensor that receives the
            result. Defaults to None, in which case a new Tensor is created.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension
        equals that of ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            paddle.disable_static()
            x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
            y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
            x = paddle.to_tensor(x_data)
            y = paddle.to_tensor(y_data)
            res = paddle.logical_xor(x, y)
            print(res.numpy()) # [[False,  True], [ True, False]]
    """
    # Delegate to the shared logical-op builder as a binary operation.
    return _logical_op(
        op_name="logical_xor", x=x, y=y, out=out, name=name, binary_op=True)
@templatedoc()
def logical_not(x, out=None, name=None):
    """
    :alias_main: paddle.logical_not
    :alias: paddle.logical_not, paddle.tensor.logical_not, paddle.tensor.logic.logical_not
    :old_api: paddle.fluid.layers.logical_not
    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``x`` and ``out`` are N-dim boolean ``Variable``.
    Each element of ``out`` is calculated by
    .. math::
        out = !x
    Args:
        x(${x_type}): ${x_comment}.
        out(Variable): The ``Variable`` that specifies the output of the operator, which can be any ``Variable`` that has been created in the program. The default value is None, and a new ``Variable` will be created to save the output.
        name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        ${out_type}: ${out_comment}
    Examples:
        .. code-block:: python
            import paddle
            paddle.disable_static()
            x = paddle.to_tensor([True, False, True, False])
            res = paddle.logical_not(x)
            print(res.numpy()) # [False  True False  True]
    """
    # Unary case: y=None and binary_op=False, so the shared builder only
    # wires "X" into the logical_not op.
    return _logical_op(
        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
@templatedoc()
def clip(x, min, max, name=None):
    """
    :old_api: paddle.fluid.layers.clip
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        min(float): ${min_comment}
        max(float): ${max_comment}
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        ${out_comment}
    Return Type:
        ${out_type}
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            input = fluid.data(
                name='data', shape=[1], dtype='float32')
            reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
    """
    # NOTE: LayerHelper(**locals()) forwards x, min, max and name; keep this
    # call before any new locals are introduced.
    helper = LayerHelper("clip", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')
    # Generate a unique output name when the caller did not supply one.
    if name is None:
        name = unique_name.generate_with_ignorable_key(".".join(
            [helper.name, 'tmp']))
    out = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False)
    helper.append_op(
        type="clip",
        inputs={"X": x},
        attrs={"min": min,
               "max": max},
        outputs={"Out": out})
    return out
@templatedoc()
def clip_by_norm(x, max_norm, name=None):
    """
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        max_norm(${max_norm_type}): ${max_norm_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        Variable:
        out(${out_type}): ${out_comment}
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            input = fluid.data(
                name='data', shape=[None, 1], dtype='float32')
            reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
    """
    # NOTE: LayerHelper(**locals()) forwards x, max_norm and name; keep this
    # call before any new locals are introduced.
    helper = LayerHelper("clip_by_norm", **locals())
    check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')
    # (float) is equivalent to plain float here -- max_norm must be a float.
    check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
    # Generate a unique output name when the caller did not supply one.
    if name is None:
        name = unique_name.generate_with_ignorable_key(".".join(
            [helper.name, 'tmp']))
    out = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False)
    helper.append_op(
        type="clip_by_norm",
        inputs={"X": x},
        attrs={"max_norm": max_norm},
        outputs={"Out": out})
    return out
@deprecated(since="2.0.0", update_to="paddle.mean")
@templatedoc()
def mean(x, name=None):
    """
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        name(basestring|None): Name of the output.
    Returns:
        out(${out_type}): ${out_comment}
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            input = fluid.layers.data(
                name='data', shape=[2, 3], dtype='float32')
            mean = fluid.layers.mean(input)
    """
    # Imperative (dygraph) fast path: call the C++ op directly, skipping the
    # static-graph dtype validation below.
    if in_dygraph_mode():
        return core.ops.mean(x)
    # Static-graph path. NOTE: **locals() forwards x and name to the helper.
    helper = LayerHelper("mean", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})
    return out
@templatedoc()
def merge_selected_rows(x, name=None):
    """
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        name(basestring|None): Name of the output.
    Returns:
        out(${out_type}): ${out_comment}
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            b = fluid.default_main_program().global_block()
            var = b.create_var(
                name="X", dtype="float32", persistable=True,
                type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
            y = fluid.layers.merge_selected_rows(var)
    """
    # NOTE: **locals() forwards x and name to the helper; x is expected to be
    # a SELECTED_ROWS variable (see the example above).
    helper = LayerHelper("merge_selected_rows", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="merge_selected_rows",
        inputs={"X": x},
        attrs={},
        outputs={"Out": out})
    return out
def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
    """
    Mul Operator.
    This operator is used to perform matrix multiplication for input $x$ and $y$.
    The equation is:
    ..  math::
        Out = x * y
    Both the input $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $x$.
    Args:
        x (Variable): The first input Tensor/LoDTensor of mul_op.
        y (Variable): The second input Tensor/LoDTensor of mul_op.
        x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional matrix first. The flattening rule is: the first `num_col_dims` will be flattened to form the first dimension of the final matrix (the height of the matrix), and the rest `rank(x) - num_col_dims` dimensions are flattened to form the second dimension of the final matrix (the width of the matrix). As a result, height of the flattened matrix is equal to the product of $x$'s first `x_num_col_dims` dimensions' sizes, and width of the flattened matrix is equal to the product of $x$'s last `rank(x) - num_col_dims` dimensions' size. For example, suppose $x$ is a 6-dimensional tensor with the shape [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
        y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first. The attribute `y_num_col_dims` determines how $y$ is flattened. See comments of `x_num_col_dims` for more details. Default is 1.
        name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
    Returns:
        Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of mul op.
    Examples:
        ..  code-block:: python
            import paddle.fluid as fluid
            dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32")
            dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32")
            output = fluid.layers.mul(dataX, dataY,
                                      x_num_col_dims = 1,
                                      y_num_col_dims = 1)
    """
    # Imperative (dygraph) fast path: run the C++ mul op directly.
    if in_dygraph_mode():
        return core.ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
                            'y_num_col_dims', y_num_col_dims)
    inputs = {"X": [x], "Y": [y]}
    attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
    # NOTE: `inputs` and `attrs` (along with every parameter) are swept into
    # LayerHelper via **locals(); keep them defined before this call and do
    # not rename them.
    helper = LayerHelper("mul", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="mul", inputs={"X": x,
                            "Y": y}, attrs=attrs, outputs={"Out": out})
    return out
@templatedoc()
def maxout(x, groups, name=None, axis=1):
    """
    :alias_main: paddle.nn.functional.maxout
    :alias: paddle.nn.functional.maxout,paddle.nn.functional.activation.maxout
    :old_api: paddle.fluid.layers.maxout
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        groups(int): ${groups_comment}
        axis(int, optional): ${axis_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        Variable: ${out_comment}
    Raises:
        ValueError: If `axis` is not 1, -1 or 3.
        ValueError: If the number of input channels can not be divisible by `groups`.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            input = fluid.data(
                name='data',
                shape=[None, 256, 32, 32],
                dtype='float32')
            out = fluid.layers.maxout(input, groups=2)
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
    # NOTE: **locals() forwards x, groups, name and axis to the helper.
    helper = LayerHelper("maxout", **locals())
    # Only the channel axis is legal: 1 for NCHW, -1/3 for NHWC.
    if axis not in [1, -1, 3]:
        raise ValueError(
            "Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
            "Attr(axis): %s." % str(axis))
    # Normalize the negative NHWC channel axis to its positive equivalent.
    if axis == -1:
        axis = 3
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="maxout",
        inputs={"X": x},
        attrs={"groups": groups,
               "axis": axis},
        outputs={"Out": out})
    return out
def space_to_depth(x, blocksize, name=None):
    """
    :alias_main: paddle.nn.functional.space_to_depth
    :alias: paddle.nn.functional.space_to_depth,paddle.nn.functional.vision.space_to_depth
    :old_api: paddle.fluid.layers.space_to_depth
    Gives a blocksize to space_to_depth the input LoDtensor with Layout: [batch, channel, height, width]
    This op rearranges blocks of spatial data, into depth. More specifically, this op outputs a copy of \
        theinput LoDtensor where values from the height and width dimensions are moved to the channel \
        dimension.
    The attr blocksize indicates the input block size.
    space_to_depth will reorganize the elements of input with shape[batch, channel, height, width] \
        according to blocksize to construct output with shape \
        [batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]:
    - Non-overlapping blocks of size block_size x block size are rearranged into depth at each location.
    - The Y, X coordinates within each block of the input become the high order component of the output channel index
    - channel should be divisible by square of blocksize
    - height, width should be divsible by blocksize
    This OP is useful for resizing the activations between convolutions \
        (but keeping all data)
    .. code-block:: text
        Given the input x with the shape [1, 1, 4, 4]:
        x.data = [[[[1, 2, 5, 6],
                    [3, 4, 7, 8],
                    [9, 10, 13, 14],
                    [11, 12, 15, 16]]]]
        blocksize = 2
        then get the output with the shape [1, 4, 2, 2]:
        out.data = [[[[1, 2], [3, 4]],
                     [[5, 6], [7, 8]],
                     [[9, 10], [11, 12]],
                     [[13, 14], [15, 16]]]]
    Args:
        x (Variable): The input, which should be 4 dims Tensor or LodTensor, with the shape \
            [batch, channel, height, width]
        blocksize (int): The blocksize to select the element on each feature map should be > 2
        name(str, optional): For detailed information, please refer \
            to :ref:`api_guide_Name`. Usually name is no need to set and \
            None by default.
    Returns: The output, which should be 4 dims Tensor or LodTensor, with the shape \
            [batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]
    Return Type: Variable
    Raises:
        TypeError: blocksize type must be int64.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            data = fluid.data(
                name='data', shape=[1, 4, 2, 2], dtype='float32')
            space_to_depthed = fluid.layers.space_to_depth(
                x=data, blocksize=2)
            exe = fluid.Executor(fluid.CPUPlace())
            data_np = np.arange(0,16).reshape((1,4,2,2)).astype('float32')
            print(data_np)
            #array([[[[ 0.,  1.], [ 2.,  3.]],
            #        [[ 4.,  5.], [ 6.,  7.]],
            #        [[ 8.,  9.], [10., 11.]],
            #        [[12., 13.], [14., 15.]]]], dtype=float32)
            out_main = exe.run(fluid.default_main_program(),
                        feed={'data': data_np},
                        fetch_list=[space_to_depthed])
            print(out_main)
            #[array([[[[ 0.]], [[ 4.]], [[ 1.]], [[ 5.]],
            #         [[ 8.]], [[12.]], [[ 9.]], [[13.]],
            #         [[ 2.]], [[ 6.]], [[ 3.]], [[ 7.]],
            #         [[10.]], [[14.]], [[11.]], [[15.]]]], dtype=float32)]
    """
    # NOTE: **locals() forwards x, blocksize and name to the helper; keep
    # this call before any new locals are introduced.
    helper = LayerHelper("space_to_depth", **locals())
    # blocksize must be a plain Python int (see Raises above).
    if not (isinstance(blocksize, int)):
        raise ValueError("blocksize must be a python Int")
    check_variable_and_dtype(x, 'x', \
        ['float16', 'float32', 'float64', 'int32', 'int64'], 'space_to_depth')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="space_to_depth",
        inputs={"X": x},
        attrs={"blocksize": blocksize},
        outputs={"Out": out})
    return out
def affine_channel(x,
                   scale=None,
                   bias=None,
                   data_layout='NCHW',
                   name=None,
                   act=None):
    """
    :alias_main: paddle.nn.functional.affine_channel
    :alias: paddle.nn.functional.affine_channel,paddle.nn.functional.vision.affine_channel
    :old_api: paddle.fluid.layers.affine_channel
    Applies a separate affine transformation to each channel of the input.
    Useful for replacing spatial batch norm with its equivalent fixed
    transformation. The input also can be 2D tensor and applies a affine
    transformation in second dimension.
    Args:
        x (Variable): Feature map input can be a 4D tensor with order NCHW
            or NHWC. It also can be a 2D tensor and the affine transformation
            is applied in the second dimension.The data type is float32 or float64.
        scale (Variable): 1D input of shape (C), the c-th element is the scale
            factor of the affine transformation for the c-th channel of
            the input.The data type is float32 or float64.
        bias (Variable): 1D input of shape (C), the c-th element is the bias
            of the affine transformation for the c-th channel of the input.
            The data type is float32 or float64.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore
            data_layout.
        name (str, default None): The name of this layer. For more information,
            please refer to :ref:`api_guide_Name` .
        act (str, default None): Activation to be applied to the output of this layer.
    Returns:
        Variable: A tensor which has the same shape, data layout and data type with x.
    Examples:
        .. code-block:: python
            import numpy as np
            import paddle.fluid as fluid
            use_gpu = False
            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)
            data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
            input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
                                    default_initializer=fluid.initializer.Constant(2.0))
            input_bias = fluid.layers.create_parameter(shape=[1],dtype="float32",
                                    default_initializer=fluid.initializer.Constant(0.5))
            out = fluid.layers.affine_channel(data,scale=input_scale,
                                    bias=input_bias)
            exe.run(fluid.default_startup_program())
            test_program = fluid.default_main_program().clone(for_test=True)
            [out_array] = exe.run(test_program,
                                  fetch_list=out,
                                  feed={'data': np.ones([1,1,2,2]).astype('float32')})
            # out_array is [[[[2.5, 2.5],
            #                [2.5, 2.5]]]] with shape: [1, 1, 2, 2]
    """
    # NOTE: **locals() forwards all parameters (including act) to the helper;
    # keep this call before any new locals are introduced.
    helper = LayerHelper("affine_channel", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'affine_channel')
    # scale/bias are optional; None is passed through to the op as-is.
    check_type(scale, 'scale', (Variable, type(None)), 'affine_channel')
    check_type(bias, 'bias', (Variable, type(None)), 'affine_channel')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="affine_channel",
        inputs={"X": x,
                'Scale': scale,
                'Bias': bias},
        attrs={"data_layout": data_layout},
        outputs={"Out": out})
    # Apply the optional activation (`act`) on top of the affine output.
    return helper.append_activation(out)
def similarity_focus(input, axis, indexes, name=None):
    """
    SimilarityFocus Operator
    Generate a similarity focus mask with the same shape of input using the following method:
    1. Extract the 3-D tensor(here the first dimension is BatchSize) corresponding
       to the axis according to the indexes. For example, if axis=1 and indexes=[a],
       it will get the matrix T=X[:, a, :, :]. In this case, if the shape of input X
       is (BatchSize, A, B, C), the shape of tensor T is (BatchSize, B, C).
    2. For each index, find the largest numbers in the tensor T, so that the same
       row and same column has at most one number(what it means is that if the
       largest number has been found in the i-th row and the j-th column, then
       the numbers in the i-th row or j-th column will be skipped. And then the
       next largest number will be selected from the remaining numbers. Obviously
       there will be min(B, C) numbers), and mark the corresponding position of the
       3-D similarity focus mask as 1, otherwise as 0. Do elementwise-or for
       each index.
    3. Broadcast the 3-D similarity focus mask to the same shape of input X.
    Refer to `Similarity Focus Layer <http://www.aclweb.org/anthology/N16-1108>`_
    .. code-block:: text
        * Example :
            Given a 4-D tensor x with the shape (BatchSize, C, A, B), where C is
            the number of channels and the shape of feature map is (A, B):
                x.shape = (2, 3, 2, 2)
                x.data = [[[[0.8, 0.1],
                            [0.4, 0.5]],
                           [[0.9, 0.7],
                            [0.9, 0.9]],
                           [[0.8, 0.9],
                            [0.1, 0.2]]],
                          [[[0.2, 0.5],
                            [0.3, 0.4]],
                           [[0.9, 0.7],
                            [0.8, 0.4]],
                           [[0.0, 0.2],
                            [0.4, 0.7]]]]
            Given axis: 1 (the axis of the channel)
            Given indexes: [0]
            then we get a 4-D tensor out with the same shape of input x:
                out.shape = (2, 3, 2, 2)
                out.data = [[[[1.0, 0.0],
                              [0.0, 1.0]],
                             [[1.0, 0.0],
                              [0.0, 1.0]],
                             [[1.0, 0.0],
                              [0.0, 1.0]]],
                            [[[0.0, 1.0],
                              [1.0, 0.0]],
                             [[0.0, 1.0],
                              [1.0, 0.0]],
                             [[0.0, 1.0],
                              [1.0, 0.0]]]]
    Args:
        input(Variable): The input tensor variable(default float). It should
            be a 4-D tensor with shape [BatchSize, A, B, C]. Data type is
            float32 or float64.
        axis(int): Indicating the dimension to be selected. It can only be
            1, 2 or 3.
        indexes(list): Indicating the indexes of the selected dimension.
    Returns:
        Variable: A tensor variable with the same shape and same type \
            as the input.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            data = fluid.data(
                name='data', shape=[-1, 3, 2, 2], dtype='float32')
            fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
    """
    # NOTE: **locals() forwards input, axis, indexes and name to the helper;
    # keep this call before any new locals are introduced.
    helper = LayerHelper('similarity_focus', **locals())
    # check attrs
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             "similarity_focus")
    check_type(axis, 'axis', int, "similarity_focus")
    check_type(indexes, 'indexes', list, "similarity_focus")
    # Only the non-batch dimensions of the 4-D input can be selected.
    if axis != 1 and axis != 2 and axis != 3:
        raise ValueError("axis must be 1, 2 or 3.")
    if len(indexes) == 0:
        raise ValueError("indexes can not be empty.")
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='similarity_focus',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={"axis": axis,
               "indexes": indexes})
    return out
def hash(input, hash_size, num_hash=1, name=None):
    """
    :alias_main: paddle.nn.functional.hash
    :alias: paddle.nn.functional.hash,paddle.nn.functional.lod.hash
    :old_api: paddle.fluid.layers.hash
    This OP hash the input to an integer less than the hash_size.
    The hash algorithm we used was xxHash - Extremely fast hash algorithm
    (https://github.com/Cyan4973/xxHash/tree/v0.6.5)
    Args:
        input(Variable): A **Two-Dimensional** LoDTensor with type int32, int64.
             **Only support LoDTensor**.
        hash_size(int): The space size of the hash algorithm. Each output value
             is taken modulo ``hash_size``, so every result is an integer in
             [0, hash_size).
        num_hash(int, optional): The times of hash, default is 1.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
       Variable: A LoDTensor with the same data type as input.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            place = fluid.core.CPUPlace()
            x = fluid.data(name="x", shape=[2,2], dtype="int32", lod_level=1)
            res = fluid.layers.hash(name="res", input=x, hash_size=1000, num_hash=4)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            in1 = np.array([[1,2],[3,4]]).astype("int32")
            print(in1)
            x_i = fluid.create_lod_tensor(in1, [[0, 2]], place)
            res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False)
            print(np.array(res[0]))
            # [[[722]
            #   [407]
            #   [337]
            #   [395]]
            #  [[603]
            #   [590]
            #   [386]
            #   [901]]]
    """
    # Only integer LoDTensors can be hashed; hash_size/num_hash must be ints.
    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash')
    check_type(hash_size, 'hash_size', int, 'hash')
    check_type(num_hash, 'num_hash', int, 'hash')
    # NOTE: **locals() forwards input, hash_size, num_hash and name.
    helper = LayerHelper('hash', **locals())
    # The result is index-like; gradients never flow through it.
    out = helper.create_variable_for_type_inference(
        helper.input_dtype(), stop_gradient=True)
    helper.append_op(
        type='hash',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'num_hash': num_hash,
               'mod_by': hash_size})
    return out
@templatedoc()
def grid_sampler(x, grid, name=None):
    """
    :alias_main: paddle.nn.functional.grid_sampler
    :alias: paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler
    :old_api: paddle.fluid.layers.grid_sampler

    This operation samples input X by using bilinear interpolation based on
    flow field grid, which is usually generated by :code:`affine_grid` . The grid of
    shape [N, H, W, 2] is the concatenation of (x, y) coordinates
    with shape [N, H, W] each, where x is indexing the 4th dimension
    (in width dimension) of input data x and y is indexing the 3rd
    dimension (in height dimension), finally results is the bilinear
    interpolation value of 4 nearest corner points. The output tensor
    shape will be [N, C, H, W].

    .. code-block:: text

        Step 1:
        Get (x, y) grid coordinates and scale to [0, H-1/W-1].

        .. code-block:: text

            grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
            grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)

        Step 2:
        Indices input data X with grid (x, y) in each [H, W] area, and bilinear
        interpolate point value by 4 nearest points.

          wn ------- y_n ------- en
          |           |           |
          |          d_n          |
          |           |           |
         x_w --d_w-- grid--d_e-- x_e
          |           |           |
          |          d_s          |
          |           |           |
          ws ------- y_s ------- wn

        x_w = floor(x)              // west side x coord
        x_e = x_w + 1               // east side x coord
        y_n = floor(y)              // north side y coord
        y_s = y_s + 1               // south side y coord

        d_w = grid_x - x_w          // distance to west side
        d_e = x_e - grid_x          // distance to east side
        d_n = grid_y - y_n          // distance to north side
        d_s = y_s - grid_y          // distance to south side

        wn = X[:, :, y_n, x_w]      // north-west point value
        en = X[:, :, y_n, x_e]      // north-east point value
        ws = X[:, :, y_s, x_w]      // south-east point value
        es = X[:, :, y_s, x_w]      // north-east point value

        output = wn * d_e * d_s + en * d_w * d_s
               + ws * d_e * d_n + es * d_w * d_n

    Args:
        x(Variable): The input tensor, which is a 4-D tensor with shape
                     [N, C, H, W], N is the batch size, C is the channel
                     number, H and W is the feature height and width.
                     The data type is float32 or float64.
        grid(Variable): Input grid tensor of shape [N, H, W, 2]. The
                        data type is float32 or float64.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Variable: Output of shape [N, C, H, W] data samples input X
                  using bilnear interpolation based on input grid.
                  The data type is same as input tensor.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            # use with affine_grid
            x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
            theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
            grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
            out = fluid.layers.grid_sampler(x=x, grid=grid)
    """
    helper = LayerHelper("grid_sampler", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
    check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
                             'grid_sampler')
    # BUGFIX: these guards previously *returned* a ValueError instance
    # instead of raising it, so an invalid input silently handed callers an
    # exception object. They are defensive only (check_variable_and_dtype
    # above already rejects non-Variable inputs), but now raise properly.
    if not isinstance(x, Variable):
        raise ValueError("The x should be a Variable")

    if not isinstance(grid, Variable):
        raise ValueError("The grid should be a Variable")

    out = helper.create_variable_for_type_inference(x.dtype)
    ipts = {'X': x, 'Grid': grid}

    helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out})
    return out
def log_loss(input, label, epsilon=1e-4, name=None):
    """
    :alias_main: paddle.nn.functional.log_loss
    :alias: paddle.nn.functional.log_loss,paddle.nn.functional.loss.log_loss
    :old_api: paddle.fluid.layers.log_loss

    **Negative Log Loss Layer**

    Given predicted probabilities and ground-truth labels, this layer
    computes the negative log loss:

    .. math::

        Out = -label * \\log{(input + \\epsilon)}
              - (1 - label) * \\log{(1 - input + \\epsilon)}

    Args:
        input (Variable|list): A 2-D tensor with shape [N x 1] of predicted
                                probabilities, where N is the batch size.
                                Data type float32.
        label (Variable|list): The ground truth, a 2-D tensor with shape
                                [N x 1] and data type float32.
        epsilon (float, optional): A small number for numerical stability. Default 1e-4.
        name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name` . Usually name is no need to set and None by default.

    Returns:
        Variable: A 2-D tensor with shape [N x 1], the negative log loss.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          label = fluid.data(name='label', shape=[None, 1], dtype='float32')
          prob = fluid.data(name='prob', shape=[None, 1], dtype='float32')
          cost = fluid.layers.log_loss(input=prob, label=label)
    """
    helper = LayerHelper('log_loss', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
    check_variable_and_dtype(label, 'label', ['float32'], 'log_loss')
    loss = helper.create_variable_for_type_inference(dtype=input.dtype)
    op_inputs = {'Predicted': [input], 'Labels': [label]}
    helper.append_op(
        type='log_loss',
        inputs=op_inputs,
        outputs={'Loss': [loss]},
        attrs={'epsilon': epsilon})
    return loss
def add_position_encoding(input, alpha, beta, name=None):
    """
    :alias_main: paddle.nn.functional.add_position_encoding
    :alias: paddle.nn.functional.add_position_encoding,paddle.nn.functional.extension.add_position_encoding
    :old_api: paddle.fluid.layers.add_position_encoding

    This operator computes a weighted sum of the input feature at each
    position in the sequence and the corresponding position encoding. For
    details of position encoding, please refer to `Attention Is All You
    Need <http://arxiv.org/pdf/1706.03762.pdf>`_ .

    The formula is as follows:

    .. math::

        PE(pos, 2i) &= \\sin{(pos / 10000^{2i / P})}   \\\\
        PE(pos, 2i + 1) &= \\cos{(pos / 10000^{2i / P})}  \\\\
        Out(:, pos, i) &= \\alpha * input(:, pos, i) + \\beta * PE(pos, i)

    Where:
      - :math:`PE(pos, 2i)` : the value at even index `2i` for encoding of position `pos`.
      - :math:`PE(pos, 2i + 1)` : the value at odd index `2i+1` for encoding of position `pos`

    Args:
        input(Variable): A Tensor or LoDTensor (lod level is 1). If it is a
                         Tensor, the shape should be `[N, M, P]`, where `N` stands for
                         batch size, `M` for sequence length, `P` for the size of feature
                         dimension. If it is a LoDTensor, the shape should be `[N, P]`,
                         where `N` stands for the total sequence lengths in this mini-batch,
                         `P` for the size of feature. The data type should be float32 or float64.
        alpha(float): Weight coefficient for `input` in the weighted sum.
        beta(float): Weight coefficient for the position encoding in the
                     weighted sum.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Variable: A Tensor or LoDTensor. It has the same shape, data type and lod as `input`.

    Examples:
        .. code-block:: python

          import numpy as np
          import paddle
          import paddle.nn.functional as F

          tensor = np.random.randn(16, 32, 64)
          tensor = paddle.to_tensor(tensor)
          position_tensor = F.add_position_encoding(
                input=tensor, alpha=1.0, beta=1.0)
    """
    # Imperative fast path: call the C++ op directly, no program rewriting.
    if in_dygraph_mode():
        return core.ops.add_position_encoding(input, "alpha", alpha, "beta",
                                              beta)

    helper = LayerHelper('add_position_encoding', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             "add_position_encoding")
    dtype = helper.input_dtype()
    encoded = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type="add_position_encoding",
        inputs={"X": input},
        outputs={"Out": encoded},
        attrs={"alpha": alpha, "beta": beta})
    return encoded
def bilinear_tensor_product(x,
                            y,
                            size,
                            act=None,
                            name=None,
                            param_attr=None,
                            bias_attr=None):
    """
    :api_attr: Static Graph

    **Bilinear Tensor Product Layer**

    This layer performs bilinear tensor product on two inputs.
    For example:

    .. math::
       out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1

    In this formula:
      - :math:`x`: the first input contains M elements, shape is [batch_size, M].
      - :math:`y`: the second input contains N elements, shape is [batch_size, N].
      - :math:`W_{i}`: the i-th learned weight, shape is [M, N].
      - :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
      - :math:`y^\mathrm{T}`: the transpose of :math:`y_{2}`.

    Args:
        x (Variable): 2-D input tensor with shape [batch_size, M]. Data type
            is float32 or float64.
        y (Variable): 2-D input tensor with shape [batch_size, N]. Data type
            should be same as **x**.
        size (int): The dimension of this layer.
        act (str|None): Activation to be applied to the output of this layer. Default None.
        name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name` . Usually name is no need to set and None by default.
        param_attr (ParamAttr|None): To specify the weight parameter attribute.
            Default: None, which means the default weight parameter property is
            used. See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr|None): To specify the bias parameter attribute.
            Default: None, which means the default bias parameter property is
            used. See usage for details in :ref:`api_fluid_ParamAttr` .

    Returns:
        Variable: A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          layer1 = fluid.data("t1", shape=[-1, 5], dtype="float32")
          layer2 = fluid.data("t2", shape=[-1, 4], dtype="float32")
          tensor = fluid.layers.bilinear_tensor_product(x=layer1, y=layer2, size=1000)
    """
    helper = LayerHelper('bilinear_tensor_product', **locals())
    dtype = helper.input_dtype('x')

    # One [M, N] weight matrix per output element: shape [size, M, N].
    weight = helper.create_parameter(
        attr=helper.param_attr,
        shape=[size, x.shape[1], y.shape[1]],
        dtype=dtype,
        is_bias=False)
    result = helper.create_variable_for_type_inference(dtype=dtype)

    op_inputs = {"X": x, "Y": y, "Weight": weight}
    if helper.bias_attr:
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=[1, size], dtype=dtype, is_bias=True)
        op_inputs["Bias"] = bias
    helper.append_op(
        type="bilinear_tensor_product",
        inputs=op_inputs,
        outputs={"Out": result})

    # Apply the optional activation before returning.
    return helper.append_activation(result)
@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
    """
    This operator reads the tensor data out of an input with SelectedRows
    type and outputs it as a LoDTensor.

    .. code-block:: text

        input x is SelectedRows:
           x.rows = [0, 5, 5, 4, 19]
           x.height = 20
           x.value = [[1, 1] [2, 2] [2, 2] [3, 3] [6, 6]]

        Ouput is LoDTensor:
           out.shape = [5, 2]
           out.data = [[1, 1],
                       [2, 2],
                       [2, 2],
                       [3, 3],
                       [6, 6]]

    Args:
        x(SelectedRows): Input with SelectedRows type. The data type is float32, float64, int32 or int64.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: LoDTensor transformed from SelectedRows. The data type is same with input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            b = fluid.default_main_program().global_block()
            input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
            out = fluid.layers.get_tensor_from_selected_rows(input)
    """
    check_type(x, 'x', Variable, 'get_tensor_from_selected_rows')
    # Only SELECTED_ROWS variables carry the rows/value layout this op reads.
    if x.type != core.VarDesc.VarType.SELECTED_ROWS:
        raise TypeError(
            "The type of 'x' in get_tensor_from_selected_rows must be SELECTED_ROWS."
        )
    helper = LayerHelper('get_tensor_from_selected_rows', **locals())
    tensor_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='get_tensor_from_selected_rows',
        inputs={'X': x},
        outputs={'Out': tensor_out},
        attrs={})
    return tensor_out
def shuffle_channel(x, group, name=None):
    """
    This operator shuffles the channels of input x.
    It divide the input channels in each group into :attr:`group` subgroups,
    and obtain a new order by selecting element from every subgroup one by one.

    Please refer to the paper
    https://arxiv.org/pdf/1707.01083.pdf

    .. code-block:: text

        Given a 4-D tensor input with the shape (N, C, H, W):
            input.shape = (1, 4, 2, 2)
            input.data =[[[[0.1, 0.2],
                           [0.2, 0.3]],

                          [[0.3, 0.4],
                           [0.4, 0.5]],

                          [[0.5, 0.6],
                           [0.6, 0.7]],

                          [[0.7, 0.8],
                           [0.8, 0.9]]]]
            Given group: 2
            then we get a 4-D tensor out whth the same shape of input:
            out.shape = (1, 4, 2, 2)
            out.data = [[[[0.1, 0.2],
                          [0.2, 0.3]],

                         [[0.5, 0.6],
                          [0.6, 0.7]],

                         [[0.3, 0.4],
                          [0.4, 0.5]],

                         [[0.7, 0.8],
                          [0.8, 0.9]]]]

    Args:
        x(Variable): The input tensor variable. It should be a 4-D tensor with shape [N, C, H, W]
        group(int): Indicating the counts of subgroups, It should divide the number of channels.

    Returns:
        out(Variable): the channels shuffling result is a tensor variable with the
        same shape and same type as the input.

    Raises:
        TypeError: If group is not an int.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
            out = fluid.layers.shuffle_channel(x=input, group=2)
    """
    helper = LayerHelper("shuffle_channel", **locals())
    # Validate before allocating the output variable so an invalid call does
    # not leave a dangling variable behind in the program.
    if not isinstance(group, int):
        raise TypeError("group must be int type")
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="shuffle_channel",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"group": group})
    return out
@templatedoc()
def temporal_shift(x, seg_num, shift_ratio=0.25, name=None):
    """
    :alias_main: paddle.nn.functional.temporal_shift
    :alias: paddle.nn.functional.temporal_shift,paddle.nn.functional.extension.temporal_shift
    :old_api: paddle.fluid.layers.temporal_shift

    **Temporal Shift Operator**

    ${comment}

    Args:
        x(Variable): ${x_comment}
        seg_num(int): ${seg_num_comment}
        shift_ratio(float): ${shift_ratio_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        out(Variable): The temporal shifting result is a tensor variable with the
        same shape and same data type as the input.

    Raises:
        TypeError: seg_num must be int type.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
            out = fluid.layers.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
    """
    helper = LayerHelper("temporal_shift", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
    # check_type raises TypeError for a non-int seg_num / non-float
    # shift_ratio, so no additional isinstance checks are needed below.
    check_type(seg_num, 'seg_num', int, 'temporal_shift')
    check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="temporal_shift",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"seg_num": seg_num,
               "shift_ratio": shift_ratio})
    return out
class PyFuncRegistry(object):
    """Registry exposing Python callables to the C++ ``py_func`` operator.

    Each instance wraps one Python function, registers itself with the C++
    runtime (obtaining an integer ``id`` the op refers to) and is kept alive
    in the class-level ``_register_funcs`` list so the callable is never
    garbage collected while C++ may still invoke it.
    """

    _register_funcs = []

    def __init__(self, func):
        if func is None or not callable(func):
            raise TypeError('func must be a Python function')

        self._func = func
        # Find named args using reflection. getfullargspec replaces
        # getargspec, which was deprecated since Python 3.0 and removed in
        # Python 3.11; its first three fields (args, varargs, varkw) have
        # the same meaning and positions as getargspec's.
        args = inspect.getfullargspec(self._func)
        if len(args[0]) == 0 and args[1] is None and args[2] is None:
            # Function with no inputs
            self._named_args = None
        else:
            self._named_args = args[0]
        self._id = core._append_python_callable_object_and_return_id(self)
        '''
        Why record self here?

        1. For debug usage. Users can call
           :code:`py_func.registered_func(idx)` method
           to find the registered function corresponding
           to :code:`idx`.

        2. For increasing reference count of self.
           It seems that to release Python object
           whose reference count is 1 would cause
           segmentation fault error in C++ side.
           May be lack of Python GC in C++ side?
        '''
        PyFuncRegistry._register_funcs.append(self)

    @classmethod
    def registered_func(cls, idx):
        """Return the idx-th registered Python function (debug helper)."""
        return cls._register_funcs[idx]._func

    @classmethod
    def registered_func_num(cls):
        """Return the number of registered Python functions."""
        return len(cls._register_funcs)

    @property
    def id(self):
        # Integer handle referenced by the py_func op's attrs.
        return self._id

    def __call__(self, *args):
        """Invoke the wrapped function and normalize the result.

        Positional args are mapped onto the function's named parameters;
        any surplus is forwarded as *args. Every returned item that is not
        already None or a LoDTensor is converted to a CPU LoDTensor.
        """
        if self._named_args is None:
            func_ret = self._func()
        else:
            kwargs = dict()
            idx = 0
            for arg in self._named_args:
                kwargs[arg] = args[idx]
                idx += 1
            func_ret = self._func(*args[idx:], **kwargs)

        if not isinstance(func_ret, (list, tuple)):
            func_ret = (func_ret, )

        ret = []
        for each_ret in func_ret:
            if each_ret is None or isinstance(each_ret, core.LoDTensor):
                ret.append(each_ret)
                continue

            if not isinstance(each_ret, np.ndarray):
                # scalars / nested lists are wrapped into an ndarray first
                each_ret = np.array(each_ret)

            tensor = core.LoDTensor()
            tensor.set(each_ret, core.CPUPlace())
            ret.append(tensor)

        return tuple(ret)
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
    """
    :api_attr: Static Graph

    This OP is used to register customized Python OP to Paddle Fluid. The design
    principe of py_func is that LodTensor and numpy array can be converted to each
    other easily. So you can use Python and numpy API to register a python OP.

    The forward function of the registered OP is ``func`` and the backward function
    of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
    call ``backward_func`` at backward runtime(if ``backward_func`` is not  None).
    ``x`` is the input of ``func``, whose type must be LoDTensor; ``out`` is
    the output of ``func``, whose type can be either LoDTensor or numpy array.

    The input of the backward function ``backward_func`` is ``x``, ``out`` and
    the gradient of ``out``. If some variables of ``out`` have no gradient, the
    relevant input variable of ``backward_func`` is None. If some variables of
    ``x`` do not have a gradient, the user should return None in ``backward_func``.

    The data type and shape of ``out`` should also be set correctly before this
    API is called, and the data type and shape of the gradient of ``out`` and
    ``x`` will be inferred automatically.

    This API can also be used to debug the neural network by setting the ``func``
    as a function that only print variables.

    Args:
        func (callable): The forward function of the registered OP. When the network
            is running, the forward output ``out`` will be calculated according to this
            function and the forward input ``x``. In ``func`` , it's suggested that we
            actively convert LoDTensor into a numpy array, so that we can use Python and
            numpy API arbitrarily. If not, some operations of numpy may not be compatible.
        x (Variable|tuple(Variale)|list[Variale]): The input of the forward function ``func``.
            It can be Variable|tuple(Variale)|list[Variale], where Variable is LoDTensor or
            Tenosor. In addition, Multiple Variable should be passed in the form of tuple(Variale)
            or list[Variale].
        out (Variable|tuple(Variale)|list[Variale]): The output of the forward function ``func``,
            it can be Variable|tuple(Variale)|list[Variale], where Variable can be either LoDTensor
            or numpy array. Since Paddle cannot automatically infer the shape and type of ``out``,
            you must create ``out`` in advance.
        backward_func (callable, optional): The backward function of the registered OP.
            Its default value is None, which means there is no reverse calculation. If
            it is not None, ``backward_func`` is called to calculate the gradient of
            ``x`` when the network is at backward runtime.
        skip_vars_in_backward_input (Variable, optional): It's used to limit the input
            variable list of ``backward_func``, and it can be Variable|tuple(Variale)|list[Variale].
            It must belong to either ``x`` or ``out``. The default  value is None, which means
            that no variables need to be removed from ``x`` and ``out``. If it is not None,
            these variables will not be the input of ``backward_func``. This parameter is only
            useful when ``backward_func`` is not None.

    Returns:
        Variable|tuple(Variale)|list[Variale]: The output ``out`` of the forward function ``func``.

    Examples:
        .. code-block:: python

            # example 1:
            import paddle.fluid as fluid
            import six

            # Creates a forward function, LodTensor can be input directly without
            # being converted into numpy array.
            def tanh(x):
                return np.tanh(x)

            # Skip x in backward function and return the gradient of x
            # LodTensor must be actively converted to numpy array, otherwise,
            # operations such as +/- can't be used.
            def tanh_grad(y, dy):
                return np.array(dy) * (1 - np.square(np.array(y)))

            # Creates a forward function for debugging running networks(print value)
            def debug_func(x):
                print(x)

            def create_tmp_var(name, dtype, shape):
                return fluid.default_main_program().current_block().create_var(
                    name=name, dtype=dtype, shape=shape)

            def simple_net(img, label):
                hidden = img
                for idx in six.moves.range(4):
                    hidden = fluid.layers.fc(hidden, size=200)
                    new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
                        dtype=hidden.dtype, shape=hidden.shape)

                    # User-defined forward and backward
                    hidden = fluid.layers.py_func(func=tanh, x=hidden,
                        out=new_hidden, backward_func=tanh_grad,
                        skip_vars_in_backward_input=hidden)

                    # User-defined debug functions that print out the input LodTensor
                    fluid.layers.py_func(func=debug_func, x=hidden, out=None)

                prediction = fluid.layers.fc(hidden, size=10, act='softmax')
                loss = fluid.layers.cross_entropy(input=prediction, label=label)
                return fluid.layers.mean(loss)

            # example 2:
            # This example shows how to turn LoDTensor into numpy array and
            # use numpy API to register an Python OP
            import paddle.fluid as fluid
            import numpy as np

            def element_wise_add(x, y):
                # LodTensor must be actively converted to numpy array, otherwise,
                # numpy.shape can't be used.
                x = np.array(x)
                y = np.array(y)

                if x.shape != y.shape:
                    raise AssertionError("the shape of inputs must be the same!")

                result = np.zeros(x.shape, dtype='int32')
                for i in range(len(x)):
                    for j in range(len(x[0])):
                        result[i][j] = x[i][j] + y[i][j]

                return result

            def create_tmp_var(name, dtype, shape):
                return fluid.default_main_program().current_block().create_var(
                            name=name, dtype=dtype, shape=shape)

            def py_func_demo():
                start_program = fluid.default_startup_program()
                main_program = fluid.default_main_program()

                # Input of the forward function
                x = fluid.data(name='x', shape=[2,3], dtype='int32')
                y = fluid.data(name='y', shape=[2,3], dtype='int32')

                # Output of the forward function, name/dtype/shape must be specified
                output = create_tmp_var('output','int32', [3,1])

                # Multiple Variable should be passed in the form of tuple(Variale) or list[Variale]
                fluid.layers.py_func(func=element_wise_add, x=[x,y], out=output)

                exe=fluid.Executor(fluid.CPUPlace())
                exe.run(start_program)

                # Feed numpy array to main_program
                input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
                input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
                out = exe.run(main_program,
                            feed={'x':input1, 'y':input2},
                            fetch_list=[output.name])
                print("{0} + {1} = {2}".format(input1, input2, out))

            py_func_demo()

            # Reference output:
            # [[5, 9, 9]   + [[7, 8, 4]  =  [array([[12, 17, 13]
            #  [7, 5, 2]]     [1, 3, 3]]            [8, 8, 5]], dtype=int32)]
    """
    helper = LayerHelper('py_func', **locals())
    # Normalize ``x`` into a list of Variables (None becomes an empty list).
    check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
    if x is None:
        x = []
    elif isinstance(x, Variable):
        x = [x]
    elif isinstance(x, tuple):
        x = list(x)
    elif not isinstance(x, (list, tuple, Variable)):
        # NOTE(review): unreachable after check_type above; kept as a guard.
        raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)')
    # Normalize ``out`` into a list the same way.
    check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
    if out is None:
        out_list = []
    elif isinstance(out, Variable):
        out_list = [out]
    elif isinstance(out, tuple):
        out_list = list(out)
    elif isinstance(out, list):
        out_list = out
    else:
        raise TypeError(
            'Output must be Variable/list(Variable)/tuple(Variable)')

    # Register the forward/backward callables with the C++ side; the op
    # refers to them by integer id, and -1 means "no backward function".
    fwd_func_id = PyFuncRegistry(func).id
    bwd_func_id = PyFuncRegistry(
        backward_func).id if backward_func is not None else -1

    # Shape inference cannot run through arbitrary Python code, so every
    # output Variable must have been created with an explicit shape.
    for each_out in out_list:
        if len(each_out.shape) == 0:
            raise ValueError(
                'Output shapes of py_func op should be provided by users manually'
            )

    # Collect names of forward vars the user wants excluded from the
    # backward function's inputs; each must belong to ``x`` or ``out``.
    backward_skip_vars = set()
    if backward_func is not None and skip_vars_in_backward_input is not None:
        if isinstance(skip_vars_in_backward_input, Variable):
            skip_vars_in_backward_input = [skip_vars_in_backward_input]

        fwd_in_out = [v.name for v in x]
        fwd_in_out.extend([v.name for v in out_list])
        fwd_in_out = set(fwd_in_out)
        backward_skip_vars = set()
        for v in skip_vars_in_backward_input:
            if not v.name in fwd_in_out:
                raise ValueError(
                    'Variable {} is not found in forward inputs and outputs'
                    .format(v.name))
            backward_skip_vars.add(v.name)

    helper.append_op(
        type='py_func',
        inputs={'X': x},
        outputs={'Out': out_list},
        attrs={
            'forward_callable_id': fwd_func_id,
            'backward_callable_id': bwd_func_id,
            'backward_skip_vars': list(backward_skip_vars)
        })
    return out
# For debug usage: expose the registry's lookup helpers as attributes of
# py_func, so users can inspect the registered Python callables, e.g.
# fluid.layers.py_func.registered_func(idx).
py_func.registered_func = PyFuncRegistry.registered_func
py_func.registered_func_num = PyFuncRegistry.registered_func_num
@templatedoc()
def psroi_pool(input,
               rois,
               output_channels,
               spatial_scale,
               pooled_height,
               pooled_width,
               name=None):
    """
    :alias_main: paddle.nn.functional.psroi_pool
    :alias: paddle.nn.functional.psroi_pool,paddle.nn.functional.vision.psroi_pool
    :old_api: paddle.fluid.layers.psroi_pool

    ${comment}

    Parameters:
        input (Variable): ${x_comment}
        rois (Variable): LoDTensor of shape (num_rois, 4) with lod level 1,
                         holding the ROIs (Regions of Interest) to pool over.
                         Each row is [x1, y1, x2, y2], where (x1, y1) is the
                         top-left corner and (x2, y2) the bottom-right corner.
                         The data type is the same as `input`.
        output_channels (int): ${output_channels_comment}
        spatial_scale (float): ${spatial_scale_comment} Default: 1.0
        pooled_height (int): ${pooled_height_comment} Default: 1
        pooled_width (int): ${pooled_width_comment} Default: 1
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        ${out_comment}.

    Return Type:
        Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
            pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
    """
    helper = LayerHelper('psroi_pool', **locals())
    # Validate attribute types up front, in declaration order.
    for attr_val, attr_name, attr_type in (
        (output_channels, 'output_channels', int),
        (spatial_scale, 'spatial_scale', float),
        (pooled_height, 'pooled_height', int),
        (pooled_width, 'pooled_width', int)):
        if not isinstance(attr_val, attr_type):
            raise TypeError("%s must be %s type" % (attr_name,
                                                    attr_type.__name__))
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='psroi_pool',
        inputs={'X': input, 'ROIs': rois},
        outputs={'Out': pool_out},
        attrs={
            'output_channels': output_channels,
            'spatial_scale': spatial_scale,
            'pooled_height': pooled_height,
            'pooled_width': pooled_width
        })
    return pool_out
@templatedoc()
def prroi_pool(input,
               rois,
               spatial_scale=1.0,
               pooled_height=1,
               pooled_width=1,
               batch_roi_nums=None,
               name=None):
    """
    :alias_main: paddle.nn.functional.prroi_pool
    :alias: paddle.nn.functional.prroi_pool,paddle.nn.functional.vision.prroi_pool
    :old_api: paddle.fluid.layers.prroi_pool

    The precise roi pooling implementation for paddle. Reference: https://arxiv.org/pdf/1807.11590.pdf

    Args:
        input (Variable):The input of precise roi pooliing.The shape of input tensor is
                        [N,C,H,W]. Where N is batch size,C is number of input channels,H
                        is height of the feature, and W is the width of the feature.
        rois (Variable): ROIs (Regions of Interest) to pool over.It should be
                        a 2-D LoDTensor or Tensor of shape (num_rois, 4), the lod level
                        is 1 when it is LoDTensor. The LoD include the rois's batch index
                        information. If rois is Tensor, its batch index information should
                        be provided by batch_index.
                        Given as [[x1, y1, x2, y2], ...], (x1, y1) is
                        the top left coordinates, and (x2, y2) is the bottom
                        right coordinates.
        spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width).
                             Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
        pooled_height (integer): The pooled output height. Default: 1.
        pooled_width (integer): The pooled output width. Default: 1.
        batch_roi_nums (Variable): The number of roi for each image in batch. It
                         should be 1-D Tensor, with shape [N] and dtype int64,
                         where N is the batch size. Default: None. Be note: The lod of input should be
                         empty when batch_roi_nums has values;
        name (str, default None): The name of this operation.

    Returns:
        Variable(Tensor):The shape of the returned Tensor is (N, C, pooled_height, pooled_width), with value type float32,float16. N, C denote batch_size and channels of input respectively.

    Examples:
        .. code-block:: python

            ## prroi_pool without batch_roi_num
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
            pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7)

            ## prroi_pool with batch_roi_num
            batchsize=4
            x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32')
            rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32')
            batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64')
            pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num)
    """
    check_variable_and_dtype(input, 'input', ['float32'], 'prroi_pool')
    check_variable_and_dtype(rois, 'rois', ['float32'], 'prroi_pool')
    helper = LayerHelper('prroi_pool', **locals())
    # Validate attribute types, in declaration order.
    for attr_val, attr_name, attr_type in (
        (spatial_scale, 'spatial_scale', float),
        (pooled_height, 'pooled_height', int),
        (pooled_width, 'pooled_width', int)):
        if not isinstance(attr_val, attr_type):
            raise TypeError("%s must be %s type" % (attr_name,
                                                    attr_type.__name__))
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)
    op_inputs = {'X': input, 'ROIs': rois}
    # The per-image ROI counts are only wired in when supplied.
    if batch_roi_nums is not None:
        op_inputs['BatchRoINums'] = batch_roi_nums
    helper.append_op(
        type='prroi_pool',
        inputs=op_inputs,
        outputs={'Out': pool_out},
        attrs={
            'spatial_scale': spatial_scale,
            'pooled_height': pooled_height,
            'pooled_width': pooled_width
        })
    return pool_out
def pixel_shuffle(x, upscale_factor):
    """
    This op rearranges elements in a tensor of shape [N, C, H, W]
    to a tensor of shape [N, C/r**2, H*r, W*r].
    This is useful for implementing efficient sub-pixel convolution
    with a stride of 1/r.
    Please refer to the paper: `Real-Time Single Image and Video Super-Resolution
    Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158v2>`_ .
    by Shi et. al (2016) for more details.

    Parameters:
        x(Variable): 4-D tensor, the data type should be float32 or float64.
        upscale_factor(int): factor to increase spatial resolution.

    Returns:
        Out(Variable): Reshaped tensor according to the new dimension.

    Raises:
        TypeError: If upscale_factor is not an int.
        ValueError: If the square of upscale_factor cannot divide the channels of input.

    Examples:
        .. code-block:: python

            # declarative mode
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[2,9,4,4])
            output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,9,4,4).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            # print(output.shape)
            # (2L, 1L, 12L, 12L)
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
    helper = LayerHelper("pixel_shuffle", **locals())
    # Validate before allocating the output variable so an invalid call does
    # not leave a dangling variable behind in the program.
    if not isinstance(upscale_factor, int):
        raise TypeError("upscale factor must be int type")
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="pixel_shuffle",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"upscale_factor": upscale_factor})
    return out
def fsp_matrix(x, y):
    """
    **FSP matrix op**

    This op calculates the flow of solution procedure (FSP) matrix of two
    4-D Tensor feature maps. Given feature map x with shape [x_channel, h, w]
    and feature map y with shape [y_channel, h, w], the fsp matrix of x and y
    is obtained in two steps:

    1. reshape x into matrix with shape [x_channel, h * w] and reshape and
       transpose y into matrix with shape [h * w, y_channel].
    2. multiply x and y to get fsp matrix with shape [x_channel, y_channel].

    The output is a batch of fsp matrices.

    Args:

        x (Variable): A 4-D Tensor feature map with shape [batch_size, x_channel, height, width].
                      A Tensor with type float32, float64.
        y (Variable): A 4-D Tensor feature map with shape [batch_size, y_channel, height, width].
                      The y_channel can be different with the x_channel of Input(X)
                      while the other dimensions must be the same with Input(X)'s. A Tensor with
                      type float32, float64.

    Returns:

        fsp matrix (Variable): The output of FSP op with shape [batch_size, x_channel, y_channel].
        The x_channel is the channel of x and the y_channel is the channel of y. A Tensor with
        type float32, float64.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.data(name='data', shape=[None, 3, 32, 32])
            feature_map_0 = fluid.layers.conv2d(data, num_filters=2,
                                                filter_size=3)
            feature_map_1 = fluid.layers.conv2d(feature_map_0, num_filters=2,
                                                filter_size=1)
            loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
    helper = LayerHelper('fsp_matrix', **locals())
    # Output dtype follows the first input's dtype.
    out_dtype = helper.input_dtype(input_param_name='x')
    fsp_out = helper.create_variable_for_type_inference(dtype=out_dtype)
    helper.append_op(
        type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': fsp_out})
    return fsp_out
def continuous_value_model(input, cvm, use_cvm=True):
    """
    **continuous_value_model layers**

    Used in CTR projects to either keep (in log form) or strip the show and
    click statistics stored in the first two columns of :attr:`input`.

    :attr:`input` is an embedding vector of shape :math:`[N, D]` (N is the
    batch size, D is ``2 + embedding dim``), whose first two columns hold the
    show and click values.

    - If :attr:`use_cvm` is True, show/click are replaced by
      :math:`log(show)` and :math:`log(click)` and the output keeps shape
      :math:`[N, D]`.
    - If :attr:`use_cvm` is False, the two columns are dropped and the output
      has shape :math:`[N, D - 2]`.

    :attr:`cvm` carries the show/click info with shape :math:`[N, 2]`.

    Args:
        input (Variable): 2-D LoDTensor of shape :math:`[N, D]` with
            ``lod level = 1``; dtype float32 or float64.
        cvm (Variable): 2-D Tensor of shape :math:`[N, 2]` holding show and
            click; dtype float32 or float64.
        use_cvm (bool): Whether to keep the show/click columns. If True the
            output width equals the input width, otherwise it is ``D - 2``.

    Returns:
        Variable: 2-D LodTensor of shape :math:`[N, M]` with the same dtype
        as :attr:`input`; M equals D when :attr:`use_cvm` is True and D - 2
        otherwise.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(name="input", shape=[64, 1], dtype="int64")
            label = fluid.data(name="label", shape=[64, 1], dtype="int64")
            embed = fluid.layers.embedding(
                            input=input,
                            size=[100, 11],
                            dtype='float32')
            ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1)
            show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32')
            show_clk.stop_gradient = True
            input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True)
    """
    cvm_helper = LayerHelper('cvm', **locals())
    result = cvm_helper.create_variable(dtype=input.dtype)
    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'cvm')
    cvm_helper.append_op(
        type='cvm',
        inputs={'X': [input], 'CVM': [cvm]},
        outputs={'Y': [result]},
        attrs={"use_cvm": use_cvm})
    return result
def where(condition):
    """
    Finds the coordinates of every true element of `condition`.

    Args:
        condition(Variable): A bool tensor of rank at least 1.

    Returns:
        Variable: a rank-2 int64 tensor; each row is the coordinate of one
        true element of `condition`.

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             import paddle.fluid.layers as layers
             import numpy as np

             # condition is a tensor [True, False, True]
             condition = layers.assign(np.array([1, 0, 1], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[0], [2]]

             # condition is a tensor [[True, False], [False, True]]
             condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[0, 0], [1, 1]]

             # condition is a tensor [False, False, False]
             condition = layers.assign(np.array([0, 0, 0], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[]]
    """
    idx_helper = LayerHelper("where_index", **locals())
    # Dygraph mode executes the C++ kernel eagerly and returns immediately.
    if in_dygraph_mode():
        return core.ops.where_index(condition)

    coords = idx_helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)
    idx_helper.append_op(
        type='where_index',
        inputs={'Condition': condition},
        outputs={'Out': [coords]})
    return coords
@deprecated(since="2.0.0", update_to="paddle.sign")
def sign(x):
    """
    Returns the sign of each element of `x`: 1 for positive, -1 for negative
    and 0 for zero.

    Args:
        x(Variable|numpy.ndarray): N-D tensor or N-D numpy array; input data
            type float32 or float64.

    Returns:
        Variable: sign tensor with the same shape and dtype as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np

          # [1.0, 0.0, -1.0]
          data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32'))
    """
    sign_helper = LayerHelper("sign", **locals())
    check_type(x, 'x', (Variable, np.ndarray), 'sign')
    # Raw numpy input is first materialized as a framework variable.
    if isinstance(x, np.ndarray):
        x = assign(x)
    check_dtype(x.dtype, 'x', ['float16', 'float32', 'float64'], 'sign')
    result = sign_helper.create_variable_for_type_inference(dtype=x.dtype)
    sign_helper.append_op(
        type='sign', inputs={'X': [x]}, outputs={'Out': [result]})
    return result
def unique(x, dtype='int32'):
    """
    Computes the distinct values of 1-D tensor `x`, plus an index tensor that
    records where each element of `x` lives in the unique tensor.

    Args:
        x(Tensor): A 1-D input tensor; dtype float32, float64, int32 or int64.
        dtype(np.dtype|str, optional): dtype of the index tensor, either
            int32 or int64. Default: int32.

    Returns:
        tuple: (out, index). `out` holds the unique values of `x` (same dtype
        as `x`); `index` maps each position of `x` into `out`, so the original
        `x` can be recovered from the pair.

    Examples:
        .. code-block:: python

             import numpy as np
             import paddle.fluid as fluid
             x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
             out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
    """
    check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
                             "unique")
    uniq_helper = LayerHelper("unique", **locals())
    uniq = uniq_helper.create_variable_for_type_inference(dtype=x.dtype)
    inverse_idx = uniq_helper.create_variable_for_type_inference(dtype)
    uniq_helper.append_op(
        type='unique',
        inputs={'X': x},
        attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
        outputs={'Out': [uniq], 'Index': [inverse_idx]})
    return uniq, inverse_idx
def unique_with_counts(x, dtype='int32'):
    """
    Computes the distinct values of `x`, the per-value occurrence counts, and
    an index tensor mapping each element of `x` into the unique tensor.

    **NOTICE**: Only the Tensor variable type is supported.

    Args:
        x(Variable): A 1-D input tensor of shape :math:`[N]`; dtype float32,
            float64, int32 or int64.
        dtype(np.dtype|core.VarDesc.VarType|str): dtype of the count and
            index tensors, int32 or int64. Default: int32.

    Returns:
        tuple: (out, index, count), all Tensors. `out` holds the unique
        values of `x` with shape :math:`[K]` (K may differ from N) and the
        same dtype as `x`. `index` has shape :math:`[N]` and maps each
        position of `x` into `out`. `count` has shape :math:`[K]` and gives
        the number of occurrences of each unique value.

    Examples:
        .. code-block:: python

             import numpy as np
             import paddle.fluid as fluid
             x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
             out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
                                                        # count is [1, 3, 1, 1]
            # x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,)
    """
    check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
                             "unique_with_counts")
    # The auxiliary outputs only support the two integer index dtypes.
    if dtype not in ('int32', 'int64'):
        raise TypeError(
            "Op unique_with_counts, index dtype must be int32 or int64")

    if x is None or len(x.shape) != 1:
        raise ValueError(
            "Op unique_with_counts, x must not be null and size of dim must be 1"
        )

    uwc_helper = LayerHelper("unique_with_counts", **locals())
    uniq = uwc_helper.create_variable_for_type_inference(dtype=x.dtype)
    inverse_idx = uwc_helper.create_variable_for_type_inference(dtype)
    occurrence = uwc_helper.create_variable_for_type_inference(dtype)
    uwc_helper.append_op(
        type='unique_with_counts',
        inputs={'X': x},
        attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
        outputs={'Out': [uniq],
                 'Index': [inverse_idx],
                 'Count': [occurrence]})
    return uniq, inverse_idx, occurrence
def deformable_conv(input,
                    offset,
                    mask,
                    num_filters,
                    filter_size,
                    stride=1,
                    padding=0,
                    dilation=1,
                    groups=None,
                    deformable_groups=None,
                    im2col_step=None,
                    param_attr=None,
                    bias_attr=None,
                    modulated=True,
                    name=None):
    """
    :api_attr: Static Graph
    **Deformable Convolution op**
    Compute 2-D deformable convolution on 4-D input.
    Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
    Deformable Convolution v2:
    .. math::
        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
    Deformable Convolution v1:
    .. math::
        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
    Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
    <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
    Example:
        - Input:
          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
          Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
          Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})`
          Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})`
        - Output:
          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
        Where
        .. math::
            H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
    Args:
        input (Variable): The input image with [N, C, H, W] format. A Tensor with type
            float32, float64.
        offset (Variable): The input coordinate offset of deformable convolution layer.
            A Tensor with type float32, float64.
        Mask (Variable, Optional): The input mask of deformable convolution layer.
            A Tensor with type float32, float64. It should be None when you use
            deformable convolution v1.
        num_filters(int): The number of filter. It is as same as the output
            image channel.
        filter_size (int|tuple): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_H, filter_size_W).
            Otherwise, the filter will be a square.
        stride (int|tuple): The stride size. If stride is a tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. Default: stride = 1.
        padding (int|tuple): The padding size. If padding is a tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding. Default: padding = 0.
        dilation (int|tuple): The dilation size. If dilation is a tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. Default: dilation = 1.
        groups (int): The groups number of the deformable conv layer. According to
            grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1.
        deformable_groups (int): The number of deformable group partitions.
            Default: deformable_groups = 1.
        im2col_step (int): Maximum number of images per im2col computation;
            The total batch size should be devisable by this value or smaller
            than this value; if you face out of memory problem, you can try
            to use a smaller value here.
            Default: im2col_step = 64.
        param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
            of deformable conv. If it is set to None or one attribute of ParamAttr,
            deformable conv will create ParamAttr as param_attr.
            If the Initializer of the param_attr is not set, the parameter is
            initialized with :math:`Normal(0.0, std)`, and the
            :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
            deformable conv layer. If it is set to False, no bias will be added
            to the output units. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        modulated (bool): Make sure which version should be used between v1 and v2, where v2 is \
            used while True. Default: True.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.
    Returns:
        Variable: The tensor variable storing the deformable convolution \
                  result. A Tensor with type float32, float64.
    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
                    groups mismatch.
    Examples:
        .. code-block:: python
          #deformable conv v2:
          import paddle.fluid as fluid
          C_in, H_in, W_in = 3, 32, 32
          filter_size, deformable_groups = 3, 1
          data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
          offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
                                             num_filters=2, filter_size=filter_size, padding=1, modulated=True)
          #deformable conv v1:
          import paddle.fluid as fluid
          C_in, H_in, W_in = 3, 32, 32
          filter_size, deformable_groups = 3, 1
          data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
          offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
                                             num_filters=2, filter_size=filter_size, padding=1, modulated=False)
    """
    # Validate dtypes up front; mask is optional (must be None for v1).
    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'deformable_conv')
    check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
                             'deformable_conv')
    check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
    num_channels = input.shape[1]
    assert param_attr is not False, "param_attr should not be False here."
    helper = LayerHelper('deformable_conv', **locals())
    dtype = helper.input_dtype()
    # NOTE(review): these two isinstance checks look unreachable —
    # check_variable_and_dtype above already rejects non-Variable inputs.
    if not isinstance(input, Variable):
        raise TypeError("Input of deformable_conv must be Variable")
    if not isinstance(offset, Variable):
        raise TypeError("Input Offset of deformable_conv must be Variable")
    # Grouped convolution: each filter only sees num_channels // groups
    # input channels; channel count must divide evenly.
    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        num_filter_channels = num_channels // groups
    # Normalize scalar hyper-parameters to [H, W] pairs.
    filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
    stride = utils.convert_to_list(stride, 2, 'stride')
    padding = utils.convert_to_list(padding, 2, 'padding')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')
    input_shape = input.shape
    filter_shape = [num_filters, int(num_filter_channels)] + filter_size
    def _get_default_param_initializer():
        # Normal(0, sqrt(2 / fan_in)) initialization, per the docstring.
        filter_elem_num = filter_size[0] * filter_size[1] * num_channels
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)
    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())
    pre_bias = helper.create_variable_for_type_inference(dtype)
    # v2 ('deformable_conv') takes a modulation Mask; v1 does not.
    if modulated:
        helper.append_op(
            type='deformable_conv',
            inputs={
                'Input': input,
                'Filter': filter_param,
                'Offset': offset,
                'Mask': mask,
            },
            outputs={"Output": pre_bias},
            attrs={
                'strides': stride,
                'paddings': padding,
                'dilations': dilation,
                'groups': groups,
                'deformable_groups': deformable_groups,
                'im2col_step': im2col_step,
            })
    else:
        helper.append_op(
            type='deformable_conv_v1',
            inputs={
                'Input': input,
                'Filter': filter_param,
                'Offset': offset,
            },
            outputs={"Output": pre_bias},
            attrs={
                'strides': stride,
                'paddings': padding,
                'dilations': dilation,
                'groups': groups,
                'deformable_groups': deformable_groups,
                'im2col_step': im2col_step,
            })
    # Bias is broadcast over the channel dimension (dims [1, 2)).
    output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    return output
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
    """
    :alias_main: paddle.nn.functional.unfold
    :alias: paddle.nn.functional.unfold,paddle.nn.functional.common.unfold
    :old_api: paddle.fluid.layers.unfold

    Extracts sliding local blocks from a batched 4-D image tensor, also known
    as im2col. Every block covered by the convolution window is rearranged
    into one column of the output; sliding the window over the feature map
    produces the full column buffer.

    For input :math:`x` of shape [N, C, H, W], the output shape
    [N, Cout, Lout] follows from:

    .. math::

        dkernel[0] &= dilations[0] \\times (kernel\_sizes[0] - 1) + 1

        dkernel[1] &= dilations[1] \\times (kernel\_sizes[1] - 1) + 1

        hout &= \\frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1

        wout &= \\frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1

        Cout &= C \\times kernel\_sizes[0] \\times kernel\_sizes[1]

        Lout &= hout \\times wout

    Parameters:
        x(Varaible): 4-D Tensor of format [N, C, H, W]; dtype float32 or
            float64.
        kernel_sizes(int|list): convolution kernel size [k_h, k_w], or an
            integer k treated as [k, k].
        strides(int|list): strides [stride_h, stride_w], or an integer
            treated as [stride, stride]. Default: [1, 1].
        paddings(int|list): paddings as
            [padding_top, padding_left, padding_bottom, padding_right],
            as [padding_h, padding_w] (expanded to
            [padding_h, padding_w, padding_h, padding_w]), or as a single
            integer padding (expanded to all four sides).
            Default: [0, 0, 0, 0].
        dilations(int|list): kernel dilations [dilation_h, dilation_w], or an
            integer treated as [dilation, dilation]. Default: [1, 1].
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        The column buffer of sliding local blocks, shape [N, Cout, Lout] as
        described above; Cout is the number of values per block and Lout the
        number of blocks. Same dtype as :math:`x`.

    Return Type:
        Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name = 'data', shape = [100, 3, 224, 224], dtype = 'float32')
            y = fluid.layers.unfold(x, [3, 3], 1, 1, 1)
    """
    helper = LayerHelper("unfold", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
    assert len(x.shape) == 4, \
        "input should be the format of [N, C, H, W]"

    def _as_pair(value, arg_name):
        # A scalar becomes [v, v]; anything else must be a 2-element list.
        if isinstance(value, int):
            return [value, value]
        assert isinstance(value, list) and (len(value) == 2), \
            "%s should either be an integer or a list of two integers" % arg_name
        return value

    kernel_sizes = _as_pair(kernel_sizes, "kernel_sizes")
    strides = _as_pair(strides, "strides")
    dilations = _as_pair(dilations, "dilations")

    # Paddings additionally allow the 4-element [top, left, bottom, right]
    # form; a 2-element [h, w] is tiled to [h, w, h, w].
    if isinstance(paddings, int):
        paddings = [paddings] * 4
    elif isinstance(paddings, list) and len(paddings) == 2:
        paddings = paddings * 2
    elif isinstance(paddings, list) and len(paddings) == 4:
        pass
    elif isinstance(paddings, list):
        raise ValueError(
            "paddings should either be an integer or a list of 2 or 4 integers"
        )
    else:
        raise ValueError(
            "Unexpected type of paddings, it should be either an integer or a list"
            "of 2 or 4 integers")

    col_buffer = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="unfold",
        inputs={"X": x},
        outputs={"Y": col_buffer},
        attrs={
            "kernel_sizes": kernel_sizes,
            "strides": strides,
            "paddings": paddings,
            "dilations": dilations
        })
    return col_buffer
def deformable_roi_pooling(input,
                           rois,
                           trans,
                           no_trans=False,
                           spatial_scale=1.0,
                           group_size=[1, 1],
                           pooled_height=1,
                           pooled_width=1,
                           part_size=None,
                           sample_per_part=1,
                           trans_std=0.1,
                           position_sensitive=False,
                           name=None):
    """
    :alias_main: paddle.nn.functional.deformable_roi_pooling
    :alias: paddle.nn.functional.deformable_roi_pooling,paddle.nn.functional.vision.deformable_roi_pooling
    :old_api: paddle.fluid.layers.deformable_roi_pooling
    Deformable ROI Pooling Layer
    Performs deformable region-of-interest pooling on inputs. As described
    in `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_, it will get offset for each bin after
    roi pooling so that pooling at correct region. Batch_size will change to the number of region bounding boxes after deformable_roi_pooling.
    The operation has three steps:
    1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height.
    2. Add offset to pixel in ROI to get new location and the new value which are computed directly through
       bilinear interpolation with four nearest pixel.
    3. Sample several points in each bin to get average values as output.
    Args:
        input (Variable):The input of deformable roi pooling and it is tensor which value type is float32. The shape of input is
                         [N, C, H, W]. Where N is batch size, C is number of input channels,
                         H is height of the feature, and W is the width of the feature.
        rois (Variable): ROIs (Regions of Interest) with type float32 to pool over. It should be
                         a 2-D LoDTensor of shape (num_rois, 4), and the lod level
                         is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
                         the top left coordinates, and (x2, y2) is the bottom
                         right coordinates, which value type is float32.
        trans (Variable): Offset of features on ROIs while pooling which value type is float32. The format is [N, C, H, W], where
                          N is number of ROIs, C is number of channels, which indicate the offset distance
                          in the x and y directions, H is pooled height, and W is pooled width.
        no_trans (bool): Whether to add offset to get new value or not while roi pooling, which value with type bool is True or False.
                         If value is True, no offset will be added in operation. Default: False.
        spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width), which value type is float32.
                               Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
        group_size (list|tuple): The number of groups which input channels are divided and the input is list or tuple, which value type is int32. (eg.number of input channels
                          is k1 * k2 * (C + 1), which k1 and k2 are group width and height and C+1 is number of output
                          channels.) eg.(4, 6), which 4 is height of group and 6 is width of group. Default: [1, 1].
        pooled_height (int): The pooled output height which value type is int32. Default: 1.
        pooled_width (int): The pooled output width which value type is int32. Default: 1.
        part_size (list|tuple): The height and width of offset which values in list or tuple is int32, eg.(4, 6), which height is 4 and width is 6, and values always equal to pooled_height \
                         and pooled_width. Default: if None, default value is [pooled_height, pooled_width].
        sample_per_part (int): The number of samples in each bin which value type is int32. If value is bigger, it will consume more performance. Default: 1.
        trans_std (float): Coefficient of offset which value type is float32. It controls weight of offset. Default: 0.1.
        position_sensitive (bool): Whether to choose deformable psroi pooling mode or not, and value type is bool(True or False). If value is False, input dimension equals to output dimension. \
                                   If value is True, input dimension should be output dimension * pooled_height * pooled_width. Default: False.
        name (str|None): Name of layer. Default: None.
    Returns:
        Variable: Output of deformable roi pooling is that, if position sensitive is False, input dimension equals to output dimension. If position sensitive is True,\
                  input dimension should be the result of output dimension divided by pooled height and pooled width.
    Examples:
      .. code-block:: python
        # position_sensitive=True
        import paddle.fluid as fluid
        input = fluid.data(name="input",
                           shape=[2, 192, 64, 64],
                           dtype='float32')
        rois = fluid.data(name="rois",
                          shape=[-1, 4],
                          dtype='float32',
                          lod_level=1)
        trans = fluid.data(name="trans",
                           shape=[2, 384, 64, 64],
                           dtype='float32')
        x = fluid.layers.deformable_roi_pooling(input=input,
                                                rois=rois,
                                                trans=trans,
                                                no_trans=False,
                                                spatial_scale=1.0,
                                                group_size=(1, 1),
                                                pooled_height=8,
                                                pooled_width=8,
                                                part_size=(8, 8),
                                                sample_per_part=4,
                                                trans_std=0.1,
                                                position_sensitive=True)
        # position_sensitive=False
        import paddle.fluid as fluid
        input = fluid.data(name="input",
                           shape=[2, 192, 64, 64],
                           dtype='float32')
        rois = fluid.data(name="rois",
                          shape=[-1, 4],
                          dtype='float32',
                          lod_level=1)
        trans = fluid.data(name="trans",
                           shape=[2, 384, 64, 64],
                           dtype='float32')
        x = fluid.layers.deformable_roi_pooling(input=input,
                                                rois=rois,
                                                trans=trans,
                                                no_trans=False,
                                                spatial_scale=1.0,
                                                group_size=(1, 1),
                                                pooled_height=8,
                                                pooled_width=8,
                                                part_size=(8, 8),
                                                sample_per_part=4,
                                                trans_std=0.1,
                                                position_sensitive=False)
    """
    # Validate tensor dtypes and list/tuple hyper-parameters up front.
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_variable_and_dtype(trans, 'trans', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_type(group_size, 'group_size', (list, tuple),
               'deformable_roi_pooling')
    if part_size is not None:
        check_type(part_size, 'part_size', (list, tuple),
                   'deformable_roi_pooling')

    input_channels = input.shape[1]
    if not position_sensitive:
        output_channels = input_channels
    else:
        # BUGFIX: the 'output_dim' op attribute is an integer; Python 3 true
        # division ('/') produced a float here. Floor division preserves the
        # intended value (input channels must be a multiple of the bin count).
        output_channels = input_channels // pooled_height // pooled_width

    # Offset map defaults to one entry per output bin.
    if part_size is None:
        part_height = pooled_height
        part_width = pooled_width
        part_size = [part_height, part_width]
    part_size = utils.convert_to_list(part_size, 2, 'part_size')
    group_size = utils.convert_to_list(group_size, 2, 'group_size')
    helper = LayerHelper('deformable_psroi_pooling', **locals())
    dtype = helper.input_dtype()
    output = helper.create_variable_for_type_inference(dtype)
    top_count = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="deformable_psroi_pooling",
        inputs={"Input": input,
                "ROIs": rois,
                "Trans": trans},
        outputs={"Output": output,
                 "TopCount": top_count},
        attrs={
            "no_trans": no_trans,
            "spatial_scale": spatial_scale,
            "output_dim": output_channels,
            "group_size": group_size,
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "part_size": part_size,
            "sample_per_part": sample_per_part,
            "trans_std": trans_std
        })
    return output
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
    """
    Recomputes the `input` indices relative to the offset of one shard. The
    index range is split evenly into `nshards` shards; an index belonging to
    shard `shard_id` is rebased onto that shard's offset, any other index is
    replaced with `ignore_value`:
    ::

        shard_size = (index_num + nshards - 1) // nshards
        y = x % shard_size if x // shard_size == shard_id else ignore_value

    NOTE: If the length of indices cannot be evely divided by the shard number,
    the size of the last shard will be less than the calculated `shard_size`

    Examples:
    ::

        Input:
          X.shape = [4, 1]
          X.data = [[1], [6], [12], [19]]
          index_num = 20
          nshards = 2
          ignore_value = -1

        if shard_id == 0, we get:
          Out.shape = [4, 1]
          Out.data = [[1], [6], [-1], [-1]]

        if shard_id == 1, we get:
          Out.shape = [4, 1]
          Out.data = [[-1], [-1], [2], [9]]

    Args:
        - **input** (Variable): Input indices, last dimension must be 1.
        - **index_num** (scalar): An integer defining the range of the index.
        - **nshards** (scalar): The number of shards
        - **shard_id** (scalar): The index of the current shard
        - **ignore_value** (scalar): An integer value out of sharded index range

    Returns:
        Variable: The sharded index of input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            batch_size = 32
            label = fluid.data(name="label", shape=[batch_size, 1], dtype="int64")
            shard_label = fluid.layers.shard_index(input=label,
                                                   index_num=20,
                                                   nshards=2,
                                                   shard_id=0)
    """
    check_variable_and_dtype(input, 'input', ['int64'], 'shard_index')
    op_type = 'shard_index'
    # shard_id must address one of the nshards partitions.
    if not 0 <= shard_id < nshards:
        raise ValueError('The shard_id(%d) should be in [0, %d)' %
                         (shard_id, nshards))

    helper = LayerHelper(op_type, **locals())
    sharded = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=op_type,
        inputs={'X': [input]},
        outputs={'Out': sharded},
        attrs={
            'index_num': index_num,
            'nshards': nshards,
            'shard_id': shard_id,
            'ignore_value': ignore_value
        },
        stop_gradient=True)
    return sharded
@templatedoc()
def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
    """
    :alias_main: paddle.nn.functional.hard_swish
    :alias: paddle.nn.functional.hard_swish,paddle.nn.functional.activation.hard_swish
    :old_api: paddle.fluid.layers.hard_swish

    Implements the hard_swish activation, introduced in MobileNetV3 as a
    numerically stabler and cheaper alternative to swish
    (https://arxiv.org/pdf/1905.02244.pdf):

    .. math::

        out = \\frac{x * (min(max(0, x+offset), threshold))}{scale}

    ``threshold`` and ``scale`` should be positive; ``offset`` may have either
    sign. The defaults are the recommended settings.

    Args:
        x (Variable): Input feature, multi-dimensional Tensor; dtype float32
            or float64.
        threshold (float, optional): Clipping threshold. Default: 6.0
        scale (float, optional): Scale factor. Default: 6.0
        offset (float, optional): Offset added to x. Default: 3.0
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor with the same shape and dtype as the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            DATATYPE='float32'

            x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)

            x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
            y = fluid.layers.hard_swish(x)

            place = fluid.CPUPlace()
            #place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
            print(out)  # [[0.66666667, 1.66666667,3., 4.]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_swish')

    hs_helper = LayerHelper('hard_swish', **locals())
    result = hs_helper.create_variable_for_type_inference(dtype=x.dtype)
    hs_helper.append_op(
        type='hard_swish',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={'threshold': threshold, 'scale': scale, 'offset': offset})
    return result
@templatedoc()
def mish(x, threshold=20, name=None):
    """
    Mish activation operator.

    Applies ``out = x * tanh(softplus(x))`` elementwise, where softplus is
    approximated for inputs whose absolute value exceeds ``threshold``. See
    `Mish: A Self Regularized Non-Monotonic Neural Activation Function
    <https://arxiv.org/abs/1908.08681>`_.

    Args:
        x (Variable): Input feature, a multi-dimensional Tensor of data type
            float32 or float64.
        threshold (float|int): Positive cut-off used by the softplus
            approximation inside the operator. Default: 20.
        name (str, optional): Name for the layer. Normally there is no need
            to set it; see :ref:`api_guide_Name`. Default: None.

    Returns:
        Variable: Output tensor with the same shape and data type as ``x``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            DATATYPE='float32'
            x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
            x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
            y = fluid.layers.mish(x)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
            print(out)  # [[0.66666667, 1.66666667, 3., 4.]]
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish')
    check_type(threshold, 'threshold', (float, int), 'mish')
    assert threshold > 0, "threshold of mish should be greater than 0, " \
                          "but got {}".format(threshold)

    helper = LayerHelper('mish', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    op_attrs = {'threshold': threshold or -1}
    helper.append_op(
        type='mish', inputs={'X': x}, outputs={'Out': result}, attrs=op_attrs)
    return result
def gather_tree(ids, parents):
    """
    Backtrace beam-search results to recover the full selected sequences.

    Both ``ids`` (the ids chosen at every step) and ``parents`` (the beam
    index each choice came from) use the layout
    ``[max_time, batch_size, beam_size]``. Starting at the last time step,
    the op follows the parent pointers backwards and gathers the ids along
    each path.

    Args:
        ids(Variable): Tensor of shape ``[length, batch_size, beam_size]``
            with data type int32 or int64, holding the selected ids of all
            time steps.
        parents(Variable): Tensor of the same shape and data type as ``ids``,
            holding the parent beam index of every selected id.

    Returns:
        Variable: Tensor of the same shape and data type as ``ids``
        containing the backtraced full sequences.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            ids = fluid.layers.data(name='ids',
                                    shape=[5, 2, 2],
                                    dtype='int64',
                                    append_batch_size=False)
            parents = fluid.layers.data(name='parents',
                                        shape=[5, 2, 2],
                                        dtype='int64',
                                        append_batch_size=False)
            final_sequences = fluid.layers.gather_tree(ids, parents)
    """
    helper = LayerHelper('gather_tree', **locals())
    # Validate both operands with the same dtype whitelist, ids first.
    for var, var_name in ((ids, 'ids'), (parents, 'parents')):
        check_variable_and_dtype(var, var_name, ['int32', 'int64'],
                                 'gather_tree')
    final_ids = helper.create_variable_for_type_inference(dtype=ids.dtype)

    helper.append_op(
        type="gather_tree",
        inputs={"Ids": ids, "Parents": parents},
        outputs={"Out": final_ids})

    return final_ids
@deprecated(since="2.0.0", update_to="paddle.uniform")
@templatedoc()
def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
                   name=None):
    """
    Return a Tensor of the given ``shape`` and ``dtype`` whose elements are
    drawn independently from a uniform distribution over [``min``, ``max``).

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor. A list or tuple
            may mix Python ints and 1-D int32/int64 Tensors of shape [1];
            alternatively a single 1-D int32/int64 Tensor may be given.
        dtype(str|np.dtype|core.VarDesc.VarType, optional): Output data type,
            float32 or float64. Default: float32.
        min(float|int, optional): Inclusive lower bound of the sampled range.
            Default: -1.0.
        max(float|int, optional): Exclusive upper bound of the sampled range.
            Default: 1.0.
        seed(int, optional): Random seed; 0 means a system-generated seed.
            Note that a non-zero seed makes every call produce the same
            numbers. Default: 0.
        name(str, optional): Normally there is no need to set it; see
            :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: Tensor filled with uniform samples, of ``shape`` and
        ``dtype``.

    Raises:
        TypeError: If ``shape`` is not list, tuple or Tensor.
        TypeError: If ``dtype`` is not float32 or float64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            # attr shape as a plain list
            result_1 = fluid.layers.uniform_random(shape=[3, 4])

            # attr shape mixing Tensors and ints
            dim_1 = fluid.layers.fill_constant([1], "int64", 2)
            dim_2 = fluid.layers.fill_constant([1], "int32", 3)
            result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2])

            # attr shape as a 1-D int Tensor
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = fluid.layers.uniform_random(var_shape)
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    # Imperative (dygraph) mode: invoke the C++ op directly.
    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return core.ops.uniform_random(
            'shape', shape, 'min', float(min), 'max', float(max), 'seed',
            seed, 'dtype', dtype)

    check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform_random/rand')

    # Static graph: Tensor entries of `shape` become op inputs, plain ints
    # become op attributes.
    inputs = {}
    attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand')

    helper = LayerHelper("uniform_random", **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="uniform_random", inputs=inputs, attrs=attrs,
        outputs={"Out": out})
    return out
def unbind(input, axis=0):
    """
    Removes a tensor dimension, then split the input tensor into multiple sub-Tensors.

    Args:
        input (Variable): The input variable which is an N-D Tensor, data type
            being float32, float64, int32 or int64.
        axis (int|np.integer, optional): The dimension along which to unbind.
            If :math:`axis < 0`, the dimension to unbind along is
            :math:`rank(input) + axis`. Default is 0.

    Returns:
        list(Variable): The list of segmented Tensor variables, one per slice
        along ``axis``.

    Raises:
        TypeError: If ``axis`` is not an integer.

    Example:
        .. code-block:: python

            import paddle
            # input is a variable which shape is [3, 4, 5]
            input = paddle.fluid.data(
                name="input", shape=[3, 4, 5], dtype="float32")
            [x0, x1, x2] = paddle.tensor.unbind(input, axis=0)
            # x0.shape [4, 5]
            # x1.shape [4, 5]
            # x2.shape [4, 5]
            [x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1)
            # x0.shape [3, 5]
            # x1.shape [3, 5]
            # x2.shape [3, 5]
            # x3.shape [3, 5]
    """
    helper = LayerHelper("unbind", **locals())
    check_type(input, 'input', (Variable), 'unbind')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
                'unbind')
    # Normalise NumPy integer scalars (e.g. np.int64) to a plain Python int
    # BEFORE the type check. Previously the isinstance(axis, int) check ran
    # first, so NumPy integers were rejected with TypeError and the
    # np.generic branch was unreachable; np.asscalar is also removed in
    # modern NumPy, so use int() instead.
    if isinstance(axis, np.generic):
        axis = int(axis)
    if not isinstance(axis, int):
        raise TypeError("The type of 'axis' must be int, but received %s." %
                        (type(axis)))
    input_shape = input.shape
    # Support negative axes by counting from the last dimension.
    axis_ = axis if axis >= 0 else len(input_shape) + axis
    num = input_shape[axis_]
    # One output variable per slice along the unbound dimension; reuse the
    # dtype already resolved above instead of querying the helper again.
    outs = [
        helper.create_variable_for_type_inference(dtype=dtype)
        for i in range(num)
    ]

    helper.append_op(
        type="unbind",
        inputs={"X": input},
        outputs={"Out": outs},
        attrs={"axis": axis})
    return outs
| 40.035843
| 946
| 0.58213
|
4a15edafcbb84c1ebd75e34457942d20c19ace82
| 14,830
|
py
|
Python
|
kedro/template/{{ cookiecutter.repo_name }}/kedro_cli.py
|
LTHODAVDOPL/kedro
|
d2c9648794cdda5ad28ea99cfc2661c076876932
|
[
"Apache-2.0"
] | 1
|
2019-09-28T22:38:00.000Z
|
2019-09-28T22:38:00.000Z
|
kedro/template/{{ cookiecutter.repo_name }}/kedro_cli.py
|
LTHODAVDOPL/kedro
|
d2c9648794cdda5ad28ea99cfc2661c076876932
|
[
"Apache-2.0"
] | null | null | null |
kedro/template/{{ cookiecutter.repo_name }}/kedro_cli.py
|
LTHODAVDOPL/kedro
|
d2c9648794cdda5ad28ea99cfc2661c076876932
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line tools for manipulating a Kedro project.
Intended to be invoked via `kedro`."""
import os
import shutil
import subprocess
import sys
from collections import Counter
from glob import iglob
from pathlib import Path
import click
from click import secho, style
from kedro.cli import main as kedro_main
from kedro.cli.utils import (
KedroCliError,
call,
forward_command,
python_call,
export_nodes,
)
from kedro.utils import load_obj
from kedro.runner import SequentialRunner
from kedro.context import load_context
from typing import Iterable, List, Dict
# Shared click settings: let both -h and --help show command usage.
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# get our package onto the python path
PROJ_PATH = Path(__file__).resolve().parent
os.environ["IPYTHONDIR"] = str(PROJ_PATH / ".ipython")
# User-facing error messages for optional tooling that may not be installed.
NO_PYTEST_MESSAGE = """
pytest is not installed. Please make sure pytest is in
src/requirements.txt and run `kedro install`.
"""
NO_NBSTRIPOUT_MESSAGE = """
nbstripout is not installed. Please make sure nbstripout is in
`src/requirements.txt` and run `kedro install`.
"""
# Help texts for the click options used by the commands below.
TAG_ARG_HELP = """Construct the pipeline using only nodes which have this tag
attached. Option can be used multiple times, what results in a
pipeline constructed from nodes having any of those tags."""
PIPELINE_ARG_HELP = """Name of the modular pipeline to run.
If not set, the project pipeline is run by default."""
ENV_ARG_HELP = """Run the pipeline in a configured environment. If not specified,
pipeline will run using environment `local`."""
NODE_ARG_HELP = """Run only nodes with specified names."""
FROM_NODES_HELP = """A list of node names which should be used as a starting point."""
TO_NODES_HELP = """A list of node names which should be used as an end point."""
FROM_INPUTS_HELP = (
    """A list of dataset names which should be used as a starting point."""
)
PARALLEL_ARG_HELP = """Run the pipeline using the `ParallelRunner`.
If not specified, use the `SequentialRunner`. This flag cannot be used together
with --runner."""
RUNNER_ARG_HELP = """Specify a runner that you want to run the pipeline with.
This option cannot be used together with --parallel."""
CONVERT_ALL_HELP = """Extract the nodes from all notebooks in the Kedro project directory,
including sub-folders."""
OVERWRITE_HELP = """If Python file already exists for the equivalent notebook,
overwrite its contents."""
LOAD_VERSION_HELP = """Specify a particular dataset version (timestamp) for loading."""
def _split_string(ctx, param, value):
return [item for item in value.split(",") if item]
def _reformat_load_versions(ctx, param, value) -> Dict[str, str]:
"""Reformat data structure from tuple to dictionary for `load-version`.
E.g ('dataset1:time1', 'dataset2:time2') -> {"dataset1": "time1", "dataset2": "time2"}.
"""
load_version_separator = ":"
load_versions_dict = {}
for load_version in value:
load_version_list = load_version.split(load_version_separator)
if len(load_version_list) != 2:
raise ValueError(
"Expected the form of `load_version` to be "
"`dataset_name:YYYY-MM-DDThh.mm.ss.sssZ`,"
"found {} instead".format(load_version)
)
load_versions_dict[load_version_list[0]] = load_version_list[1]
return load_versions_dict
# Root click group; the project sub-commands below register themselves on it
# via @cli.command() / @cli.group().
@click.group(context_settings=CONTEXT_SETTINGS, name=__file__)
def cli():
    """Command line tools for manipulating a Kedro project."""
@cli.command()
@click.option(
    "--from-inputs", type=str, default="", help=FROM_INPUTS_HELP, callback=_split_string
)
@click.option(
    "--from-nodes", type=str, default="", help=FROM_NODES_HELP, callback=_split_string
)
@click.option(
    "--to-nodes", type=str, default="", help=TO_NODES_HELP, callback=_split_string
)
@click.option("--node", "-n", "node_names", type=str, multiple=True, help=NODE_ARG_HELP)
@click.option(
    "--runner", "-r", type=str, default=None, multiple=False, help=RUNNER_ARG_HELP
)
@click.option("--parallel", "-p", is_flag=True, multiple=False, help=PARALLEL_ARG_HELP)
@click.option("--env", "-e", type=str, default=None, multiple=False, help=ENV_ARG_HELP)
@click.option("--tag", "-t", type=str, multiple=True, help=TAG_ARG_HELP)
@click.option(
    "--load-version",
    "-lv",
    type=str,
    multiple=True,
    help=LOAD_VERSION_HELP,
    callback=_reformat_load_versions,
)
@click.option("--pipeline", type=str, default=None, help=PIPELINE_ARG_HELP)
def run(
    tag,
    env,
    parallel,
    runner,
    node_names,
    to_nodes,
    from_nodes,
    from_inputs,
    load_version,
    pipeline,
):
    """Run the pipeline."""
    # --parallel is shorthand for --runner=ParallelRunner, so giving both
    # would be ambiguous.
    if parallel and runner:
        raise KedroCliError(
            "Both --parallel and --runner options cannot be used together. "
            "Please use either --parallel or --runner."
        )
    if parallel:
        runner = "ParallelRunner"
    if runner:
        runner_class = load_obj(runner, "kedro.runner")
    else:
        runner_class = SequentialRunner

    project_context = load_context(Path.cwd(), env=env)
    project_context.run(
        tags=tag,
        runner=runner_class(),
        node_names=node_names,
        from_nodes=from_nodes,
        to_nodes=to_nodes,
        from_inputs=from_inputs,
        load_versions=load_version,
        pipeline_name=pipeline,
    )
@forward_command(cli, forward_help=True)
def test(args):
    """Run the test suite."""
    try:
        import pytest  # noqa: F401  # probe only: fail early if missing
    except ImportError:
        raise KedroCliError(NO_PYTEST_MESSAGE)
    # Reached only when the import succeeded (the except branch raises).
    python_call("pytest", args)
@cli.command()
def install():
    """Install project dependencies from both requirements.txt and environment.yml (optional)."""
    conda_env = Path.cwd() / "src" / "environment.yml"
    if conda_env.is_file():
        call(["conda", "install", "--file", "src/environment.yml", "--yes"])
    python_call("pip", ["install", "-U", "-r", "src/requirements.txt"])
@forward_command(cli, forward_help=True)
def ipython(args):
    """Open IPython with project specific variables loaded."""
    # Skip the banner when the user only asked for help output.
    wants_help = "-h" in args or "--help" in args
    if not wants_help:
        ipython_message()
    call(["ipython"] + list(args))
@cli.command()
def package():
    """Package the project as a Python egg and wheel."""
    # Build both distribution formats from the src/ directory, egg first.
    for dist_format in ("bdist_egg", "bdist_wheel"):
        call([sys.executable, "setup.py", "clean", "--all", dist_format], cwd="src")
@cli.command("build-docs")
def build_docs():
    """Build the project documentation."""
    # Docs extras plus project requirements must be importable for autodoc.
    python_call("pip", ["install", "src/[docs]"])
    python_call("pip", ["install", "-r", "src/requirements.txt"])
    python_call(
        "ipykernel", ["install", "--user", "--name={{ cookiecutter.python_package }}"]
    )
    # Start from a clean build tree, regenerate API stubs, then render HTML.
    shutil.rmtree("docs/build", ignore_errors=True)
    apidoc_cmd = [
        "sphinx-apidoc",
        "--module-first",
        "-o",
        "docs/source",
        "src/{{ cookiecutter.python_package }}",
    ]
    call(apidoc_cmd)
    call(["sphinx-build", "-M", "html", "docs/source", "docs/build", "-a"])
@cli.command("build-reqs")
def build_reqs():
    """Build the project dependency requirements."""
    src_dir = Path.cwd() / "src"
    requirements_in = src_dir / "requirements.in"
    if not requirements_in.is_file():
        # First run: seed requirements.in from the pinned requirements file.
        secho("No requirements.in found. Copying contents from requirements.txt...")
        requirements_in.write_text((src_dir / "requirements.txt").read_text())
    python_call("piptools", ["compile", str(requirements_in)])
    secho(
        "Requirements built! Please update requirements.in "
        "if you'd like to make a change in your project's dependencies, "
        "and re-run build-reqs to generate the new requirements.txt."
    )
@cli.command("activate-nbstripout")
def activate_nbstripout():
    """Install the nbstripout git hook to automatically clean notebooks."""
    secho(
        (
            "Notebook output cells will be automatically cleared before committing"
            " to git."
        ),
        fg="yellow",
    )

    try:
        import nbstripout  # noqa: F401  # probe only: the hook shells out to it
    except ImportError:
        raise KedroCliError(NO_NBSTRIPOUT_MESSAGE)

    try:
        git_check = subprocess.run(
            ["git", "rev-parse", "--git-dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except FileNotFoundError:
        raise KedroCliError("Git executable not found. Install Git first.")
    # A non-zero exit code means we are not inside a git work tree.
    if git_check.returncode:
        raise KedroCliError("Not a git repository. Run `git init` first.")

    call(["nbstripout", "--install"])
def _build_jupyter_command(
base: str, ip: str, all_kernels: bool, args: Iterable[str]
) -> List[str]:
cmd = [base, "--ip=" + ip]
if not all_kernels:
cmd.append("--KernelSpecManager.whitelist=['python3']")
return cmd + list(args)
# Parent group for the `kedro jupyter ...` sub-commands registered below.
@cli.group()
def jupyter():
    """Open Jupyter Notebook / Lab with project specific variables loaded, or
    convert notebooks into Kedro code.
    """
@forward_command(jupyter, "notebook", forward_help=True)
@click.option("--ip", type=str, default="127.0.0.1")
@click.option("--all-kernels", is_flag=True, default=False)
def jupyter_notebook(ip, all_kernels, args):
    """Open Jupyter Notebook with project specific variables loaded."""
    # Skip the banner when the user only asked for help output.
    if "-h" not in args and "--help" not in args:
        ipython_message(all_kernels)
    notebook_cmd = _build_jupyter_command(
        "jupyter-notebook", ip=ip, all_kernels=all_kernels, args=args
    )
    call(notebook_cmd)
@forward_command(jupyter, "lab", forward_help=True)
@click.option("--ip", type=str, default="127.0.0.1")
@click.option("--all-kernels", is_flag=True, default=False)
def jupyter_lab(ip, all_kernels, args):
    """Open Jupyter Lab with project specific variables loaded."""
    # Skip the banner when the user only asked for help output.
    if "-h" not in args and "--help" not in args:
        ipython_message(all_kernels)
    lab_cmd = _build_jupyter_command(
        "jupyter-lab", ip=ip, all_kernels=all_kernels, args=args
    )
    call(lab_cmd)
@jupyter.command("convert")
@click.option("--all", "all_flag", is_flag=True, help=CONVERT_ALL_HELP)
@click.option("-y", "overwrite_flag", is_flag=True, help=OVERWRITE_HELP)
@click.argument(
    "filepath",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    required=False,
    nargs=-1,
)
def convert_notebook(all_flag, overwrite_flag, filepath):
    """Convert selected or all notebooks found in a Kedro project
    to Kedro code, by exporting code from the appropriately-tagged cells:
    Cells tagged as `node` will be copied over to a Python file matching
    the name of the notebook, under `src/<package_name>/nodes`.
    *Note*: Make sure your notebooks have unique names!
    FILEPATH: Path(s) to exact notebook file(s) to be converted. Both
    relative and absolute paths are accepted.
    Should not be provided if --all flag is already present.
    """
    context = load_context(Path.cwd())

    if not filepath and not all_flag:
        secho(
            "Please specify a notebook filepath "
            "or add '--all' to convert all notebooks."
        )
        sys.exit(1)

    project_path = context.project_path
    package_name = "{{cookiecutter.python_package}}"

    if all_flag:
        # Python's glob (unlike pathlib's) skips hidden directories, which
        # conveniently excludes .ipynb_checkpoints copies.
        pattern = str(project_path / "**" / "*.ipynb")
        notebooks = sorted(Path(match) for match in iglob(pattern, recursive=True))
    else:
        notebooks = [Path(f) for f in filepath]

    # Output files are named after the notebook stem, so stems must be unique.
    duplicates = [
        stem
        for stem, count in Counter(nb.stem for nb in notebooks).items()
        if count > 1
    ]
    if duplicates:
        raise KedroCliError(
            "Found non-unique notebook names! "
            "Please rename the following: {}".format(", ".join(duplicates))
        )

    nodes_dir = project_path / "src" / package_name / "nodes"
    for notebook in notebooks:
        secho("Converting notebook '{}'...".format(str(notebook)))
        output_path = nodes_dir / "{}.py".format(notebook.stem)
        should_export = True
        if output_path.is_file():
            # Existing files are only replaced with -y or explicit consent.
            should_export = overwrite_flag or click.confirm(
                "Output file {} already exists. Overwrite?".format(str(output_path)),
                default=False,
            )
        if should_export:
            export_nodes(notebook, output_path)

    secho("Done!")
def ipython_message(all_kernels=True):
    """Show a message saying how we have configured the IPython env."""
    divider = "-" * 79
    secho(divider, fg="cyan")
    secho("Starting a Kedro session with the following variables in scope")
    secho(", ".join(["startup_error", "context"]), fg="green")
    reload_hint = style("%reload_kedro", fg="green")
    secho("Use the line magic {} to refresh them".format(reload_hint))
    secho("or to see the error message if they are undefined")
    if not all_kernels:
        secho("The choice of kernels is limited to the default one.", fg="yellow")
        secho("(restart with --all-kernels to get access to others)", fg="yellow")
    secho(divider, fg="cyan")
if __name__ == "__main__":
    # Run the CLI from the project root so relative paths (conf/, src/, docs/)
    # resolve consistently regardless of where the script was invoked from.
    os.chdir(str(PROJ_PATH))
    kedro_main()
| 33.552036
| 97
| 0.668105
|
4a15edee3fa31ac384a6eb60262aa7235c970c64
| 667
|
py
|
Python
|
vectorformats/Formats/Format.py
|
AstunTechnology/featureserver
|
0697730de12b7bc4c8d90bab829d95a865253e77
|
[
"BSD-3-Clause-Open-MPI",
"MIT"
] | 55
|
2015-01-20T14:29:59.000Z
|
2020-12-13T12:54:28.000Z
|
vectorformats/Formats/Format.py
|
makinacorpus/featureserver
|
379c1a7f51e75517ae7237751e1908f45c0c4d9a
|
[
"BSD-3-Clause-Open-MPI",
"MIT"
] | 3
|
2015-06-24T23:34:03.000Z
|
2017-02-05T02:16:19.000Z
|
vectorformats/Formats/Format.py
|
makinacorpus/featureserver
|
379c1a7f51e75517ae7237751e1908f45c0c4d9a
|
[
"BSD-3-Clause-Open-MPI",
"MIT"
] | 19
|
2015-02-08T12:32:25.000Z
|
2021-12-01T08:14:32.000Z
|
class Format(object):
    """Base Format class for vector output formats.

    Any keyword arguments passed to the constructor are set as attributes on
    the instance; subclasses receive their configuration this way.
    """

    def __init__(self, *args, **kwargs):
        """Store every keyword argument as an instance attribute."""
        for key, value in kwargs.items():
            setattr(self, key, value)

    def getFormatedAttributName(self, name):
        """Return the alias from a ``col as "alias"`` expression.

        Only the double-quoted `` as "..."`` form is recognised; anything
        else is returned unchanged.
        """
        marker = ' as "'
        marker_pos = name.find(marker)
        if marker_pos >= 0:
            # Alias runs from just after the marker to the closing quote.
            return name[marker_pos + len(marker):-1]
        return name

    def escapeSQL(self, value):
        """Return ``value`` with single quotes doubled, for SQL string literals."""
        # Doubling quotes is the standard SQL escape for embedded apostrophes.
        return value.replace("'", "''")
| 30.318182
| 71
| 0.554723
|
4a15ee4d7bf765ef330be0ad229c700613610459
| 7,514
|
py
|
Python
|
ml_belt/prep.py
|
adrianogfreitas/ml_belt
|
a013abe772e5479c3c72d6b4d464fdf06827db57
|
[
"MIT"
] | 1
|
2019-12-04T20:29:31.000Z
|
2019-12-04T20:29:31.000Z
|
ml_belt/prep.py
|
adrianogfreitas/ml_belt
|
a013abe772e5479c3c72d6b4d464fdf06827db57
|
[
"MIT"
] | null | null | null |
ml_belt/prep.py
|
adrianogfreitas/ml_belt
|
a013abe772e5479c3c72d6b4d464fdf06827db57
|
[
"MIT"
] | null | null | null |
"""Module for common preprocessing tasks."""
import time
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# TODO: acertar docstrings
# TODO: drop_by
# TODO: apply_custom_item_level (escolher axis)
# TODO: colocar um acompanhamento de progresso
class Prep(object):
    """Chainable preprocessing helper for a pandas DataFrame.

    Every transformation operates on a private copy of the DataFrame given at
    construction time and returns ``self``, so calls can be chained::

        clean = Prep(df).drop_null_cols().fill_null_with(0).df

    Attributes:
        df (pd.DataFrame): current state of the transformed DataFrame
            (returned as a copy).
    """

    def __init__(self, df: pd.DataFrame):
        """Create a new object.

        Args:
            df: DataFrame to preprocess. All tasks are performed on a copy;
                the original object is never modified.
        """
        self._data = df.copy()
        self._le = {}  # per-column LabelEncoders created by `encode`
        self._scaler = None  # MinMaxScaler created by `scale`

    @property
    def df(self):
        """Get a copy of the actual version of the modified df."""
        return self._data.copy()

    @df.setter
    def df(self, df):
        """Set a new dataframe (copied) to be modified."""
        self._data = df.copy()
        return self

    def apply_custom(self, fn, args=None):
        """Apply a custom function to the dataframe.

        Args:
            fn: callable receiving the DataFrame (plus ``args`` as keyword
                arguments) and returning the modified DataFrame.
            args: optional dict of extra keyword arguments for ``fn``.

        Returns:
            self
        """
        # `args=None` replaces the old mutable `{}` default (same behaviour,
        # no shared-state risk).
        self._data = fn(self._data, **(args or {}))
        return self

    def drop_nulls(self, cols: list = None):
        """Drop all rows with nulls.

        Args:
            cols: columns to consider, or None for the whole dataframe.
                Unknown column names are silently ignored.

        Returns:
            self
        """
        if cols is None:
            self._data.dropna(inplace=True)
        else:
            cols = [c for c in cols if c in self._data.columns]
            self._data.dropna(subset=cols, inplace=True)
        return self

    def drop_not_nulls(self, cols: list):
        """Drop all rows with not null values for each column in cols.

        Args:
            cols: list of columns; unknown names are ignored.

        Returns:
            self
        """
        for col in [c for c in cols if c in self._data.columns]:
            self._data = self._data[self._data[col].isnull()]
        return self

    def drop_null_cols(self):
        """Drop columns whose values are all null.

        Returns:
            self
        """
        # BUG FIX: the previous `dropna(index=1, how='all')` passed an
        # invalid keyword and discarded its result; dropping all-null
        # columns requires axis=1 and an in-place (or assigned) call.
        self._data.dropna(axis=1, how='all', inplace=True)
        return self

    def drop_cols(self, cols: list):
        """Drop all listed columns.

        Args:
            cols: list of cols to drop; unknown names are ignored.

        Returns:
            self
        """
        cols = [c for c in cols if c in self._data.columns]
        if cols:
            self._data.drop(cols, axis=1, inplace=True)
        return self

    def bool_to_int(self, cols: list):
        """Transform truthy/falsy values into 1 and 0.

        Args:
            cols: list of cols to transform, or None for every column.

        Returns:
            self
        """
        # BUG FIX: the old `cols is None` branch called applymap without
        # assigning its result, so the dataframe was never changed.
        if cols is None:
            target_cols = list(self._data.columns)
        else:
            target_cols = [c for c in cols if c in self._data.columns]
        for col in target_cols:
            self._data[col] = self._data[col].apply(lambda x: 1 if x else 0)
        return self

    # TODO: persist the fitted LabelEncoders (e.g. via pickle) for reuse.
    def encode(self, cols: list):
        """Encode categorical vars into numeric ones.

        Nulls are first replaced by the sentinel class ``'N/A-ENC'`` so the
        encoder sees a consistent category.

        Args:
            cols: list of columns to encode; unknown names are ignored.

        Returns:
            self
        """
        for col in [c for c in cols if c in self._data.columns]:
            # Assignment instead of Series.fillna(inplace=True): safe under
            # pandas copy-on-write semantics.
            self._data[col] = self._data[col].fillna('N/A-ENC')
            self._le[col] = LabelEncoder()
            self._data[col] = self._le[col].fit_transform(self._data[col])
        return self

    def inverse_encode(self, cols: list):
        """Reverse a previous `encode` call on the given columns.

        Args:
            cols: list of columns to decode; unknown names are ignored.

        Returns:
            self
        """
        for col in [c for c in cols if c in self._data.columns]:
            self._data[col] = self._le[col].inverse_transform(self._data[col])
        return self

    def fill_null_with(self, val, cols=None):
        """Fill all nulls with a same value.

        Args:
            val: can be `mean` to replace nulls with the mean of each
                column, or any value to put in place of nulls.
            cols: list of columns, or None for the whole dataframe
                (literal fill only in that case).

        Returns:
            self
        """
        if cols is None:
            self._data.fillna(val, inplace=True)
            return self
        cols = [c for c in cols if c in self._data.columns]
        if isinstance(val, str) and val == 'mean':
            for col in cols:
                self._data[col] = self._data[col].fillna(self._data[col].mean())
        else:
            for col in cols:
                self._data[col] = self._data[col].fillna(val)
        return self

    def dummify(self, columns: list, drop_first: bool = True):
        """Replace the listed columns with one-hot (dummy) columns.

        Args:
            columns: list of columns to dummify.
            drop_first: drop the first class of each column (avoids
                collinearity). Defaults to True.

        Returns:
            self
        """
        for col in columns:
            dummy = pd.get_dummies(self._data[col], drop_first=drop_first)
            self._data = pd.concat([self._data, dummy], axis=1)
        self._data.drop(columns, axis=1, inplace=True)
        return self

    def col_2_time(self, cols: list):
        """Parse the listed columns as datetimes via ``pd.to_datetime``.

        Args:
            cols: columns to convert.

        Returns:
            self
        """
        for column in cols:
            self._data[column] = pd.to_datetime(self._data[column])
        return self

    def time_2_float(self, cols: list):
        """Convert datetime columns to POSIX timestamps (float seconds).

        Args:
            cols: datetime columns to convert.

        Returns:
            self
        """
        for column in cols:
            self._data[column] = self._data[column].apply(
                lambda x: time.mktime(x.timetuple()))
        return self

    def scale(self, cols: list = None):
        """Min-max scale columns to [0, 1], keeping the fitted scaler.

        Args:
            cols: columns to scale, or None for every column.

        Returns:
            self
        """
        self._scaler = MinMaxScaler()
        if cols is None:
            cols = self._data.columns
        else:
            cols = [c for c in cols if c in self._data.columns]
        self._data[cols] = self._scaler.fit_transform(self._data[cols])
        return self

    def inverse_scale(self, cols: list = None):
        """Undo a previous `scale` call using the stored scaler.

        Args:
            cols: columns to restore, or None for every column.

        Returns:
            self
        """
        if cols is None:
            cols = self._data.columns
        else:
            cols = [c for c in cols if c in self._data.columns]
        self._data[cols] = self._scaler.inverse_transform(self._data[cols])
        return self
# TODO:
# unit test: testar se modifica o dataframe original nos metodos acima,
# ou seja, criar assert para verificar se os objetos são diferentes mesmo com o mesmo valor
# Separar colunas numericas de colunas categóricas (describe separa colunas numéricas)
# aplicar por padrão a função dummify na lista de colunas categóricas
# aplicar por padrão a função scale nas colunas numéricas
# criar properties pra isso ???
# cria coluna baseado em calculo de outras ???
# preencher null com média de outras colunas
| 27.82963
| 107
| 0.553234
|
4a15ee77dc7d39c301ec2fe8c20b1ff3c7889f2c
| 7,252
|
py
|
Python
|
tests/test_user_model.py
|
StanZhouyu/stanblog
|
5ff202297f72d1c17bd5572c6ada7f30dee4bdc6
|
[
"MIT"
] | null | null | null |
tests/test_user_model.py
|
StanZhouyu/stanblog
|
5ff202297f72d1c17bd5572c6ada7f30dee4bdc6
|
[
"MIT"
] | 4
|
2020-03-24T15:48:27.000Z
|
2022-03-08T21:09:18.000Z
|
tests/test_user_model.py
|
StanZhouyu/stanblog
|
5ff202297f72d1c17bd5572c6ada7f30dee4bdc6
|
[
"MIT"
] | null | null | null |
import unittest
import time
from app import create_app, db
from app.models import User, AnonymousUser, Role, Permission, Follow
from datetime import datetime
class UserModelTestCase(unittest.TestCase):
    """Unit tests for the User model: password hashing, confirmation/reset/
    email-change tokens, role permissions, follower relationships, timestamps
    and JSON serialization.

    Fixes applied:
    - 'teset_invalid_email_change_token' was misspelled, so unittest never
      discovered or ran it; renamed to 'test_invalid_email_change_token'.
    - Two methods were both named 'test_user_role'; the second (administrator)
      definition shadowed the first, so the plain-user permission test never
      ran. The administrator test is renamed 'test_administrator_role'.
    - 'test_expired_confimation_token' typo corrected.
    - Missing db.session.commit() added to the email-change-token tests for
      consistency with the other token tests (tokens reference persisted ids).
    """

    def setUp(self):
        # Fresh application context and database schema for every test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_password_setter(self):
        u = User(password='cat')
        self.assertTrue(u.password_hash is not None)

    def test_no_password_getter(self):
        # The plaintext password must never be readable back.
        u = User(password='cat')
        with self.assertRaises(AttributeError):
            u.password

    def test_password_verification(self):
        u = User(password='cat')
        self.assertTrue(u.verify_password('cat'))
        self.assertFalse(u.verify_password('dog'))

    def test_password_salts_are_random(self):
        # Same password, different users -> different hashes (random salt).
        u = User(password='cat')
        u2 = User(password='cat')
        self.assertTrue(u.password_hash != u2.password_hash)

    def test_valid_confirmation_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_confirmation_token()
        self.assertTrue(u.confirm(token))

    def test_invalid_confirmation_token(self):
        # A token generated for one user must not confirm another.
        u1 = User(password='cat')
        u2 = User(password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_confirmation_token()
        self.assertFalse(u2.confirm(token))

    def test_expired_confirmation_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        # Token with a 1-second lifetime must be rejected after 2 seconds.
        token = u.generate_confirmation_token(1)
        time.sleep(2)
        self.assertFalse(u.confirm(token))

    def test_valid_reset_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_reset_token()
        self.assertTrue(u.reset_password(token, 'dog'))
        self.assertTrue(u.verify_password('dog'))

    def test_invalid_reset_token(self):
        u1 = User(password='cat')
        u2 = User(password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_reset_token()
        self.assertFalse(u2.reset_password(token, 'horse'))
        self.assertTrue(u2.verify_password('dog'))

    def test_valid_email_change_token(self):
        u = User(email='john@example.com', password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_email_change_token('susan@example.org')
        self.assertTrue(u.change_email(token))
        self.assertTrue(u.email == 'susan@example.org')

    def test_invalid_email_change_token(self):
        # BUG FIX: was 'teset_...', so this test was silently skipped.
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_email_change_token('david@example.net')
        self.assertFalse(u2.change_email(token))
        self.assertTrue(u2.email == 'susan@example.org')

    def test_duplicate_email_change_token(self):
        # Changing to an email already registered to another user must fail.
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u2.generate_email_change_token('john@example.com')
        self.assertFalse(u2.change_email(token))
        self.assertTrue(u2.email == 'susan@example.org')

    def test_user_role(self):
        # Default role: may follow and comment only.
        u = User(email='john@example.com', password='cat')
        self.assertTrue(u.can(Permission.FOLLOW))
        self.assertTrue(u.can(Permission.COMMENT))
        self.assertFalse(u.can(Permission.WRITE_ARTICLES))
        self.assertFalse(u.can(Permission.MODERATE_COMMENTS))
        self.assertFalse(u.can(Permission.ADMINISTER))

    def test_moderator_role(self):
        r = Role.query.filter_by(name='Moderator').first()
        u = User(email='john@example.com', password='cat', role=r)
        self.assertTrue(u.can(Permission.FOLLOW))
        self.assertTrue(u.can(Permission.COMMENT))
        self.assertFalse(u.can(Permission.WRITE_ARTICLES))
        self.assertTrue(u.can(Permission.MODERATE_COMMENTS))
        self.assertFalse(u.can(Permission.ADMINISTER))

    def test_administrator_role(self):
        # BUG FIX: was a second 'test_user_role', which shadowed the first
        # definition so the plain-user test above never ran.
        r = Role.query.filter_by(name='Administrator').first()
        u = User(email='john@example.com', password='cat', role=r)
        self.assertTrue(u.can(Permission.FOLLOW))
        self.assertTrue(u.can(Permission.COMMENT))
        self.assertTrue(u.can(Permission.WRITE_ARTICLES))
        self.assertTrue(u.can(Permission.MODERATE_COMMENTS))
        self.assertTrue(u.can(Permission.ADMINISTER))

    def test_anonymous_user(self):
        u = AnonymousUser()
        self.assertFalse(u.can(Permission.FOLLOW))

    def test_timestamps(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        # Both timestamps default to (approximately) now.
        self.assertTrue((datetime.utcnow() - u.member_since).total_seconds() < 3)
        self.assertTrue((datetime.utcnow() - u.last_seen).total_seconds() < 3)

    def test_ping(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        time.sleep(2)
        last_seen_before = u.last_seen
        u.ping()
        self.assertTrue(u.last_seen > last_seen_before)

    def test_follows(self):
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        self.assertFalse(u1.is_following(u2))
        self.assertFalse(u1.is_followed_by(u2))
        u1.follow(u2)
        db.session.add(u1)
        db.session.commit()
        self.assertTrue(u1.is_following(u2))
        self.assertFalse(u1.is_followed_by(u2))
        self.assertTrue(u2.is_followed_by(u1))
        # Counts include the implicit self-follow each user gets on creation.
        self.assertTrue(u1.followed.count() == 2)
        self.assertTrue(u2.followers.count() == 2)
        f = u1.followed.all()[-1]
        self.assertTrue(f.followed == u2)
        f = u2.followers.all()[0]
        self.assertTrue(f.follower == u1)
        u1.unfollow(u2)
        db.session.add(u1)
        db.session.commit()
        self.assertTrue(u1.followed.count() == 1)
        self.assertTrue(u2.followers.count() == 1)
        self.assertTrue(Follow.query.count() == 2)
        u2.follow(u1)
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        # Deleting a user must cascade-delete their Follow rows.
        db.session.delete(u2)
        db.session.commit()
        self.assertTrue(Follow.query.count() == 1)

    def test_to_json(self):
        u = User(email='john@example.com', password='cat')
        db.session.add(u)
        db.session.commit()
        json_user = u.to_json()
        expected_keys = ['url', 'username', 'member_since', 'last_seen',
                         'posts', 'followed_comments', 'post_count']
        self.assertEqual(sorted(json_user.keys()), sorted(expected_keys))
        self.assertTrue('api/v1.0/users/' in json_user['url'])
| 37
| 81
| 0.627689
|
4a15ef0f77ec3b5606012acfb170ed75d80a68c7
| 11,843
|
py
|
Python
|
ros/src/waypoint_updater/waypoint_updater.py
|
greenfield932/CarND-Capstone
|
effa605590a2ebf6ef9e9d00815910718c3ec4f1
|
[
"MIT"
] | null | null | null |
ros/src/waypoint_updater/waypoint_updater.py
|
greenfield932/CarND-Capstone
|
effa605590a2ebf6ef9e9d00815910718c3ec4f1
|
[
"MIT"
] | null | null | null |
ros/src/waypoint_updater/waypoint_updater.py
|
greenfield932/CarND-Capstone
|
effa605590a2ebf6ef9e9d00815910718c3ec4f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
import math
from scipy.spatial import KDTree
from std_msgs.msg import Int32
import numpy as np
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
MAX_ACCEL = 2 #m/s^2 -- magnitude used for both the acceleration and braking ramps
STOP_DIST_TRESHOLD = 3 #meters -- slack subtracted from the distance to the stop line
#State machine states for performing start/stop/accel/deccel logic depending on traffic light
DRIVE_STATE_BREAK = "STATE_BREAK"  # one-shot: compute the braking velocity profile
DRIVE_STATE_BREAKING = "STATE_BREAKING"  # holding: stopped/stopping, waiting for green
DRIVE_STATE_ACCEL = "STATE_ACCEL"  # one-shot: compute the acceleration velocity profile
DRIVE_STATE_ACCELERATING = "STATE_ACCELERATING"  # declared but not used by process_state_machine
DRIVE_STATE_DRIVING = "STATE_DRIVING"  # holding: cruising, watching for red lights
class WaypointUpdater(object):
    """ROS node that publishes the next LOOKAHEAD_WPS base waypoints ahead of
    the vehicle on /final_waypoints, rewriting per-waypoint target velocities
    so the car brakes for red lights and accelerates again on green.

    Subscribes to /current_pose, /base_waypoints and /traffic_waypoint; the
    start/stop behaviour is driven by a small state machine (DRIVE_STATE_*).
    """

    def __init__(self):
        rospy.init_node('waypoint_updater', log_level=rospy.DEBUG)
        rospy.loginfo("WaypointUpdater init start")
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
        self.pose = None  # latest PoseStamped from /current_pose
        self.base_waypoints = None  # full track Lane message (set once)
        self.waypoints_2d = None  # [[x, y], ...] backing the KD-tree
        self.waypoint_tree = None  # KDTree over waypoints_2d for nearest lookup
        self.accel_end_wp_idx = -1  # NOTE(review): read by out_of_accelerating but never updated -- confirm intent
        # TODO: Add other member variables you need below
        self.drive_state = DRIVE_STATE_BREAKING
        self.red_light = False
        self.closest_light_wp = -1  # waypoint index of the stop line, -1 if none
        self.max_vel = None  # track speed limit, derived from base waypoints
        self.loop()

    def loop(self):
        """Publish final waypoints at 50 Hz until ROS shutdown (never returns)."""
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            if self.pose and self.base_waypoints:
                closest_waypoint_idx = self.get_closest_waypoint_idx()
                self.publish_waypoints(closest_waypoint_idx)
            rate.sleep()

    def out_of_accelerating(self):
        # True once the car has passed the last waypoint of the acceleration ramp.
        current_wp_idx = self.get_closest_waypoint_idx()
        return self.accel_end_wp_idx < current_wp_idx

    def get_closest_waypoint_idx(self):
        """Return the index of the closest base waypoint *ahead of* the car."""
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_idx = self.waypoint_tree.query([x,y], 1)[1]
        # Check whether the found point is ahead of or behind the vehicle.
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx - 1]
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(prev_coord)
        pos_vect = np.array([x,y])
        # Positive dot product => closest waypoint is behind the car, so take
        # the next one (wrapping around the circular track).
        val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
        if val > 0:
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def publish_waypoints(self, closest_idx):
        """Publish a Lane with up to LOOKAHEAD_WPS waypoints from closest_idx."""
        lane = Lane()
        lane.header = self.base_waypoints.header
        lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]
        # Near the end of the track, append the first few waypoints so the
        # published path wraps around instead of running out.
        if len(lane.waypoints) < 10 and closest_idx > 10:
            for i in range(0, 10):
                lane.waypoints.append(self.base_waypoints.waypoints[i])
        self.final_waypoints_pub.publish(lane)

    def pose_cb(self, msg):
        # /current_pose callback: store the pose, then advance the state machine.
        self.pose = msg
        self.process_state_machine()
        pass

    def waypoints_cb(self, waypoints):
        """/base_waypoints callback: cache the track, build the KD-tree, find
        the maximum track velocity, then zero all target velocities (the
        state machine fills them in via accelerate()/decelerate())."""
        if not self.waypoints_2d:
            self.waypoints_2d = []
            for waypoint in waypoints.waypoints:
                self.waypoints_2d.append([waypoint.pose.pose.position.x, waypoint.pose.pose.position.y])
            self.waypoint_tree = KDTree(self.waypoints_2d)
        self.base_waypoints = waypoints
        #find out max velocity from loaded waypoints
        self.max_vel = 0.
        for wp in waypoints.waypoints:
            self.max_vel = max(self.max_vel, wp.twist.twist.linear.x)
        # Zero every target velocity; the car only moves once accelerate() runs.
        for idx in range(0, len(self.base_waypoints.waypoints)):
            self.set_waypoint_velocity(self.base_waypoints.waypoints, idx, 0)
        rospy.loginfo("WU: max vel:" + str(self.max_vel))

    #check if we need to start decelerating depending on light and distance to light point
    def traffic_cb(self, msg):
        """/traffic_waypoint callback: msg.data is the stop-line waypoint index
        of the nearest red light, or -1 when there is none."""
        light_wp_idx = msg.data
        if self.base_waypoints is not None and self.pose is not None: # and light_wp_idx is not None and light_wp_idx!=-1:
            if light_wp_idx is not None and light_wp_idx!=-1:
                current_wp_idx = self.get_closest_waypoint_idx()
                if current_wp_idx is not None and current_wp_idx !=-1:
                    dist = self.distance(self.base_waypoints.waypoints, current_wp_idx, light_wp_idx)
                    safe_dist = self.get_min_safe_break_distance(light_wp_idx)
                    # Only latch the red light once the car is within the
                    # minimum safe braking distance (minus a small slack).
                    if dist - STOP_DIST_TRESHOLD <= safe_dist:
                        self.red_light = True
                        self.closest_light_wp = light_wp_idx
                        rospy.loginfo("WU: Waiting for GREEN light to run:" + str(dist)+ " of safe " +str(safe_dist))
                    else:
                        self.red_light = False
                        rospy.loginfo("WU: Red light found but too far to stop: " + str(dist)+ " of safe " +str(safe_dist))
            else:
                self.red_light = False
                rospy.loginfo("WU: No light WP found, update traffic light to GREEN")

    #estimate distance for deceleration to target point with MAX_ACCEL aceleration
    def get_min_safe_break_distance(self, target_idx):
        """Return the path length needed to brake from the current velocity to
        zero at a constant MAX_ACCEL deceleration, integrated waypoint by
        waypoint. Returns 0 if a full lap is consumed without stopping.

        NOTE(review): target_idx is not used -- the integration simply runs
        until velocity reaches zero; confirm this is intended.
        """
        current_wp_idx = self.get_closest_waypoint_idx()
        current_vel = self.get_waypoint_velocity_by_idx(current_wp_idx)
        rospy.loginfo("WU: current vel:"+str(current_vel))
        #a = du/dt
        #a = MAX_ACCEL
        curr_idx = current_wp_idx
        next_idx = current_wp_idx
        start_idx = current_wp_idx
        waypoints_count = len(self.base_waypoints.waypoints)
        total_dist = 0
        while current_vel > 0:
            next_idx = curr_idx+1
            if next_idx > waypoints_count-1:
                next_idx = 0
            if next_idx == start_idx:
                #second lap, we can't safely break on such small track or such large speed or such small accel
                return 0
            L = self.distance(self.base_waypoints.waypoints, curr_idx, next_idx)
            total_dist += L
            t = L/current_vel #time for move from p0 to p1 with current constant speed
            #assume we can immediately change speed by the step equal to MAX_ACCEL between waypoints
            #find speed in next point
            #a = dV/dt
            #a = (V2-V1)/t
            #V2 = at + V1
            #add minus for deceleration V2 = -at + V1
            current_vel = -MAX_ACCEL*t + current_vel
            curr_idx = next_idx
        return total_dist

    #update waypoints for acceleration from current position to target velocity
    def accelerate(self):
        """Write an increasing velocity ramp (capped at max_vel) onto the base
        waypoints ahead of the car; waypoints behind keep full track speed."""
        rospy.loginfo("WU: accelerate")
        current_wp_idx = self.get_closest_waypoint_idx()
        current_vel = self.get_waypoint_velocity(self.base_waypoints.waypoints[current_wp_idx])
        self.accel_start_wp_idx = current_wp_idx
        # Seed with at least 1 m/s so the ramp can start from a standstill
        # (the v + a*d/v update below would divide by zero otherwise).
        next_vel = current_vel
        if next_vel < 1:
            next_vel = 1
        waypoints_count = len(self.base_waypoints.waypoints)
        for idx in range(current_wp_idx, waypoints_count):
            dist = self.distance(self.base_waypoints.waypoints, idx -1, idx)
            # v2 = v1 + a*t with t = dist/v1 (constant speed between points).
            next_vel = next_vel + MAX_ACCEL * dist / next_vel
            if next_vel >= self.max_vel:
                next_vel = self.max_vel
                #rospy.loginfo("max speed reached at idx:"+str(idx))
            #else:
                #rospy.loginfo("set vel to idx:"+str(idx) + " vel:" + str(next_vel))
            self.set_waypoint_velocity(self.base_waypoints.waypoints, idx, next_vel)
        for idx in range(0, current_wp_idx-1):
            self.set_waypoint_velocity(self.base_waypoints.waypoints, idx, self.max_vel)

    #update waypoints for deceleration from current position to target waypoint
    def decelerate(self, target_wp_idx):
        """Write a decreasing velocity ramp (down to zero) onto the waypoints
        ahead of the car, using constant MAX_ACCEL deceleration.

        NOTE(review): target_wp_idx is unused -- the ramp runs until the
        integrated velocity hits zero rather than ending at the stop line;
        confirm against the intended stopping behaviour.
        """
        rospy.loginfo("WU: decelerate")
        current_wp_idx = self.get_closest_waypoint_idx()
        current_vel = self.get_waypoint_velocity_by_idx(current_wp_idx)
        curr_idx = current_wp_idx
        next_idx = current_wp_idx
        start_idx = current_wp_idx
        waypoints_count = len(self.base_waypoints.waypoints)
        total_dist = 0
        while current_vel > 0:
            next_idx = curr_idx+1
            if next_idx > waypoints_count-1:
                next_idx = 0
            if next_idx == start_idx:
                #second lap, we can't safely break on such small track or such large speed or such small accel
                return
            L = self.distance(self.base_waypoints.waypoints, curr_idx, next_idx)
            total_dist += L
            t = L/current_vel #time for move from p0 to p1 with current constant speed
            #assume we can immediately change speed by the step equal to MAX_ACCEL between waypoints
            #find speed in next point
            #a = dV/dt
            #a = (V2-V1)/t
            #V2 = at + V1
            #add minus for deceleration V2 = -at + V1
            current_vel = -MAX_ACCEL*t + current_vel
            if current_vel < 0:
                current_vel = 0
            self.set_waypoint_velocity(self.base_waypoints.waypoints, next_idx, current_vel)
            curr_idx = next_idx

    #state machine for start/stop logic
    def process_state_machine(self):
        """Advance the drive state machine one step.

        BREAK/ACCEL are one-shot states that rewrite waypoint velocities and
        immediately transition to BREAKING/DRIVING; BREAKING and DRIVING are
        holding states that wait for the red_light flag to flip.
        """
        prev_state = self.drive_state
        if self.drive_state == DRIVE_STATE_ACCEL:
            self.accelerate()
            self.drive_state = DRIVE_STATE_DRIVING
        elif self.drive_state == DRIVE_STATE_BREAK:
            self.decelerate(self.closest_light_wp)
            self.drive_state = DRIVE_STATE_BREAKING
        elif self.drive_state == DRIVE_STATE_BREAKING and self.red_light == False:
            self.drive_state = DRIVE_STATE_ACCEL
        elif self.drive_state == DRIVE_STATE_DRIVING and self.red_light == True:
            self.drive_state = DRIVE_STATE_BREAK
        if prev_state!=self.drive_state:
            rospy.loginfo("WU: state machine in: "+prev_state + " out: " + self.drive_state)

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        # Target linear velocity stored on a waypoint (m/s).
        return waypoint.twist.twist.linear.x

    def get_waypoint_velocity_by_idx(self, idx):
        # Target linear velocity of base waypoint *idx* (m/s).
        return self.base_waypoints.waypoints[idx].twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint_idx, velocity):
        # Overwrite the target linear velocity of one waypoint in place.
        waypoints[waypoint_idx].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Cumulative Euclidean path length along the waypoints from index wp1
        to wp2 (inclusive), summing consecutive segment lengths."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    try:
        # Constructor blocks inside loop() until ROS shutdown.
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| 40.697595
| 123
| 0.645276
|
4a15efe7e7ec2fa21f447b80b6509c643696011f
| 818
|
py
|
Python
|
emmet-api/emmet/api/routes/magnetism/resources.py
|
acrutt/emmet
|
e98100c9932f145a3ad3087ddb7aa9b779d9a191
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-api/emmet/api/routes/magnetism/resources.py
|
acrutt/emmet
|
e98100c9932f145a3ad3087ddb7aa9b779d9a191
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-api/emmet/api/routes/magnetism/resources.py
|
acrutt/emmet
|
e98100c9932f145a3ad3087ddb7aa9b779d9a191
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from maggma.api.resource import ReadOnlyResource
from emmet.core.magnetism import MagnetismDoc
from maggma.api.query_operator import PaginationQuery, SortQuery, SparseFieldsQuery
from emmet.api.routes.magnetism.query_operators import MagneticQuery
from emmet.api.core.global_header import GlobalHeaderProcessor
def magnetism_resource(magnetism_store):
    """Build the read-only Magnetism API endpoint backed by *magnetism_store*.

    The resource exposes magnetic-property queries with sorting, pagination
    and sparse-field selection (defaulting to material_id and last_updated).
    """
    sparse_fields = SparseFieldsQuery(
        MagnetismDoc, default_fields=["material_id", "last_updated"]
    )
    operators = [
        MagneticQuery(),
        SortQuery(),
        PaginationQuery(),
        sparse_fields,
    ]
    return ReadOnlyResource(
        magnetism_store,
        MagnetismDoc,
        query_operators=operators,
        header_processor=GlobalHeaderProcessor(),
        tags=["Magnetism"],
        disable_validation=True,
    )
| 30.296296
| 83
| 0.690709
|
4a15f00f46d57c144a0475f2eebb8d1f0a5f4937
| 1,139
|
py
|
Python
|
catalyst/contrib/data/__init__.py
|
tadejsv/catalyst
|
2553ce8fd7cecc025ad88819aea73faf8abb229b
|
[
"Apache-2.0"
] | 206
|
2018-10-05T19:16:47.000Z
|
2019-01-19T21:10:41.000Z
|
catalyst/contrib/data/__init__.py
|
tadejsv/catalyst
|
2553ce8fd7cecc025ad88819aea73faf8abb229b
|
[
"Apache-2.0"
] | 20
|
2018-10-07T06:30:49.000Z
|
2019-01-17T17:26:15.000Z
|
catalyst/contrib/data/__init__.py
|
tadejsv/catalyst
|
2553ce8fd7cecc025ad88819aea73faf8abb229b
|
[
"Apache-2.0"
] | 22
|
2018-10-06T12:34:08.000Z
|
2019-01-10T16:00:48.000Z
|
# flake8: noqa
from catalyst.settings import SETTINGS
from catalyst.contrib.data.collate_fn import FilteringCollateFn
from catalyst.contrib.data.dataset import (
ListDataset,
MergeDataset,
NumpyDataset,
PathsDataset,
)
from catalyst.contrib.data.dataset_ml import (
MetricLearningTrainDataset,
QueryGalleryDataset,
)
from catalyst.contrib.data.reader import (
IReader,
ScalarReader,
LambdaReader,
ReaderCompose,
)
from catalyst.contrib.data.sampler_inbatch import (
IInbatchTripletSampler,
InBatchTripletsSampler,
AllTripletsSampler,
HardTripletsSampler,
HardClusterSampler,
)
from catalyst.contrib.data.sampler import BalanceBatchSampler, DynamicBalanceClassSampler
from catalyst.contrib.data.transforms import (
image_to_tensor,
normalize_image,
Compose,
ImageToTensor,
NormalizeImage,
)
if SETTINGS.cv_required:
from catalyst.contrib.data.dataset_cv import ImageFolderDataset
from catalyst.contrib.data.reader_cv import ImageReader, MaskReader
# if SETTINGS.nifti_required:
# from catalyst.contrib.data.reader_nifti import NiftiReader
| 23.244898
| 89
| 0.778753
|
4a15f0f445fb75774b43831f0a0cab645e5abf88
| 43
|
py
|
Python
|
builder_engine/custom_components/metrics.py
|
DiablosWhisper/machine_learning_toolpack
|
3f4b82b549a3d70b95fc7a2c01959cd99d2b88b9
|
[
"Apache-2.0"
] | null | null | null |
builder_engine/custom_components/metrics.py
|
DiablosWhisper/machine_learning_toolpack
|
3f4b82b549a3d70b95fc7a2c01959cd99d2b88b9
|
[
"Apache-2.0"
] | null | null | null |
builder_engine/custom_components/metrics.py
|
DiablosWhisper/machine_learning_toolpack
|
3f4b82b549a3d70b95fc7a2c01959cd99d2b88b9
|
[
"Apache-2.0"
] | null | null | null |
from tensorflow.keras.metrics import Metric
| 43
| 43
| 0.883721
|
4a15f1b0b491567d960bc1489b0ee09302a0037c
| 5,326
|
py
|
Python
|
tests/test_binary_convert_read.py
|
yangboz/maro
|
0973783e55ca07bf8e177910c9d47854117a4ea8
|
[
"MIT"
] | 598
|
2020-09-23T00:50:22.000Z
|
2022-03-31T08:12:54.000Z
|
tests/test_binary_convert_read.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 235
|
2020-09-22T10:20:48.000Z
|
2022-03-31T02:10:03.000Z
|
tests/test_binary_convert_read.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 116
|
2020-09-22T09:19:04.000Z
|
2022-02-12T05:04:07.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import tempfile
import unittest
from maro.data_lib import BinaryConverter, BinaryReader
from maro.data_lib.item_meta import BinaryMeta
class TestBinaryConverter(unittest.TestCase):
    """Round-trip tests for BinaryConverter/BinaryReader over the sample
    trips.csv fixture under tests/data/data_lib (meta variants in case_1..3)."""

    def test_convert_with_events(self):
        """Convert trips.csv twice, then verify header, items and tick filters."""
        out_dir = tempfile.mkdtemp()
        out_bin = os.path.join(out_dir, "trips.bin")
        meta_file = os.path.join("tests", "data", "data_lib", "case_1", "meta.yml")
        csv_file = os.path.join("tests", "data", "data_lib", "trips.csv")
        bct = BinaryConverter(out_bin, meta_file)
        # add and convert 1st csv file
        bct.add_csv(csv_file)
        # add again will append to the end ignore the order
        bct.add_csv(csv_file)
        # flush will close the file, cannot add again
        bct.flush()
        # check if output exist
        self.assertTrue(os.path.exists(out_bin))
        # check content
        reader = BinaryReader(out_bin)
        # start tick should be smallest one
        start_date = reader.start_datetime
        self.assertEqual(start_date.year, 2019)
        self.assertEqual(start_date.month, 1)
        self.assertEqual(start_date.day, 1)
        self.assertEqual(start_date.hour, 0)
        self.assertEqual(start_date.minute, 0)
        self.assertEqual(start_date.second, 0)
        end_date = reader.end_datetime
        self.assertEqual(end_date.year, 2019)
        self.assertEqual(end_date.month, 1)
        self.assertEqual(end_date.day, 1)
        self.assertEqual(end_date.hour, 0)
        self.assertEqual(end_date.minute, 5)
        self.assertEqual(end_date.second, 0)
        # there should be double items as trips.csv (4 rows, added twice)
        self.assertEqual(4*2, reader.header.item_count)
        # 20 byte per packed item
        self.assertEqual(20, reader.header.item_size)
        start_station_index = [0, 0, 1, 0]
        idx = 0
        # check iterating interface
        for item in reader.items():
            # check if fields same as meta
            self.assertTupleEqual(('timestamp', 'durations', 'src_station', 'dest_station'), item._fields)
            # check item start station index (pattern repeats for the 2nd copy)
            self.assertEqual(start_station_index[idx % len(start_station_index)], item.src_station)
            idx += 1
        # check if filter works as expected
        l = len([item for item in reader.items(end_time_offset=0, time_unit="m")])
        # although there are 2 items that match the condition, but they not sorted, reader will not try to read to the end, but
        # to the first item which not match the condition
        self.assertEqual(1, l)
        l = len([item for item in reader.items(start_time_offset=1, time_unit='m')])
        # reader will try to read 1st one that > end tick, so there should be 6 items
        self.assertEqual(6, l)

    def test_convert_without_events(self):
        """Meta without explicit events: defaults are filled in from meta.yml."""
        out_dir = tempfile.mkdtemp()
        out_bin = os.path.join(out_dir, "trips.bin")
        meta_file = os.path.join("tests", "data", "data_lib", "case_2", "meta.yml")
        csv_file = os.path.join("tests", "data", "data_lib", "trips.csv")
        bct = BinaryConverter(out_bin, meta_file)
        bct.add_csv(csv_file)
        # flush will close the file, cannot add again
        bct.flush()
        reader = BinaryReader(out_bin)
        meta: BinaryMeta = reader.meta
        self.assertIsNotNone(meta)
        # check events
        self.assertListEqual(["require_bike", "return_bike", "rebalance_bike", "deliver_bike"], [event.display_name for event in meta.events])
        self.assertListEqual(["RequireBike", "ReturnBike", "RebalanceBike", "DeliverBike"], [event.type_name for event in meta.events])
        self.assertEqual("RequireBike", meta.default_event_name)
        self.assertIsNone(meta.event_attr_name)

    def test_convert_with_starttimestamp(self):
        """An explicit utc_start_timestamp shifts tick 0 one minute earlier."""
        out_dir = tempfile.mkdtemp()
        out_bin = os.path.join(out_dir, "trips.bin")
        meta_file = os.path.join("tests", "data", "data_lib", "case_2", "meta.yml")
        csv_file = os.path.join("tests", "data", "data_lib", "trips.csv")
        #12/31/2018 @ 11:59pm (UTC)
        bct = BinaryConverter(out_bin, meta_file, utc_start_timestamp=1546300740)
        bct.add_csv(csv_file)
        # flush will close the file, cannot add again
        bct.flush()
        reader = BinaryReader(out_bin)
        # check header
        self.assertEqual(1546300740, reader.header.starttime)
        # then tick 0 will not be 2019/01/01 00:00:00
        l = len([item for item in reader.items(end_time_offset=0, time_unit='m')])
        self.assertEqual(0, l)
        # it should be tick 1 for now
        l = len([item for item in reader.items(end_time_offset=1, time_unit='m')])
        self.assertEqual(1, l)

    def test_convert_without_meta_timestamp(self):
        """Meta lacking a timestamp column must make the converter raise."""
        out_dir = tempfile.mkdtemp()
        out_bin = os.path.join(out_dir, "trips.bin")
        meta_file = os.path.join("tests", "data", "data_lib", "case_3", "meta.yml")
        csv_file = os.path.join("tests", "data", "data_lib", "trips.csv")
        #12/31/2018 @ 11:59pm (UTC)
        with self.assertRaises(Exception) as ctx:
            bct = BinaryConverter(out_bin, meta_file)
# Allow running this test module directly (python tests/test_binary_convert_read.py).
if __name__ == "__main__":
    unittest.main()
| 31.892216
| 142
| 0.639317
|
4a15f1c928a3e9fb3ac3098185f586f64e60561d
| 2,175
|
py
|
Python
|
tests/functional_tests/util_resources.py
|
sherkitty/main
|
2cbcb6597c0ff6b95c9fa9cd43b6815649ae1b64
|
[
"MIT"
] | 1
|
2021-10-30T10:36:18.000Z
|
2021-10-30T10:36:18.000Z
|
tests/functional_tests/util_resources.py
|
sherkitty/main
|
2cbcb6597c0ff6b95c9fa9cd43b6815649ae1b64
|
[
"MIT"
] | null | null | null |
tests/functional_tests/util_resources.py
|
sherkitty/main
|
2cbcb6597c0ff6b95c9fa9cd43b6815649ae1b64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2021 The Sherkitty Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Help determine how much CPU power is available at the given time
by running numerical calculations
"""
from __future__ import print_function
import subprocess
import psutil
def available_ram_gb():
    """Return the amount of RAM currently available on the system, in GiB."""
    bytes_per_gb = 1024.0 ** 3
    return psutil.virtual_memory().available / bytes_per_gb
def get_time_pi_seconds(cores, app_dir='.'):
    """Run the cpu_power_test helper with *cores* workers and return its
    reported runtime in seconds (the binary prints milliseconds)."""
    binary = '{}/cpu_power_test'.format(app_dir)
    raw_output = subprocess.check_output([binary, str(cores)])
    elapsed_ms = int(raw_output.decode('utf-8'))
    return elapsed_ms / 1000.0
| 41.037736
| 89
| 0.765057
|
4a15f1d5aeff7592250c05b4c7a4f6b14aba366a
| 2,029
|
py
|
Python
|
src/__init__.py
|
fulcrum-rocks/w3af-ci-scan
|
98fd3ab75739bd77b5a0314aa4e4e8022b94e5c1
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
fulcrum-rocks/w3af-ci-scan
|
98fd3ab75739bd77b5a0314aa4e4e8022b94e5c1
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
fulcrum-rocks/w3af-ci-scan
|
98fd3ab75739bd77b5a0314aa4e4e8022b94e5c1
|
[
"MIT"
] | null | null | null |
import subprocess
import os
def getFormatedConfig(url):
    """Return a w3af console script that scans *url*.

    The script configures HTTP settings, crawling, audit/grep plugins and
    reporting, writes an XML report to /W3afReport.xml, then starts the scan.
    The template text itself is passed verbatim to the w3af console, so its
    content must not be reformatted.
    """
    result = """
#Configure HTTP settings
http-settings
set timeout 30
back
#Configure scanner global behaviors
misc-settings
set max_discovery_time 20
set fuzz_cookies True
set fuzz_form_files True
set fuzz_url_parts True
set fuzz_url_filenames True
back
plugins
#Configure entry point (CRAWLING) scanner
crawl web_spider
crawl config web_spider
set only_forward False
set ignore_regex (?i)(logout|disconnect|signout|exit)+
back
#Configure vulnerability scanners
##Specify list of AUDIT plugins type to use
audit cors_origin, response_splitting, xpath, xss, xst
##Customize behavior of each audit plugin when needed
audit config file_upload
set extensions jsp,asp,aspx,pl,cfm,rb,py,sh,ksh,csh,bat,ps,exe
back
##Specify list of GREP plugins type to use (grep plugin is a type of
#plugin
#that can find also vulnerabilities or informations disclosure)
grep analyze_cookies, click_jacking, cross_domain_js, directory_indexing, dom_xss, error_500, error_pages, html_comments, strange_headers, strange_http_codes, xss_protection_header
#Specify list of INFRASTRUCTURE plugins type to use (infrastructure
#plugins
#is a type of plugin that can find informations disclosure)
#infrastructure server_header, server_status, domain_dot, dot_net_errors
#back
#Configure reporting in order to generate an HTML report
output console, xml_file
output config xml_file
set output_file /W3afReport.xml
set verbose True
back
output config console
set verbose True
back
back
#Set target informations, do a cleanup and run the scan
target
set target {0}
back
cleanup
start
exit
""".format(url)
    return result
def process(template):
    """Write *template* to ./script.w3af, run the w3af scan, and echo the report.

    Side effects: creates script.w3af in the working directory, invokes
    ./w3af_console non-interactively (answering its confirmation prompt
    with 'y'), and cats /W3afReport.xml to stdout for CI log capture.
    """
    with open("script.w3af", "w") as text_file:
        text_file.write(template)
    # FIX: the CompletedProcess result was bound to an unused local 'p'.
    subprocess.run(["./w3af_console", "-s", "script.w3af"], stdout=subprocess.PIPE,
                   input='y\n', encoding='ascii')
    subprocess.call(["cat", "/W3afReport.xml"])
| 26.012821
| 182
| 0.755545
|
4a15f429d8063cb5824490e339b34e7092ccf450
| 9,698
|
py
|
Python
|
scrub/tools/coverity/get_coverity_warnings.py
|
ablack-jpl/scrub
|
46739b4a82eab7c37e7f02cf9d537c3a58d40e01
|
[
"Apache-2.0"
] | null | null | null |
scrub/tools/coverity/get_coverity_warnings.py
|
ablack-jpl/scrub
|
46739b4a82eab7c37e7f02cf9d537c3a58d40e01
|
[
"Apache-2.0"
] | null | null | null |
scrub/tools/coverity/get_coverity_warnings.py
|
ablack-jpl/scrub
|
46739b4a82eab7c37e7f02cf9d537c3a58d40e01
|
[
"Apache-2.0"
] | null | null | null |
import re
import os
import logging
from distutils.version import StrictVersion
WARNING_LEVEL = 'Low'  # severity label attached to every Coverity finding in SCRUB output
ID_PREFIX = 'coverity'  # prefix for generated SCRUB warning identifiers (e.g. coverity001)
def get_error_indices(raw_input_file):
    """This function gets the indices of the first line of all Coverity warnings.

    A warning starts at any line containing one of the marker substrings
    'Error:', 'Checker:' or 'Type:' (format varies by Coverity version).

    Inputs:
        - raw_input_file: Full path to the file containing raw Coverity warnings [string]

    Outputs:
        - error_indices: List of warning indices [list of int]
    """
    # Initialize variables
    error_indices = []

    # Import the input data file
    with open(raw_input_file, 'r') as input_fh:
        input_data = input_fh.readlines()

    # Use enumerate instead of index arithmetic over range(len(...)).
    for index, raw_line in enumerate(input_data):
        line = raw_line.strip()

        if ('Error:' in line) or ('Checker:' in line) or ('Type:' in line):
            error_indices.append(index)

    return error_indices
def parse_warnings_2019_12(raw_input_file, parsed_output_file):
    """This function parses the raw Coverity warnings (version 2019.12) into the SCRUB format.

    Expected raw layout per warning: a "<path>:<line>" location line, then a
    marker line whose last parenthesised token is the checker name, then the
    free-form warning body until the next warning's location line.

    Inputs:
        - raw_input_file: Absolute path to the file containing raw Coverity warnings [string]
        - parsed_output_file: Absolute path to the file where the parsed warnings will be stored [string]
    """
    # Print status message
    logging.info('')
    logging.info('\t>> Executing command: get_coverity_warnings.parse_warnings_2019_12(%s, %s)',
                 raw_input_file, parsed_output_file)
    logging.info('\t>> From directory: %s', os.getcwd())

    # Import the input data file
    with open(raw_input_file, 'r') as input_fh:
        input_data = input_fh.readlines()

    # Create the output file
    with open(parsed_output_file, 'w+') as output_fh:
        # Indices of the marker lines that start each warning
        error_indices = get_error_indices(raw_input_file)

        # Iterate through every line of the input file and parse warnings
        for i in range(0, len(error_indices)):
            # Initialize variables
            warning_text = []

            # Get the index line
            error_index = error_indices[i]

            # Warning name is the last parenthesised token on the marker line
            warning_name = list(filter(None, re.split('[()]', input_data[error_index].strip())))[-1].strip()

            # The line *before* the marker holds "<file>:<line>" location info
            line = input_data[error_index - 1].strip()
            line_split = list(filter(None, re.split(':', line)))
            warning_file = line_split[-2]
            warning_line = int(line_split[-1])

            # Increment the warning count
            warning_count = i + 1

            # Warning body runs up to 2 lines before the next marker (skipping
            # the next warning's location line), or to end of file for the last
            if i < len(error_indices) - 1:
                warning_index_end = error_indices[i + 1] - 2
            else:
                warning_index_end = len(input_data)

            for j in range(error_index + 1, warning_index_end):
                # Add the line to the list, if it's not blank
                if not input_data[j].strip() == '':
                    warning_text.append(input_data[j].strip())

            # Write the data to the output file
            output_fh.write('%s%03d <%s> :%s:%d: %s\n' % (ID_PREFIX, warning_count, WARNING_LEVEL, warning_file,
                                                          warning_line, warning_name))
            for line in warning_text:
                output_fh.write('    %s\n' % line)
            output_fh.write('\n')

    # Change the permissions of the output file (438 == 0o666, world read/write)
    os.chmod(parsed_output_file, 438)
def parse_warnings_2019_06(raw_input_file, parsed_output_file):
    """This function parses the raw Coverity warnings (version 2019.06) into the SCRUB format.

    Inputs:
        - raw_input_file: Full path to the file containing raw Coverity warnings [string]
        - parsed_output_file: Full path to the file where the parsed warnings will be stored [string]
    """
    # Print status message
    logging.info('')
    logging.info('\tParsing results...')
    logging.info('\t>> Executing command: get_coverity_warnings.parse_warnings_2019_06(%s, %s)',
                 raw_input_file, parsed_output_file)
    logging.info('\t>> From directory: %s', os.getcwd())

    # Read the raw input file into memory
    with open(raw_input_file, 'r') as input_fh:
        raw_lines = input_fh.readlines()

    # Create the output file and emit one SCRUB entry per warning header
    with open(parsed_output_file, 'w+') as output_fh:
        # Locate the line index of every warning header
        header_indices = get_error_indices(raw_input_file)

        for position, header_index in enumerate(header_indices):
            # The warning name is the last colon-separated field of the header
            header_fields = list(filter(None, re.split(':', raw_lines[header_index].strip())))
            warning_name = header_fields[-1].strip()

            # The preceding line carries the "<file>:<line>" location
            location_fields = list(filter(None, re.split(':', raw_lines[header_index - 1].strip())))
            warning_file = location_fields[-2]
            warning_line = int(location_fields[-1])

            # Warning IDs are numbered starting at 1
            warning_count = position + 1

            # The warning body runs until two lines before the next header
            # (its location line and separator), or to end of file
            if position < len(header_indices) - 1:
                body_end = header_indices[position + 1] - 2
            else:
                body_end = len(raw_lines)
            body_lines = [raw_lines[j].strip()
                          for j in range(header_index + 1, body_end)
                          if not raw_lines[j].strip() == '']

            # Emit the warning header followed by its indented body text
            output_fh.write('%s%03d <%s> :%s:%d: %s\n' % (ID_PREFIX, warning_count, WARNING_LEVEL, warning_file,
                                                          warning_line, warning_name))
            for line in body_lines:
                output_fh.write('    %s\n' % line)
            output_fh.write('\n')

    # Change the permissions of the output file
    os.chmod(parsed_output_file, 438)
def parse_warnings_legacy(raw_input_file, parsed_output_file):
    """This function parses the raw Coverity warnings (version 2018.09 and older) into the SCRUB format.

    Inputs:
        - raw_input_file: Full path to the file containing raw Coverity warnings [string]
        - parsed_output_file: Full path to the file where the parsed warnings will be stored [string]
    """
    # Print status message
    logging.info('')
    logging.info('\tParsing results...')
    logging.info('\t>> Executing command: get_coverity_warnings.parse_warnings_legacy(%s, %s)',
                 raw_input_file, parsed_output_file)
    logging.info('\t>> From directory: %s', os.getcwd())
    # Import the input data file
    with open(raw_input_file, 'r') as input_fh:
        input_data = input_fh.readlines()
    # Create the output file
    with open(parsed_output_file, 'w+') as output_fh:
        # Find the index of every line that starts a new warning
        error_indices = get_error_indices(raw_input_file)
        # Iterate through every line of the input file and parse warnings
        for i in range(0, len(error_indices)):
            # Initialize variables
            warning_text = []
            # Get the index line
            error_index = error_indices[i]
            # Get the name of the warnings (last colon-separated field of the header)
            warning_name = list(filter(None, re.split(':', input_data[error_index].strip())))[-1].strip()
            # Get the location information
            # NOTE: unlike the 2019.06/2019.12 formats, the legacy
            # "<file>:<line>" location is on the line AFTER the header
            line = input_data[error_index + 1].strip()
            line_split = list(filter(None, re.split(':', line)))
            warning_file = line_split[-2]
            warning_line = int(line_split[-1])
            # Increment the warning count
            warning_count = i + 1
            # Get the warning text: legacy output has no blank separator, so
            # the body ends one line (not two) before the next header
            if i < len(error_indices)-1:
                warning_index_end = error_indices[i+1]-1
            else:
                warning_index_end = len(input_data)
            for j in range(error_index+1, warning_index_end):
                # Add the line to the list (blank lines are kept here,
                # unlike in the newer-format parsers)
                warning_text.append(input_data[j].strip())
            # Write the data to the output file
            output_fh.write('%s%03d <%s> :%s:%d: %s\n' % (ID_PREFIX, warning_count, WARNING_LEVEL, warning_file,
                                                          warning_line, warning_name))
            for line in warning_text:
                output_fh.write('    %s\n' % line)
            output_fh.write('\n')
    # Change the permissions of the output file
    os.chmod(parsed_output_file, 438)
def parse_warnings(raw_input_file, parsed_output_file, coverity_version_number):
    """This function will examine the raw_input_file to determine which parser will be used.

    Inputs:
        - raw_input_file: Full path to the file containing raw Coverity warnings [string]
        - parsed_output_file: Full path to the file where the parsed warnings will be stored [string]
        - coverity_version_number: Version number for Coverity instance being used [string]
    """
    # Parse the version string once instead of re-parsing it per comparison
    version = StrictVersion(coverity_version_number)

    # Select which parser should be used. Branches are tested newest-first,
    # so each elif already implies the upper bound; the original explicit
    # "< 2019.12" range check was redundant and has been removed.
    # NOTE(review): distutils.version.StrictVersion is deprecated (PEP 632);
    # consider migrating to packaging.version when a dependency is acceptable.
    if version >= StrictVersion('2019.12'):
        parse_warnings_2019_12(raw_input_file, parsed_output_file)
    elif version >= StrictVersion('2019.06'):
        parse_warnings_2019_06(raw_input_file, parsed_output_file)
    else:
        parse_warnings_legacy(raw_input_file, parsed_output_file)
| 38.63745
| 112
| 0.619612
|
4a15f59e01edcf65305feae60cde2c3a85a06613
| 9,114
|
py
|
Python
|
Greengrass Core/trialteration_core.py
|
simformsolutions/Beacon-positioning-using-aws-greengrass
|
c82cb3f266dbc9cfdbe17f8bcbd27807a3162e15
|
[
"Apache-2.0"
] | 1
|
2019-04-05T07:16:05.000Z
|
2019-04-05T07:16:05.000Z
|
Greengrass Core/trialteration_core.py
|
simformsolutions/Beacon-positioning-using-aws-greengrass
|
c82cb3f266dbc9cfdbe17f8bcbd27807a3162e15
|
[
"Apache-2.0"
] | null | null | null |
Greengrass Core/trialteration_core.py
|
simformsolutions/Beacon-positioning-using-aws-greengrass
|
c82cb3f266dbc9cfdbe17f8bcbd27807a3162e15
|
[
"Apache-2.0"
] | null | null | null |
#python basicdiscovery.py --endpoint a3drj1nn7u6229.iot.us-east-1.amazonaws.com --rootCA root-ca-cert.pem --cert c511834aea.cert.pem --key c511834aea.private.key --thingName rpi2 --topic 'hello/world/send' --mode both
import os
import sys
import time
import uuid
import json
import logging
import argparse
from AWSIoTPythonSDK.core.greengrass.discovery.providers import DiscoveryInfoProvider
from AWSIoTPythonSDK.core.protocol.connection.cores import ProgressiveBackOffCore
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryInvalidRequestException
AllowedActions = ['both', 'publish', 'subscribe']  # valid values for the --mode argument

# Most recent beacon (x, y) position; updated by customOnMessage() through
# trilateration and published (then reset) by the main loop below.
x = 0
y = 0
# General message notification callback
def customOnMessage(message):
    """Handle an incoming MQTT distance message and update the beacon position.

    The payload is JSON of the form {"message": "<thing>", "distance<n>": <value>}.
    The reported distance is persisted to "<thing>.txt"; the latest distances of
    all three anchors are then read back from disk and the beacon position is
    recomputed by trilateration into the module-level globals ``x`` and ``y``.

    NOTE(review): like the original, this raises if any of the three distance
    files does not exist yet (i.e. before every anchor has reported once).
    """
    global x, y
    print('Received message on topic %s: %s\n' % (message.topic, message.payload))
    data = json.loads(message.payload)

    # Map each reporting thing to the JSON key holding its distance and the
    # file used to persist that distance between messages.
    sources = {
        'rpi1': ('distance1', 'rpi1.txt'),
        'rpi3': ('distance3', 'rpi3.txt'),
        'rpi4': ('distance4', 'rpi4.txt'),
    }
    sender = data['message']
    if sender in sources:
        key, filename = sources[sender]
        distance = data[key]
        # BUG FIX: the original used the invalid file mode "w+r", which raises
        # ValueError on Python 3. Mode "w" truncates on open, so the explicit
        # truncate() call is no longer needed; "with" guarantees the close.
        with open(filename, 'w') as f:
            f.write(str(float(distance)))
        print(distance)

    # Read the latest distance reported by each anchor from disk
    with open('rpi4.txt', 'r') as f:
        d3 = float(f.read())  # distance from the rpi4 anchor
    print("d3=" + str(float(d3)))
    with open('rpi3.txt', 'r') as f:
        d2 = float(f.read())  # distance from the rpi3 anchor
    print("d2=" + str(float(d2)))
    with open('rpi1.txt', 'r') as f:
        d1 = float(f.read())  # distance from the rpi1 anchor
    print("d1=" + str(float(d1)))

    # Trilateration formula; R1..R3 are the known anchor coordinates
    R1 = (0, 0)
    R2 = (4.2, 0)
    R3 = (2.4, 1.8)

    # Coefficients of the linearized circle-intersection equations
    A = R1[0]**2 + R1[1]**2 - d1**2
    B = R2[0]**2 + R2[1]**2 - d2**2
    C = R3[0]**2 + R3[1]**2 - d3**2
    X32 = R3[0] - R2[0]
    X13 = R1[0] - R3[0]
    X21 = R2[0] - R1[0]
    Y32 = R3[1] - R2[1]
    Y13 = R1[1] - R3[1]
    Y21 = R2[1] - R1[1]

    # Calculate the beacon position coordinates
    x = (A * Y32 + B * Y13 + C * Y21) / (2.0 * (R1[0] * Y32 + R2[0] * Y13 + R3[0] * Y21))
    y = (A * X32 + B * X13 + C * X21) / (2.0 * (R1[1] * X32 + R2[1] * X13 + R3[1] * X21))
MAX_DISCOVERY_RETRIES = 10  # max attempts before giving up on GGC discovery
GROUP_CA_PATH = "./groupCA/"  # directory where the group CA certificate is persisted

# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path")
parser.add_argument("-n", "--thingName", action="store", dest="thingName", default="Bot", help="Targeted thing name")
parser.add_argument("-t", "--topic", action="store", dest="topic", default="sdk/test/Python", help="Targeted topic")
parser.add_argument("-m", "--mode", action="store", dest="mode", default="both",
                    help="Operation modes: %s"%str(AllowedActions))
parser.add_argument("-M", "--message", action="store", dest="message", default="Hello World!",
                    help="Message to publish")

args = parser.parse_args()
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
clientId = args.thingName  # the thing name doubles as the MQTT client id
thingName = args.thingName
topic = args.topic

# Validate arguments. parser.error() exits the process, so the explicit
# exit(2) calls below are effectively unreachable safety nets.
if args.mode not in AllowedActions:
    parser.error("Unknown --mode option %s. Must be one of %s" % (args.mode, str(AllowedActions)))
    exit(2)

if not args.certificatePath or not args.privateKeyPath:
    parser.error("Missing credentials for authentication.")
    exit(2)

# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)

# Progressive back off core
backOffCore = ProgressiveBackOffCore()

# Discover GGCs
discoveryInfoProvider = DiscoveryInfoProvider()
discoveryInfoProvider.configureEndpoint(host)
discoveryInfoProvider.configureCredentials(rootCAPath, certificatePath, privateKeyPath)
discoveryInfoProvider.configureTimeout(10)  # 10 sec
retryCount = MAX_DISCOVERY_RETRIES
discovered = False
groupCA = None  # path of the persisted group CA file once discovery succeeds
coreInfo = None  # connectivity info of the discovered Greengrass core

# Attempt discovery, backing off progressively between failed attempts
while retryCount != 0:
    try:
        discoveryInfo = discoveryInfoProvider.discover(thingName)
        caList = discoveryInfo.getAllCas()
        coreList = discoveryInfo.getAllCores()

        # We only pick the first ca and core info
        groupId, ca = caList[0]
        coreInfo = coreList[0]
        print("Discovered GGC: %s from Group: %s" % (coreInfo.coreThingArn, groupId))

        print("Now we persist the connectivity/identity information...")
        groupCA = GROUP_CA_PATH + groupId + "_CA_" + str(uuid.uuid4()) + ".crt"
        if not os.path.exists(GROUP_CA_PATH):
            os.makedirs(GROUP_CA_PATH)
        groupCAFile = open(groupCA, "w")
        groupCAFile.write(ca)
        groupCAFile.close()

        discovered = True
        print("Now proceed to the connecting flow...")
        break
    except DiscoveryInvalidRequestException as e:
        # A malformed request will not succeed on retry, so stop immediately
        print("Invalid discovery request detected!")
        print("Type: %s" % str(type(e)))
        print("Error message: %s" % e.message)  # NOTE(review): e.message is Python 2 only; use str(e) on Python 3
        print("Stopping...")
        break
    except BaseException as e:
        # Any other failure is retried after a progressive back-off delay
        print("Error in discovery!")
        print("Type: %s" % str(type(e)))
        print("Error message: %s" % e.message)  # NOTE(review): e.message is Python 2 only; use str(e) on Python 3
        retryCount -= 1
        print("\n%d/%d retries left\n" % (retryCount, MAX_DISCOVERY_RETRIES))
        print("Backing off...\n")
        backOffCore.backOff()

if not discovered:
    print("Discovery failed after %d retries. Exiting...\n" % (MAX_DISCOVERY_RETRIES))
    sys.exit(-1)
# Iterate through all connection options for the core and use the first successful one
myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)
myAWSIoTMQTTClient.configureCredentials(groupCA, privateKeyPath, certificatePath)
myAWSIoTMQTTClient.onMessage = customOnMessage  # all subscribed messages land in customOnMessage

connected = False
for connectivityInfo in coreInfo.connectivityInfoList:
    currentHost = connectivityInfo.host
    currentPort = connectivityInfo.port
    print("Trying to connect to core at %s:%d" % (currentHost, currentPort))
    myAWSIoTMQTTClient.configureEndpoint(currentHost, currentPort)
    try:
        myAWSIoTMQTTClient.connect()
        connected = True
        break
    except BaseException as e:
        print("Error in connect!")
        print("Type: %s" % str(type(e)))
        print("Error message: %s" % e.message)  # NOTE(review): e.message is Python 2 only; use str(e) on Python 3

if not connected:
    print("Cannot connect to core %s. Exiting..." % coreInfo.coreThingArn)
    sys.exit(-2)

# Successfully connected to the core
if args.mode == 'both' or args.mode == 'subscribe':
    myAWSIoTMQTTClient.subscribe(topic, 0, None)
time.sleep(2)

loopCount = 0
while True:
    if args.mode == 'both' or args.mode == 'publish':
        print('x=%d, y=%d' %(x,y))
        # Only publish once a non-trivial position has been computed
        if x != 0 and y != 0:
            #publish Trilateration cordinates
            message = {}
            message['x'] = str(x)
            message['y'] = str(y)
            messageJson = json.dumps(message)
            myAWSIoTMQTTClient.publish("hello/world/position", messageJson, 0)
            if x > 2 and y > 2:
                #Trigger Lambda function
                messagee = {}
                messagee['message'] = "Hello from AWS IoT console"
                messageJsonn = json.dumps(messagee)
                print (messageJsonn)
                myAWSIoTMQTTClient.publish("hello/world/position/trigger", messageJsonn, 0)
            # Reset the position so the same fix is not published twice.
            # NOTE(review): 'global' at module scope is a no-op statement.
            global x
            x = 0
            global y
            y = 0
            if args.mode == 'both':
                print('Published topic %s: %s\n' % ("hello/world/position", messageJson))
    # loopCount += 1
    time.sleep(1)
| 37.04878
| 217
| 0.651854
|
4a15f67f195a0429a9bee2e33b198faefbae42d2
| 141
|
py
|
Python
|
Leetcode/2001-3000/2177. Find Three Consecutive Integers That Sum to a Given Number/2177.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/2001-3000/2177. Find Three Consecutive Integers That Sum to a Given Number/2177.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/2001-3000/2177. Find Three Consecutive Integers That Sum to a Given Number/2177.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
class Solution:
  def sumOfThree(self, num: int) -> List[int]:
    """Return three consecutive integers summing to num, or [] if impossible."""
    quotient, remainder = divmod(num, 3)
    if remainder:
      return []
    # num == 3 * quotient, so the middle integer is the quotient itself
    return [quotient - 1, quotient, quotient + 1]
| 20.142857
| 46
| 0.503546
|
4a15f76da7405d7fb6d0584362a438b886bf9001
| 3,121
|
py
|
Python
|
gui/tests/test_numpad.py
|
a-bombarda/mvm-gui
|
e00c3fe39cf25c6fb2d2725891610da8885d1d76
|
[
"MIT"
] | null | null | null |
gui/tests/test_numpad.py
|
a-bombarda/mvm-gui
|
e00c3fe39cf25c6fb2d2725891610da8885d1d76
|
[
"MIT"
] | null | null | null |
gui/tests/test_numpad.py
|
a-bombarda/mvm-gui
|
e00c3fe39cf25c6fb2d2725891610da8885d1d76
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# from pytestqt import qt_compat
from pytestqt.qt_compat import qt_api
import pytest
import time
from .mvm_basics import *
from mainwindow import MainWindow
from numpad.numpad import NumPad
from PyQt5.QtCore import QCoreApplication
"""
TH01
"""
def test_createNumPad(qtbot):
    '''
    Test the creation of the NumPad instance
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    # The NumPad is constructed with the main window as its parent widget
    pad = NumPad(window)
    assert pad is not None
    time.sleep(0.5)  # let Qt settle before the next test starts
def checkCode():
    """Callback passed to NumPad.assign_code(); invoked when the entered code matches."""
    print("Ok")
    assert True
"""
TH02
"""
def test_codeNumPad(qtbot):
    '''
    Test the assignment and the comparison of the code
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    pad = NumPad(window)
    # Assign "1234" as the unlock code with checkCode as the success callback
    pad.assign_code("1234", checkCode)
    assert pad.func is not None
    # Check that the code is correctly set (the string is split into digits)
    assert pad.code == [1,2,3,4]
    # Try to set the code by entering the digits one at a time
    pad.input_number(1)
    pad.input_number(2)
    pad.input_number(3)
    pad.input_number(4)
    pad.check_code()
    # The stored code must remain unchanged after a successful check
    assert pad.code == [1, 2, 3, 4]
    time.sleep(0.5)
"""
TS01: Security Requirement - 1
"""
def test_lockTheScreen(qtbot):
    """TS01 / Security Requirement 1: locking the screen disables the GUI."""
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    window = MainWindow(config, esp32)
    window.show()
    qtbot.addWidget(window)
    # Click on the menù button
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    # Click on the settings button
    # NOTE(review): test_unlockTheScreen clicks window.button_start_settings
    # here instead — confirm which button name is the correct one
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsbar
    # Click on the lock screen button
    qtbot.mouseClick(window.button_lockscreen, QtCore.Qt.LeftButton)
    # Check if all the elements in the gui are locked
    assert window.toppane.isEnabled() == False
    assert window.home_button.currentWidget() == window.goto_unlock
    window.close()
"""
TH06
"""
def test_unlockTheScreen(qtbot):
    """TH06: unlock_screen() re-enables the GUI after it has been locked."""
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    # Click on the menù button
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    # Click on the settings button
    qtbot.mouseClick(window.button_start_settings, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsbar
    # Click on the lock screen button
    qtbot.mouseClick(window.button_lockscreen, QtCore.Qt.LeftButton)
    # Check if all the elements in the gui are locked
    assert window.toppane.isEnabled() == False
    assert window.home_button.currentWidget() == window.goto_unlock
    # Unlock the screen directly (the numpad entry flow is covered elsewhere)
    window.unlock_screen()
    assert window.toppane.isEnabled() == True
    window.close()
| 24.574803
| 72
| 0.707466
|
4a15f7bb59f4c659e7e9dd4bfbef8b1d8367a93d
| 8,160
|
py
|
Python
|
google-cloud-firestore/synth.py
|
naveed-ahmad/google-cloud-ruby
|
ec86e413a157e09ee0ff1080468dd75556d0908f
|
[
"Apache-2.0"
] | 1
|
2021-01-02T05:11:13.000Z
|
2021-01-02T05:11:13.000Z
|
google-cloud-firestore/synth.py
|
naveed-ahmad/google-cloud-ruby
|
ec86e413a157e09ee0ff1080468dd75556d0908f
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-firestore/synth.py
|
naveed-ahmad/google-cloud-ruby
|
ec86e413a157e09ee0ff1080468dd75556d0908f
|
[
"Apache-2.0"
] | 2
|
2019-10-14T17:26:31.000Z
|
2019-10-16T03:38:26.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import logging
import re
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()

# Generate the Firestore v1 GAPIC surface and copy it into this repo
v1_library = gapic.ruby_library(
    'firestore', 'v1',
    config_path='/google/firestore/artman_firestore_v1.yaml',
    artman_output_name='google-cloud-ruby/google-cloud-firestore'
)
s.copy(v1_library / 'lib/google/cloud/firestore/v1')
s.copy(v1_library / 'lib/google/cloud/firestore/v1.rb')
s.copy(v1_library / 'lib/google/firestore/v1')
s.copy(v1_library / 'test/google/cloud/firestore/v1')

# Generate the Firestore v1beta1 GAPIC surface and copy it into this repo
v1beta1_library = gapic.ruby_library(
    'firestore', 'v1beta1',
    config_path='/google/firestore/artman_firestore.yaml',
    artman_output_name='google-cloud-ruby/google-cloud-firestore'
)
s.copy(v1beta1_library / 'lib/google/cloud/firestore/v1beta1')
s.copy(v1beta1_library / 'lib/google/cloud/firestore/v1beta1.rb')
s.copy(v1beta1_library / 'lib/google/firestore/v1beta1')
s.copy(v1beta1_library / 'test/google/cloud/firestore/v1beta1')

# Generate the Firestore Admin v1 GAPIC surface and copy it into this repo
admin_v1_library = gapic.ruby_library(
    'firestore-admin', 'v1',
    config_path='/google/firestore/admin/artman_firestore_v1.yaml',
    artman_output_name='google-cloud-ruby/google-cloud-firestore_admin'
)
s.copy(admin_v1_library / 'lib/google/cloud/firestore/admin.rb')
s.copy(admin_v1_library / 'lib/google/cloud/firestore/admin/v1')
s.copy(admin_v1_library / 'lib/google/cloud/firestore/admin/v1.rb')
s.copy(admin_v1_library / 'lib/google/firestore/admin/v1')
s.copy(admin_v1_library / 'test/google/cloud/firestore/admin/v1')
# PERMANENT: Handwritten layer owns Firestore.new so low-level clients need to
# use Firestore::V1beta1.new instead of Firestore.new(version: :v1beta1).
# Update the examples and tests.

# v1beta1 client: rewrite requires and constructor calls in docs/tests
s.replace(
    [
        'lib/google/cloud/firestore/v1beta1/firestore_client.rb',
        'test/google/cloud/firestore/v1beta1/firestore_client_test.rb'
    ],
    'require "google/cloud/firestore"',
    'require "google/cloud/firestore/v1beta1"')
s.replace(
    [
        'lib/google/cloud/firestore/v1beta1/firestore_client.rb',
        'test/google/cloud/firestore/v1beta1/firestore_client_test.rb'
    ],
    'Google::Cloud::Firestore\\.new\\(version: :v1beta1\\)',
    'Google::Cloud::Firestore::V1beta1.new')

# v1 client: same treatment
s.replace(
    [
        'lib/google/cloud/firestore/v1/firestore_client.rb',
        'test/google/cloud/firestore/v1/firestore_client_test.rb'
    ],
    'require "google/cloud/firestore"',
    'require "google/cloud/firestore/v1"')
s.replace(
    [
        'lib/google/cloud/firestore/v1/firestore_client.rb',
        'test/google/cloud/firestore/v1/firestore_client_test.rb'
    ],
    'Google::Cloud::Firestore\\.new\\(version: :v1\\)',
    'Google::Cloud::Firestore::V1.new')

# v1 admin client: same treatment, but pointing at FirestoreAdminClient
s.replace(
    [
        'lib/google/cloud/firestore/v1/firestore_admin_client.rb',
        'test/google/cloud/firestore/v1/firestore_admin_client_test.rb'
    ],
    'require "google/cloud/firestore"',
    'require "google/cloud/firestore/v1"')
s.replace(
    [
        'lib/google/cloud/firestore/v1/firestore_admin_client.rb',
        'test/google/cloud/firestore/v1/firestore_admin_client_test.rb'
    ],
    'Google::Cloud::Firestore\\.new\\(version: :v1\\)',
    'Google::Cloud::Firestore::V1::FirestoreAdminClient.new')
# Support for service_address
s.replace(
[
'lib/google/cloud/firestore/v*.rb',
'lib/google/cloud/firestore/v*/*_client.rb',
'lib/google/cloud/firestore/admin/v*.rb',
'lib/google/cloud/firestore/admin/v*/*_client.rb'
],
'\n(\\s+)#(\\s+)@param exception_transformer',
'\n\\1#\\2@param service_address [String]\n' +
'\\1#\\2 Override for the service hostname, or `nil` to leave as the default.\n' +
'\\1#\\2@param service_port [Integer]\n' +
'\\1#\\2 Override for the service port, or `nil` to leave as the default.\n' +
'\\1#\\2@param exception_transformer'
)
s.replace(
[
'lib/google/cloud/firestore/v*.rb',
'lib/google/cloud/firestore/v*/*_client.rb',
'lib/google/cloud/firestore/admin/v*.rb',
'lib/google/cloud/firestore/admin/v*/*_client.rb'
],
'\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
'\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
[
'lib/google/cloud/firestore/v*.rb',
'lib/google/cloud/firestore/v*/*_client.rb',
'lib/google/cloud/firestore/admin/v*.rb',
'lib/google/cloud/firestore/admin/v*/*_client.rb'
],
',\n(\\s+)lib_name: lib_name,\n\\s+lib_version: lib_version',
',\n\\1lib_name: lib_name,\n\\1service_address: service_address,\n\\1service_port: service_port,\n\\1lib_version: lib_version'
)
s.replace(
[
'lib/google/cloud/firestore/v*/*_client.rb',
'lib/google/cloud/firestore/admin/v*/*_client.rb'
],
'service_path = self\\.class::SERVICE_ADDRESS',
'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
[
'lib/google/cloud/firestore/v*/*_client.rb',
'lib/google/cloud/firestore/admin/v*/*_client.rb'
],
'port = self\\.class::DEFAULT_SERVICE_PORT',
'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
    """Backslash-escape unescaped `{word}` sequences in the matched comment text.

    Repeats the substitution until no further occurrences remain; `{...}`
    inside backtick code spans, or already preceded by `#`, `$`, or a
    backslash, is left untouched.
    """
    pattern = re.compile('^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')
    content = match.group(0)
    count = 1
    while count:
        content, count = pattern.subn('\\1\\3\\\\\\\\{\\4}', content)
    return content
# Apply escape_braces to every doc-comment line containing an unescaped brace
s.replace(
    [
        'lib/google/cloud/firestore/v1*/**/*.rb',
        'lib/google/cloud/firestore/admin/v1*/**/*.rb'
    ],
    '\n\\s+#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
    escape_braces)

# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
    [
        'lib/google/cloud/firestore/v1*/*_client.rb',
        'lib/google/cloud/firestore/admin/v1*/*_client.rb'
    ],
    '(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
    '\\1\\2# @private\n\\2\\3')

# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
    'lib/**/*.rb',
    '\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
    '\\1\n\\6')

# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
    'lib/**/*.rb',
    'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
    'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
    'lib/**/*.rb',
    'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
    'https://googleapis.github.io/google-cloud-ruby'
)

# https://github.com/googleapis/google-cloud-ruby/issues/3058
for version in ['v1', 'v1beta1', 'admin/v1']:
    s.replace(
        f'lib/google/cloud/firestore/{version}/*_client.rb',
        f'(require \".*credentials\"\n)\n',
        f'\\1require "google/cloud/firestore/version"\n\n'
    )
    s.replace(
        f'lib/google/cloud/firestore/{version}/*_client.rb',
        'Gem.loaded_specs\[.*\]\.version\.version',
        'Google::Cloud::Firestore::VERSION'
    )

# Fix links for devsite migration
s.replace(
    'lib/**/*.rb',
    'https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger',
    'https://googleapis.dev/ruby/google-cloud-logging/latest'
)
s.replace(
    'lib/**/*.rb',
    'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
    'https://googleapis.dev/ruby/google-cloud-firestore/latest/file.AUTHENTICATION.html'
)
| 36.756757
| 130
| 0.668995
|
4a15f7c820cf5b1857dd3d3b389f99bc589da94a
| 875
|
py
|
Python
|
deepcave/__init__.py
|
PhMueller/DeepCAVE
|
2aec109470e667d4bbbe0cd0d9abb11e683a23c4
|
[
"Apache-2.0"
] | null | null | null |
deepcave/__init__.py
|
PhMueller/DeepCAVE
|
2aec109470e667d4bbbe0cd0d9abb11e683a23c4
|
[
"Apache-2.0"
] | null | null | null |
deepcave/__init__.py
|
PhMueller/DeepCAVE
|
2aec109470e667d4bbbe0cd0d9abb11e683a23c4
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from deepcave.runs.recorder import Recorder
from deepcave.runs.objective import Objective
from deepcave.__version__ import __version__
version = __version__  # re-export the package version under a short name

# sys.argv[0] is the script being executed; the server/worker entry points get
# the full app/queue/cache wiring, while plain library consumers only get the
# recording API (Recorder / Objective).
exec_file = sys.argv[0]

if "server.py" in exec_file or "worker.py" in exec_file:
    from deepcave.utils.cache import Cache  # noqa
    from deepcave.utils.run_caches import RunCaches  # noqa
    from deepcave.server import get_app  # noqa
    from deepcave.queue import Queue  # noqa
    from deepcave.config import CONFIG, META  # noqa

    app = get_app()
    queue = Queue(CONFIG["REDIS_URL"])

    # Meta cache
    c = Cache(
        filename=os.path.join(CONFIG["CACHE_DIR"], "meta.json"),
        defaults=META)

    # Run caches
    rc = RunCaches()

    __all__ = ["version", "app", "queue", "c", "rc", "Recorder", "Objective"]
else:
    __all__ = ["version", "Recorder", "Objective"]
| 27.34375
| 77
| 0.685714
|
4a15f9612734cead03c7da5c99fee9dda6a80f42
| 19,741
|
py
|
Python
|
AddToFile.py
|
nchauhan890/sublime-text-add-to-file
|
44272a036fe6adb32a104da0004a8277fbb9fbb7
|
[
"MIT"
] | 2
|
2018-07-26T09:35:57.000Z
|
2021-05-05T18:49:24.000Z
|
AddToFile.py
|
nchauhan890/sublime-text-add-to-file
|
44272a036fe6adb32a104da0004a8277fbb9fbb7
|
[
"MIT"
] | 2
|
2018-06-28T15:37:31.000Z
|
2018-07-07T10:59:46.000Z
|
AddToFile.py
|
nchauhan890/sublime-text-add-to-file
|
44272a036fe6adb32a104da0004a8277fbb9fbb7
|
[
"MIT"
] | 2
|
2018-11-14T12:17:13.000Z
|
2021-01-26T12:06:11.000Z
|
import os
import sublime
import sublime_plugin
class AddToCommand(sublime_plugin.TextCommand):
"""main AddToFile command which handles display and selection
of views and runs command to insert text
contains various methods to help separate functions provided"""
def get_preview(self, view):
"""get the actual preview lines to be shown in file selection
returns list of strings"""
if isinstance(view, sublime.View):
return view.settings().get('preview_lines',
self.get_contents(view))
else:
return self.get_contents(self, view)
# send the invald view to this method as it handles
# non-view arguements passed
def get_items(self):
"""return list of active view objects except the current view"""
return [view for view in self.view.window().views()
# if view.file_name()
if view != self.view]
def get_view_path(self, view):
"""return the full file path of the view passed"""
try:
return view.file_name()
# return a the 'unsplit' file path
except (AttributeError, TypeError):
return os.path.join('untitled', 'untitled')
# use os.path.join() to use the correct backslash
# or forward slash depending on platform
def get_contents(self, view):
"""get the content of the first 3 lines of a view
returns a list of strings"""
length = 0
if isinstance(view, sublime.View):
items = []
for _ in range(3):
items.append(view.substr(view.line(length)))
# add the line contents to list
length += len(view.substr(view.line(length))) + 1
# record the cumulative character count to get each line
# and make up +1 for newline character
else:
items = ['' for _ in range(3)]
# return empty values if the view parameter isnt a view object
return items
def split_view_path(self, view):
"""return the split view path of the passed view"""
try:
return list(os.path.split(view.file_name()))
# return a list containing [path, file]
except (AttributeError, TypeError):
return ['untitled', 'untitled']
# handling non-view values
def get_split_view_paths(self, views):
"""call split_view_path on a list of views"""
return [self.split_view_path(view) for view in views]
# returnlist = []
# for item in views:
# returnlist.append(self.split_view_path(item))
# return returnlist
    def get_view_paths(self, views):
        """Return the full file path for a list of views.

        NOTE(review): unlike get_view_path, this calls view.file_name()
        directly, so non-view entries raise instead of falling back to the
        'untitled' placeholder — confirm callers only pass real views.
        """
        return [view.file_name() for view in views]
def on_done(self, val):
"""handles inserting text, status bar messages, creation
of new files and view switching/scrolling"""
settings = sublime.load_settings('AddToFile.sublime-settings')
v = self.view # save the starting view
if val == -1:
return # end if called with -1 value
elif val == 'New File':
f = self.view.window().new_file()
self.items = self.get_items()
# do the same as below, but it will not have to get and item from
# the list with a string index which would have raised an error
elif self.items[val] == 'New File': # support for
# AddToNewFileCommand invocation;
# if the selected option is 'New File',
# create a new file
f = self.view.window().new_file()
self.items = self.get_items()
else: # selected item isn't a file so must be 'new file'
f = self.items[val]
f.run_command('insert_to_end',
{"lines": [self.view.substr(s)
for s in self.view.sel()]})
self.view.window().focus_view(v) # use the starting view
# override when new file is created to stop focus switching
# run insert command to add text
string = settings.get('status_message')
string = string.format(name=self.split_view_path(f)[1],
path=self.get_view_path(f),
dir=self.split_view_path(f)[0],
# substitute values from destination file
sourcename=self.split_view_path(
self.view.file_name())[1],
sourcepath=self.get_view_path(
self.view.file_name()),
sourcedir=self.split_view_path(
self.view.file_name())[0])
# substitute values from source file
# set string using template from settings, substituting
# values accordingly
if not settings.get('keep_focus', True):
self.view.window().focus_view(f)
# switch focus to destination file if specified in settings
if settings.get('scroll_view', False):
f.show(f.size())
if settings.get('show_status_message', False):
# run status bar message command if value specified
# in settings
self.view.window().run_command('add_status_bar_msg',
{"msg": string})
    def run(self, edit, new_file=False, smart=False):
        """handles display of popup with correct data specified in settings

        new_file: when True, skip the selection panel entirely and copy
        the selection straight into a new file (AddToNewFileCommand).
        smart: when True, use SmartDisplay to disambiguate duplicate
        file names in the panel labels.
        """
        settings = sublime.load_settings('AddToFile.sublime-settings')
        # load settings file
        # if not ''.join(self.view.substr(s) for s in self.view.sel()):
        #     return
        if all(s.empty() for s in self.view.sel()):
            return  # end if the selection is empty
        self.items = self.get_items()
        # add list of views, excluding the current view
        # self.view.run_command('change_preview')
        if smart is True or settings.get('auto_smart', False):
            self.paths = SmartDisplay.run(self.items)
            # self.paths = [os.path.join(*path) for path in self.paths]
            # get path names given by the SmartDisplay class
            for i, p in enumerate(self.paths):
                if len(p) > 1:
                    # show the nearest disambiguating folder in brackets
                    self.paths[i] = '({}) {}'.format(p[-1], p[0])
                else:
                    self.paths[i] = p[0]
                # replace the paths
            if settings.get('suggest_new_file', False):
                self.items.append('New File')
                if (settings.get('show_file_path', False)
                        or settings.get('show_containing_folder', False)):
                    self.paths.append('New File')
                else:
                    # self.paths.append(['New File', 'New File'])
                    self.paths.append('New File')
            if settings.get('show_popup', False):
                # show popup with file path - ignores file preview
                # as popup cannot have multiple lines per item
                print(self.paths)
                self.view.show_popup_menu(self.paths, self.on_done)
            else:
                if settings.get('show_preview', False):
                    # get a list of starting content of the open views
                    # excluding the current view
                    self.view_content = [self.get_preview(view)
                                         for view in self.view.window().views()
                                         if view != self.view]
                    self.popup = [[path] + content
                                  for path, content in zip(self.paths,
                                                           self.view_content)]
                    # get a list with the path and the view content to the file
                    # at the path using zip()
                    if settings.get('suggest_new_file', False):
                        self.popup.append(['New File', '', '', ''])
                        # and 'New File' to popup list with blank lines
                    self.view.window().show_quick_panel(self.popup,
                                                        self.on_done)
                    # show panel with file path and preview
                else:
                    # otherwise show the panel with just the file paths
                    self.view.window().show_quick_panel(self.paths,
                                                        self.on_done)
            return  # end function as it has already inserted text
        if new_file is True:
            self.on_done('New File')  # automatically run on_done with value
            # New File to override method
            return  # end the command when it has finished
        if settings.get('show_file_path', False):
            self.paths = self.get_view_paths(self.items)
            # get the 'unsplit' paths
        elif settings.get('show_containing_folder', False):
            self.paths = [os.path.join(
                str(os.path.split(os.path.split(path)[0])[1]),
                # this returns the folder containing the file:
                # 1 - split the path
                # 2 - get the head [0]
                # 3 - split the head
                # 4 - get the tail [1]
                str(os.path.split(path)[1]))
                # this returns the file itself
                for path in self.get_view_paths(self.items)]
            # split the directory to the containing folder
            # and select the folder itself;
            # join to the file name returned from
            # get_split_view_paths
        else:
            self.paths = self.get_split_view_paths(self.items)
            # make a list of the view paths
        if settings.get('add_to_single_view', False) and len(self.items) == 1:
            self.on_done(0)
            # auto-run on_done if there's only 1 other view and if specified
            # in settings
        if settings.get('show_preview', False):
            # self.view_content = [self.get_contents(view)
            #                      for view in self.view.window().views()
            #                      if view != self.view]
            # # get a list of starting content of the open views
            # # excluding the current view
            self.view_content = [self.get_preview(view)
                                 for view in self.view.window().views()
                                 if view != self.view]
            self.popup = []
            for path, content in zip(self.paths, self.view_content):
                if (settings.get('show_file_path', False)
                        or settings.get('show_containing_folder', False)):
                    a = [path]  # add path to list since it's not a list
                else:
                    a = path[:]  # copy 'split' path list to temp variable
                    a.pop(0)
                a.extend(content)
                self.popup.append(a)
            # create popup list of strings which contains the file name and
            # content
            if settings.get('suggest_new_file', False):
                self.popup.append(['New File', '', '', ''])
                # and 'New File' to popup list with blank lines
        if settings.get('suggest_new_file', False):
            self.items.append('New File')
            if (settings.get('show_file_path', False)
                    or settings.get('show_containing_folder', False)):
                self.paths.append('New File')
            else:
                self.paths.append(['New File', 'New File'])
                # self.paths.append('New File')
            # add 'new file' option if specified settings
            # 'override' addition of a path to self.paths by creating
            # a mock path called 'New File / New File'
        if settings.get('show_popup', False):
            # continue if the simple popup option is specified
            # (file previews ignored)
            if (settings.get('show_file_path', False)
                    or settings.get('show_containing_folder', False)):
                self.view.show_popup_menu([path
                                           for path in self.paths],
                                          self.on_done)
                # create popup with the *file paths* if specified in settings
            else:
                # create popup with the *file names* if specified in settings
                self.view.show_popup_menu([path[1]
                                           for path in self.paths],
                                          self.on_done)
        else:
            if settings.get('show_preview', False):
                self.view.window().show_quick_panel(self.popup,
                                                    self.on_done)
                # create popup with the *file name + preview* if
                # specified in settings
            elif (settings.get('show_file_path', False)
                    or settings.get('show_containing_folder', False)):
                self.view.window().show_quick_panel([path
                                                     for path in self.paths],
                                                    self.on_done)
                # create popup with the *file paths* if specified in settings
            else:
                self.view.window().show_quick_panel([path[1]
                                                     for path in self.paths],
                                                    self.on_done)
                # create popup with the *file names* if specified in settings
class InsertToEndCommand(sublime_plugin.TextCommand):
    """Append each string in ``lines`` to the end of the view.

    Every entry is written at the current end of the buffer and is
    followed by a newline character.
    """

    def run(self, edit, lines):
        view = self.view
        for text in lines:
            view.insert(edit, view.size(), text)
            view.insert(edit, view.size(), '\n')
class AddStatusBarMsg(sublime_plugin.WindowCommand):
    """Display ``msg`` in the Sublime Text status bar."""

    def run(self, msg):
        status_text = msg
        sublime.status_message(status_text)
class AddToNewFileCommand(sublime_plugin.TextCommand):
    """Copy the selection straight into a brand-new file.

    Runs the ``add_to`` command with ``new_file`` set, which skips the
    selection panel and sends the selected text directly to a new view.
    Equivalent JSON invocation: command "add_to", args {"new_file": true}.
    """

    def run(self, edit):
        self.view.run_command('add_to', {"new_file": True})
class ChangePreviewCommand(sublime_plugin.TextCommand):
    """Record which three lines should be previewed for this view.

    Starting at the line containing the first selection, stores that
    line and the following two in the view-specific setting
    'preview_lines'.
    """

    def run(self, edit):
        view = self.view
        # character offset of the start of the line holding the first cursor
        offset = view.line(view.sel()[0]).begin()
        preview = []
        for _ in range(3):
            line_text = view.substr(view.line(offset))
            preview.append(line_text)
            # advance past this line and its trailing newline
            offset += len(line_text) + 1
        view.settings().set('preview_lines', preview)
class GetPreviewCommand(sublime_plugin.TextCommand):
    """scroll the view to show the lines which will be previewed
    returns the 3 lines as a list of strings"""
    def run(self, edit):
        # 'user interface' style command which scrolls the view
        # to show the lines which are previewed
        # sublime.message_dialog(', '.join(self.view.settings().get(
        #     'preview_lines', [])))
        # NOTE(review): if 'preview_lines' was never set, the default []
        # makes the [0] below raise IndexError -- confirm callers always
        # run change_preview first
        self.view.show(
            self.view.find(
                self.view.settings().get('preview_lines', [])[0],
                0).begin())
        # scroll the view to the begin point of the first line that
        # will be previewed
        # NOTE(review): the fallback below calls AddToCommand.get_contents
        # with ``self`` being a GetPreviewCommand (duck typing), and is
        # evaluated on every call even when the setting exists
        return self.view.settings().get('preview_lines',
                                        AddToCommand.get_contents(self,
                                                                  self.view))
        # return the individual view's settings of what 3 lines to preview,
        # if there is no preview set, use the standard preview from line 1
class SmartDisplay:
    """Build the labels shown in the file-selection panel.

    Disambiguates duplicate file names by prepending parent-directory
    parts, and renders views without a file on disk as 'untitled'.
    ``run`` returns one list of path parts per view.
    """

    @staticmethod
    def path_split(view):
        """Return the parts of the view's file path as a list.

        Views that have never been saved (``file_name()`` is ``None``)
        are reported as ``['untitled']``.
        """
        try:
            # 1. normalise the path  2. split it on the path separator
            return os.path.normpath(view.file_name()).split(os.path.sep)
        except (AttributeError, TypeError):
            # unsaved view: file_name() returned None
            return ['untitled']

    @staticmethod
    def run(views):
        """Return display labels for *views*, disambiguating duplicates."""
        sections = [SmartDisplay.path_split(view)
                    for view in views]
        # start each label as just the file name (last path component)
        ends = [[path[-1]] for path in sections]
        prev = []
        count = -1
        # keep adding parent-directory parts until no label changes
        while ends != prev:
            count -= 1
            # used for getting items from end of list - e.g. list[count]
            found = []
            prev = ends.copy()
            # snapshot for the convergence test on the next loop
            for n, label in enumerate(ends):
                # BUGFIX: the original compared ``label == 'untitled'``,
                # but entries are lists, so unsaved views were never
                # skipped; compare against the list form instead.
                if label == ['untitled']:
                    continue
                if ends.count(label) > 1:
                    # duplicate label: remember its position
                    found.append(n)
            for i in found:
                # for each position that had a duplicate name
                try:
                    ends[i] = ends[i] + [sections[i][count]]
                    # extend the label with the next-outer path part;
                    # ``count`` tracks how deep into the path we are
                except IndexError:
                    # path exhausted: leave the label as it is
                    # (debug print removed)
                    pass
        return ends
class SmartAddToCommand(sublime_plugin.TextCommand):
    """Invoke ``add_to`` with smart (disambiguated) display labels."""

    def run(self, edit):
        args = {"smart": True}
        self.view.run_command('add_to', args)
| 46.123832
| 80
| 0.527937
|
4a15fb4d1a30dd546c9f0de35c5225ff1d052a1d
| 2,200
|
py
|
Python
|
docs/examples/viz_mde.py
|
fury-gl/helios
|
14e39e0350b4b9666775ba0c4840d2e9887678c2
|
[
"MIT"
] | 3
|
2021-10-13T14:38:57.000Z
|
2021-10-16T19:40:14.000Z
|
docs/examples/viz_mde.py
|
fury-gl/helios
|
14e39e0350b4b9666775ba0c4840d2e9887678c2
|
[
"MIT"
] | 14
|
2021-07-04T19:00:57.000Z
|
2021-10-16T18:35:45.000Z
|
docs/examples/viz_mde.py
|
fury-gl/helios
|
14e39e0350b4b9666775ba0c4840d2e9887678c2
|
[
"MIT"
] | 3
|
2021-06-06T14:43:59.000Z
|
2021-10-17T19:03:54.000Z
|
"""
=====================================================
Minmum Distortion Embedding: Anchored Constraints
=====================================================
"""
import numpy as np
import argparse
from fury.window import record
from helios import NetworkDraw
from helios.layouts.mde import MDE
# from
# https://github.com/cvxgrp/pymde/blob/main/examples/anchor_constraints.ipynb
parser = argparse.ArgumentParser()
parser.add_argument(
'--interactive', dest='interactive', default=True, action='store_false')
args = parser.parse_args()
interactive = args.interactive
depth = 9
n_items = 2**(depth + 1) - 1
edges = []
stack = [0]
while stack:
root = stack.pop()
first_child = root*2 + 1
second_child = root*2 + 2
if first_child < n_items:
edges.append([root, first_child])
stack.append(first_child)
if second_child < n_items:
edges.append([root, second_child])
stack.append(second_child)
# these are the indices of the nodes that we will pin in place
anchors = np.arange(2**depth) + 2**depth - 1
radius = 20
# pin the root to be at (0, 0), and the leaves to be spaced uniformly on a
# circle
angles = np.linspace(0, 2*np.pi, anchors.shape[0] + 1)[1:]
anchors_pos = radius * np.stack([np.sin(angles), np.cos(angles)], axis=1)
centers = np.random.normal(size=(n_items, 2))*5
centers[anchors] = anchors_pos.copy()
network_draw = NetworkDraw(
positions=centers,
scales=.4,
node_edge_width=0,
edge_line_opacity=.5,
edge_line_color=(0, 0, 0),
marker='3d',
window_size=(700, 700),
edges=np.array(edges)
)
mde = MDE(
np.array(edges), network_draw,
constraint_name='anchored',
anchors=anchors.astype('float32'),
anchors_pos=anchors_pos.astype('float32'),
use_shortest_path=True
)
if not interactive:
exec(mde._command_string(1, 300))
mde.update()
network_draw.refresh()
record(
network_draw.showm.scene, out_path='viz_mde.png', size=(600, 600))
else:
mde.start(
3, 300, 1,
record_positions=True, without_iren_start=False)
if interactive:
network_draw.showm.initialize()
network_draw.showm.start()
| 24.719101
| 77
| 0.642727
|
4a15fb9ffe95133ed1f8d82e90a4e18c6325c377
| 1,710
|
py
|
Python
|
utils/ema.py
|
TomerMe2/FixMatch-Computational-Learning-Project
|
f68e01d074964dfc5387639a15abe75a24eaa074
|
[
"MIT"
] | 12
|
2020-12-07T04:24:58.000Z
|
2022-02-16T15:33:26.000Z
|
utils/ema.py
|
TomerMe2/FixMatch-Computational-Learning-Project
|
f68e01d074964dfc5387639a15abe75a24eaa074
|
[
"MIT"
] | 1
|
2021-07-15T23:02:22.000Z
|
2021-07-15T23:02:22.000Z
|
utils/ema.py
|
TomerMe2/FixMatch-Computational-Learning-Project
|
f68e01d074964dfc5387639a15abe75a24eaa074
|
[
"MIT"
] | 1
|
2021-07-14T10:21:48.000Z
|
2021-07-14T10:21:48.000Z
|
# Imported from https://github.com/YUE-FAN/FixMatch-PyTorch/blob/master/utils/ema.py
import torch
class EMA(object):
    """Exponential moving average of a model's parameters.

    Keeps a detached 'shadow' copy of the model state that is blended
    towards the live parameters on every ``update_params`` call; buffers
    (e.g. BatchNorm's num_batches_tracked / running_mean / running_var)
    are mirrored verbatim via ``update_buffer``.
    """

    def __init__(self, model, alpha=0.999):
        self.model = model
        self.alpha = alpha
        self.shadow = self.get_model_state()
        self.backup = {}
        self.param_keys = [name for name, _ in self.model.named_parameters()]
        # buffer entries are copied as-is, never averaged
        self.buffer_keys = [name for name, _ in self.model.named_buffers()]

    def update_params(self):
        """Blend the shadow parameters towards the current model values."""
        decay = self.alpha
        current = self.model.state_dict()
        for name in self.param_keys:
            blended = decay * self.shadow[name] + (1 - decay) * current[name]
            self.shadow[name].copy_(blended)

    def update_buffer(self):
        """Mirror the model's buffers into the shadow (no averaging)."""
        current = self.model.state_dict()
        for name in self.buffer_keys:
            self.shadow[name].copy_(current[name])

    def apply_shadow(self):
        """Swap the shadow weights into the model, keeping a backup."""
        self.backup = self.get_model_state()
        self.model.load_state_dict(self.shadow)

    def restore(self):
        """Undo ``apply_shadow`` by reloading the backed-up weights."""
        self.model.load_state_dict(self.backup)

    def get_model_state(self):
        """Return a detached clone of the model's full state dict."""
        return {name: tensor.clone().detach()
                for name, tensor in self.model.state_dict().items()}

    def load_state_dict(self, checkpoint_state_dict):
        """Initialise the shadow from a checkpoint's state dict."""
        self.shadow = {name: tensor.clone()
                       for name, tensor in checkpoint_state_dict.items()}
if __name__ == '__main__':
    # Smoke test: run a BatchNorm layer through one EMA update cycle and
    # print the shadow state before and after syncing the buffers.
    print('=====')
    model = torch.nn.BatchNorm1d(5)
    ema = EMA(model, 0.9)
    inten = torch.randn(10, 5)
    out = model(inten)
    ema.update_params()
    print(ema.shadow)
    ema.update_buffer()
    print(ema.shadow)
| 29.482759
| 84
| 0.599415
|
4a15fbd02a7063019b2ff7b254d708a51a642946
| 3,087
|
py
|
Python
|
mvntime.py
|
gaol/maven-repository-extension
|
4f37003f25f7e082c15941d125dc26291d8465a3
|
[
"Apache-2.0"
] | 1
|
2020-05-01T13:03:04.000Z
|
2020-05-01T13:03:04.000Z
|
mvntime.py
|
gaol/maven-repository-extension
|
4f37003f25f7e082c15941d125dc26291d8465a3
|
[
"Apache-2.0"
] | null | null | null |
mvntime.py
|
gaol/maven-repository-extension
|
4f37003f25f7e082c15941d125dc26291d8465a3
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python
import sys
import os
import os.path
import optparse
import re
from urlparse import urlparse
import urllib2
import base64
MVN_DOWNLOAD_RE = re.compile(r'\[INFO\] Downloaded: ([^\n]*) \(([^\n]*) ([K]?B) at ([^\n]*) KB\/sec\)')
def calculate(url, username = None, password = None):
result = {}
f = None
try:
f = open(url)
except IOError:
if not url.endswith("consoleText"):
url = url + "/consoleText"
req = urllib2.Request(url)
if username is not None:
base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
authheader = "Basic %s" % base64string
req.add_header("Authorization", authheader)
f = urllib2.urlopen(req)
if f is None:
print "Can't open %s" % url
exit
for line in f:
m = MVN_DOWNLOAD_RE.match(line)
if m is not None:
host = urlparse(m.group(1)).hostname
if m.group(3) == 'KB':
size = float(m.group(2))
else:
size = float(m.group(2)) / 1024
speed = float(m.group(4))
if host not in result:
result[host] = {"normal": {"totalSize": 0, "count": 0, "totalTime": 0, "avgSpeed": 0}, "zerospeed": []}
hostResult = result[host]
if speed == 0:
hostResult["zerospeed"].append("%s, size: %.3f" % (m.group(1)[m.group(1).rfind('/') + 1:], size))
continue
hostResult["normal"]["totalSize"] += size
hostResult["normal"]["count"] += 1
hostResult["normal"]["totalTime"] += float(size / speed)
hostResult["normal"]["avgSpeed"] = (hostResult["normal"]["totalSize"] / hostResult["normal"]["totalTime"])
f.close()
return result
# end of caculateFromStream
def main():
"""
Caculates Maven Artifacts downloading time from the log.
"""
usage="%prog [options] LOG-FILE or Jenkins-Job-Link"
description=""" calculates downloading time from a maven build log file """
parser = optparse.OptionParser(usage=usage, description = description)
parser.add_option('-u', '--username', dest='username', type='string', help='User name to access jenkins log in case of Jenkins link')
parser.add_option('-p', '--password', dest='password', type='string', help='Password to access jenkins log in case of Jenkins link')
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit()
result = calculate(args[0], username = options.username, password = options.password)
if len(result.keys()) == 0:
print "No Maven Artifacts Downloaded found!"
exit
print "\nRepositories are: %s" % ", ".join(result.keys())
for k in result.keys():
print "\nDownloaded artifacts from host '%s' :" % k
print "\tTotal Size: %.3f KB, \tTotal Number: %d, \tAverage Speed: %.3f KB/sec" % (result[k]["normal"]["totalSize"], result[k]["normal"]["count"], result[k]["normal"]["avgSpeed"])
if len(result[k]["zerospeed"]) > 0:
print "\nThere are %d artifacts downloaded with 0 speed: (Not counted in above total number)" % len(result[k]["zerospeed"])
print "\n\t" + "\n\t".join(result[k]["zerospeed"])
if __name__ == '__main__':
    # command-line entry point
    main()
| 37.192771
| 183
| 0.628766
|
4a15fbedd7eeab9df4a3ce9b6206236e6eb31390
| 6,779
|
py
|
Python
|
pylinex/model/ScaledModel.py
|
CU-NESS/pylinex
|
b6f342595b6a154e129eb303782e5268088f34d5
|
[
"Apache-2.0"
] | null | null | null |
pylinex/model/ScaledModel.py
|
CU-NESS/pylinex
|
b6f342595b6a154e129eb303782e5268088f34d5
|
[
"Apache-2.0"
] | null | null | null |
pylinex/model/ScaledModel.py
|
CU-NESS/pylinex
|
b6f342595b6a154e129eb303782e5268088f34d5
|
[
"Apache-2.0"
] | null | null | null |
"""
File: pylinex/model/ScaledModel.py
Author: Keith Tauscher
Date: 2 Aug 2018
Description: File containing a class which represents a model which simply
scales the output of a different model.
"""
import numpy as np
from ..util import real_numerical_types
from .Model import Model
class ScaledModel(Model):
    """
    Class which represents a model which simply scales the output of a
    different model.
    """
    def __init__(self, model, scale_factor):
        """
        Creates a ScaledModel with the given model and scale factor.

        model: Model object to build this model around
        scale_factor: the number by which to multiply outputs of model
        """
        self.model = model
        self.scale_factor = scale_factor

    @property
    def model(self):
        """
        Property storing the Model object at the core of this model.
        """
        if not hasattr(self, '_model'):
            raise AttributeError("model referenced before it was set.")
        return self._model

    @model.setter
    def model(self, value):
        """
        Setter for the Model object at the core of this model.

        value: must be a Model object
        """
        if isinstance(value, Model):
            self._model = value
        else:
            raise TypeError("model was not a Model object.")

    @property
    def num_channels(self):
        """
        Property storing the number of channels in outputs of this model.
        """
        if not hasattr(self, '_num_channels'):
            self._num_channels = self.model.num_channels
        return self._num_channels

    @property
    def scale_factor(self):
        """
        Property storing the scale factor by which all outputs of the model at
        the heart of this model will be multiplied.
        """
        if not hasattr(self, '_scale_factor'):
            raise AttributeError("scale_factor was referenced before it " +\
                "was set.")
        return self._scale_factor

    @scale_factor.setter
    def scale_factor(self, value):
        """
        Sets the scale_factor by which to multiply all outputs of the model at
        the core of this model.
        """
        if type(value) in real_numerical_types:
            self._scale_factor = value
        else:
            raise TypeError("scale_factor was set to a non-number.")

    @property
    def parameters(self):
        """
        Property storing a list of strings associated with the parameters
        necessitated by this model. These are the same as the parameters
        necessitated by the parameters of the core model.
        """
        return self.model.parameters

    def __call__(self, parameters):
        """
        Gets the scaled curve associated with the given parameters.

        returns: array of size (num_channels,)
        """
        return self.scale_factor * self.model(parameters)

    @property
    def gradient_computable(self):
        """
        Property storing whether the gradient of this model is computable. This
        is true as long as the gradient of the core model is computable.
        """
        return self.model.gradient_computable

    def gradient(self, parameters):
        """
        Function which computes the gradient of this model at the given
        parameters.

        parameters: numpy.ndarray of parameter values. shape: (num_parameters,)

        returns: numpy.ndarray of gradient values of this model of shape
                 (num_channels, num_parameters)
        """
        return self.scale_factor * self.model.gradient(parameters)

    @property
    def hessian_computable(self):
        """
        Property storing whether the hessian of this model is computable. This
        is true as long as the hessian of the core model is computable.
        """
        return self.model.hessian_computable

    def hessian(self, parameters):
        """
        Function which computes the hessian of this model at the given
        parameters.

        parameters: numpy.ndarray of parameter values. shape: (num_parameters,)

        returns: numpy.ndarray of hessian values of this model of shape
                 (num_channels, num_parameters, num_parameters)
        """
        return self.scale_factor * self.model.hessian(parameters)

    def fill_hdf5_group(self, group):
        """
        Fills the given hdf5 file group with information necessary to reload
        it at a later time.

        group: the hdf5 file group to fill with information about this model
        """
        group.attrs['class'] = 'ScaledModel'
        self.model.fill_hdf5_group(group.create_group('model'))
        group.attrs['scale_factor'] = self.scale_factor

    def __eq__(self, other):
        """
        Checks if other is equivalent to this model.

        other: object to check for equality

        returns: False unless other is an ScaledModel with the same core model
                 and scale_factor.
        """
        if isinstance(other, ScaledModel):
            return ((self.model == other.model) and\
                (self.scale_factor == other.scale_factor))
        else:
            return False

    def quick_fit(self, data, error, quick_fit_parameters=[], prior=None):
        """
        Performs a quick fit of this model to the given data with (or without)
        a given noise level.

        data: 1D array to fit with this scaled model.
        error: if None, the unweighted least square fit is given for
                        parameter_mean and parameter_covariance will be
                        nonsense
               otherwise, error should a 1D array of same length as data
        quick_fit_parameters: quick fit parameters to pass to underlying model
        prior: either None or a GaussianDistribution object containing priors
               (in space of underlying model)

        returns: (parameter_mean, parameter_covariance) which are 1D and 2D
                 arrays respectively
        """
        # IMPROVED: idiomatic None check (was ``type(error) is type(None)``)
        if error is None:
            error = np.ones_like(data)
        # divide out the scale so the underlying model fits in its own units
        data_to_fit = data / self.scale_factor
        error_to_fit = error / np.abs(self.scale_factor)
        return self.model.quick_fit(data_to_fit, error_to_fit,\
            quick_fit_parameters=quick_fit_parameters, prior=prior)

    @property
    def bounds(self):
        """
        Property storing the natural bounds of the parameters of this model.
        Since this is just a rebranding of the underlying model, the bounds are
        passed through with no changes.
        """
        return self.model.bounds
| 34.586735
| 79
| 0.615578
|
4a15fc50761ea35c94787a8d9f21371982791151
| 2,143
|
py
|
Python
|
src/beanmachine/ppl/compiler/tests/binary_vs_multiary_addition_perf_test.py
|
dmitryvinn/beanmachine
|
1ac1bc2a38f22372d96f3f3321bd851834ef1456
|
[
"MIT"
] | null | null | null |
src/beanmachine/ppl/compiler/tests/binary_vs_multiary_addition_perf_test.py
|
dmitryvinn/beanmachine
|
1ac1bc2a38f22372d96f3f3321bd851834ef1456
|
[
"MIT"
] | null | null | null |
src/beanmachine/ppl/compiler/tests/binary_vs_multiary_addition_perf_test.py
|
dmitryvinn/beanmachine
|
1ac1bc2a38f22372d96f3f3321bd851834ef1456
|
[
"MIT"
] | null | null | null |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test performance of multiary addition optimization """
import platform
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Normal
@bm.random_variable
def norm(x):
    # Standard-normal prior; ``x`` only serves to index distinct
    # random variables (one per call site argument).
    return Normal(0.0, 1.0)
@bm.functional
def sum_1(counter):
    """Sum of ``counter`` independent standard-normal random variables."""
    # IMPROVED: renamed the accumulator so it no longer shadows the
    # built-in ``sum``.
    total = 0.0
    for i in range(counter):
        total = total + norm(i)
    return total
@bm.functional
def sum_2():
    # Combine two overlapping sums so the graph contains shared addition
    # sub-expressions for the multiary-addition optimizer to merge.
    return sum_1(100) + sum_1(50)
def get_report(skip_optimizations):
    """Run BMG inference on ``sum_2`` and return its performance report.

    skip_optimizations: set of optimization-pass names to disable during
    graph construction.
    """
    observations = {}
    queries = [sum_2()]
    number_samples = 1000
    _, perf_report = BMGInference()._infer(
        queries, observations, number_samples, skip_optimizations=skip_optimizations
    )
    return perf_report
class BinaryVsMultiaryAdditionPerformanceTest(unittest.TestCase):
    def test_perf_num_nodes_edges(self) -> None:
        """
        Test to check if Multiary addition optimization reduces the
        number of nodes and number of edges using the performance
        report returned by BMGInference.
        """
        if platform.system() == "Windows":
            self.skipTest("Disabling *_perf_test.py until flakiness is resolved")
        self.maxDiff = None
        # conjugacy fixers are disabled in both runs so that only the
        # multiary-addition rewrite differs between them
        skip_optimizations = {
            "BetaBernoulliConjugateFixer",
            "BetaBinomialConjugateFixer",
            "NormalNormalConjugateFixer",
        }
        report_w_optimization = get_report(skip_optimizations)
        # with multiary-addition folding the graph is much smaller
        self.assertEqual(report_w_optimization.node_count, 105)
        self.assertEqual(report_w_optimization.edge_count, 204)
        skip_optimizations = {
            "multiary_addition_fixer",
            "BetaBernoulliConjugateFixer",
            "BetaBinomialConjugateFixer",
            "NormalNormalConjugateFixer",
        }
        report_wo_optimization = get_report(skip_optimizations)
        # without the optimization each binary add stays a separate node
        self.assertEqual(report_wo_optimization.node_count, 203)
        self.assertEqual(report_wo_optimization.edge_count, 302)
| 28.197368
| 84
| 0.692954
|
4a15fd06959332cd87c8f93b2dc164eb643a3812
| 1,204
|
py
|
Python
|
python/misc/missing_third_angle.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
python/misc/missing_third_angle.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | 2
|
2022-03-10T03:49:14.000Z
|
2022-03-14T00:49:54.000Z
|
python/misc/missing_third_angle.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Missing Third Angle.
You are given 2 out of 3 of the angles in a triangle, in degrees.
Write a function that classifies the missing angle as either "acute", "right",
or "obtuse" based on its degrees.
Source:
https://edabit.com/challenge/PKPmS5zwefc7M5emK
"""
def calc_missing_angle(a: int, b: int) -> int:
    """Return the third angle of a triangle whose other angles are *a*, *b*."""
    known = a + b
    return 180 - known
def angle_classifier(a: int) -> str:
    """Classify the angle *a* (in degrees).

    An acute angle is one smaller than 90 degrees.
    A right angle is one that is exactly 90 degrees.
    An obtuse angle is one greater than 90 degrees
    (but smaller than 180 degrees).
    """
    if 0 < a < 90:
        return "acute"
    if 90 < a < 180:
        return "obtuse"
    return "right"
def missing_angle(a: int, b: int) -> str:
    """Classify the third angle of a triangle given angles *a* and *b*."""
    third = calc_missing_angle(a, b)
    return angle_classifier(third)
def main():
    """Demonstrate missing_angle on a few sample triangles."""
    for first, second in ((27, 59), (135, 11), (45, 45)):
        print(missing_angle(first, second))


if __name__ == "__main__":
    main()
| 22.716981
| 79
| 0.653654
|
4a15fd739b1bfa6dec10c587dd198a7df20dcfc2
| 2,715
|
py
|
Python
|
tests/test_middle_simplistic.py
|
vltr/middle
|
f7782610fbb1d9232a3b4cfea057a9331db2775e
|
[
"MIT"
] | 11
|
2018-06-25T11:36:10.000Z
|
2020-10-02T09:29:24.000Z
|
tests/test_middle_simplistic.py
|
vltr/middle
|
f7782610fbb1d9232a3b4cfea057a9331db2775e
|
[
"MIT"
] | 158
|
2018-06-22T14:35:45.000Z
|
2022-03-28T21:57:06.000Z
|
tests/test_middle_simplistic.py
|
vltr/middle
|
f7782610fbb1d9232a3b4cfea057a9331db2775e
|
[
"MIT"
] | 2
|
2019-08-17T19:27:44.000Z
|
2020-03-24T18:47:23.000Z
|
import typing as t
from enum import Enum, IntEnum, unique
import middle
@unique
class PlatformEnum(str, Enum):
    # str-valued so members serialize and compare as their literal names
    XBOX1 = "XBOX1"
    PLAYSTATION4 = "PLAYSTATION4"
    PC = "PC"
@unique
class LanguageEnum(IntEnum):
    # IntEnum so plain ints (e.g. language=1) coerce to members
    ENGLISH = 1
    JAPANESE = 2
    SPANISH = 3
    GERMAN = 4
    PORTUGUESE = 5
@unique
class CityRegionEnum(str, Enum):
    # climate classification used by City.region
    TROPICAL = "TROPICAL"
    TEMPERATE = "TEMPERATE"
    BOREAL = "BOREAL"
class City(middle.Model):
    # middle.Model declares each field as a dict of keyword arguments
    name = {"type": str}
    region = {"type": CityRegionEnum}
class Game(middle.Model):
    # field descriptors consumed by the middle validation library
    name = {"type": str}
    platform = {"type": PlatformEnum}
    score = {"type": float}
    # resolution must look like "1920x1080"
    resolution_tested = {"pattern": r"^\d+x\d+$", "type": str}
    genre = {"type": t.List[str]}
    rating = {"type": t.Dict[str, float]}
    players = {"type": t.Set[str]}
    language = {"type": LanguageEnum}
    awesome_city = {"type": City}
def test_instance():
    """Constructing Game coerces raw values into enums and nested models."""
    game = Game(
        name="Cities: Skylines",
        platform="PC",
        score=9.0,
        resolution_tested="1920x1080",
        genre=["Simulators", "City Building"],
        rating={"IGN": 8.5, "Gamespot": 8.0, "Steam": 4.5},
        players=["Flux", "strictoaster"],
        language=1,
        awesome_city=City(name="Blumenau", region=CityRegionEnum.TEMPERATE),
    )

    # raw inputs ("PC", 1, nested model) must come back as typed objects
    assert isinstance(game, Game)
    assert isinstance(game.platform, PlatformEnum)
    assert isinstance(game.language, LanguageEnum)
    assert isinstance(game.awesome_city, City)
    assert isinstance(game.awesome_city.region, CityRegionEnum)
def test_instance_to_dict():
    """middle.asdict serialises nested models and enums to plain values."""
    game = Game(
        name="Cities: Skylines",
        platform="PC",
        score=9.0,
        resolution_tested="1920x1080",
        genre=["Simulators", "City Building"],
        rating={"IGN": 8.5, "Gamespot": 8.0, "Steam": 4.5},
        players=["Flux", "strictoaster"],
        language=1,
        awesome_city=City(name="Blumenau", region=CityRegionEnum.TEMPERATE),
    )

    data = middle.asdict(game)
    assert isinstance(data, dict)
    # nested model becomes a dict; the enum member becomes its raw value
    assert isinstance(data.get("awesome_city", None), dict)
    assert data.get("awesome_city").get("region") == "TEMPERATE"
def test_dict_to_instance():
    """A plain nested dict round-trips into typed model instances."""
    data = {
        "name": "Cities: Skylines",
        "platform": "PC",
        "score": 9.0,
        "resolution_tested": "1920x1080",
        "genre": ["Simulators", "City Building"],
        "rating": {"IGN": 8.5, "Gamespot": 8.0, "Steam": 4.5},
        "players": ["Flux", "strictoaster"],
        "language": 1,
        "awesome_city": {"name": "Blumenau", "region": "TEMPERATE"},
    }

    game = Game(**data)
    assert isinstance(game, Game)
    # the nested dict is promoted to a City; the string to an enum member
    assert isinstance(game.awesome_city, City)
    assert game.platform == PlatformEnum.PC
| 26.105769
| 76
| 0.605893
|
4a15ff8c0df8e7a4c8d79f1f9ec8d9607e620ac6
| 2,647
|
py
|
Python
|
box-graph/box.py
|
Linh181/Density-based-Clustering
|
7327c486d77a43c0a00c36048153e50db93e606f
|
[
"MIT"
] | null | null | null |
box-graph/box.py
|
Linh181/Density-based-Clustering
|
7327c486d77a43c0a00c36048153e50db93e606f
|
[
"MIT"
] | null | null | null |
box-graph/box.py
|
Linh181/Density-based-Clustering
|
7327c486d77a43c0a00c36048153e50db93e606f
|
[
"MIT"
] | null | null | null |
from point import ClusterPoint
from enum import Enum
class Box():
    # label value meaning "not yet assigned to any cluster"
    DEFAULT_LABEL = -1

    class Func(Enum):
        """
        Function of a box. Can be one of the following:\\
        NONE: No core points in box \\
        CORE: Only core points in box \\
        PARTIAL: Contains at least one core point
        """
        NONE = 1
        CORE = 2
        PARTIAL = 3

    def __init__(self, points, func=Func.NONE):
        """Store *points* and compute the box's axis-aligned bounds."""
        self.points = points
        self.func = func
        self.neighbours = []
        self.label = self.DEFAULT_LABEL
        xs = [p.coords[0] for p in self.points]
        ys = [p.coords[1] for p in self.points]
        self.bounds = {
            "bottom": min(ys),
            "top": max(ys),
            "left": min(xs),
            "right": max(xs),
        }

    def is_labeled(self):
        """Return whether this box has been assigned a cluster label."""
        return self.label != self.DEFAULT_LABEL

    def sqr_distance_to(self, other):
        """Return the square of the minimal distance from self to *other*.

        Assumes that the two boxes do not overlap.
        """
        # horizontal gap (zero when the boxes overlap horizontally)
        dx = 0
        if other.bounds["right"] < self.bounds["left"]:
            dx = (self.bounds["left"] - other.bounds["right"]) ** 2
        elif other.bounds["left"] > self.bounds["right"]:
            dx = (other.bounds["left"] - self.bounds["right"]) ** 2
        # vertical gap (zero when the boxes overlap vertically)
        dy = 0
        if other.bounds["top"] < self.bounds["bottom"]:
            dy = (self.bounds["bottom"] - other.bounds["top"]) ** 2
        elif other.bounds["bottom"] > self.bounds["top"]:
            dy = (other.bounds["bottom"] - self.bounds["top"]) ** 2
        return dx + dy

    def is_core_neighbour(self, other, dist):
        """Whether a core point here lies within *dist* of one in *other*."""
        limit = dist ** 2
        mine = [p for p in self.points if p.func == ClusterPoint.Func.CORE]
        theirs = [p for p in other.points if p.func == ClusterPoint.Func.CORE]
        for core_point in mine:
            if any(core_point.sq_distance_to(o) <= limit for o in theirs):
                return True
        return False

    def add_neighbour(self, neighbour):
        """Register *neighbour* in this box's adjacency list."""
        self.neighbours.append(neighbour)
| 31.891566
| 131
| 0.55119
|
4a16008a10085ec7478a02259069a3e5a84fe4f5
| 97,620
|
py
|
Python
|
nova/tests/unit/conductor/test_conductor.py
|
nicholaskuechler/nova
|
ff412c3888b234eb123161cc4e6d0d0d69c0004e
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/conductor/test_conductor.py
|
nicholaskuechler/nova
|
ff412c3888b234eb123161cc4e6d0d0d69c0004e
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/conductor/test_conductor.py
|
nicholaskuechler/nova
|
ff412c3888b234eb123161cc4e6d0d0d69c0004e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import uuid
import mock
from mox3 import mox
import oslo_messaging as messaging
from oslo_utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova.image import api as image_api
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit import fake_utils
from nova.tests.unit.objects import test_volume_usage
from nova import utils
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
    """Request context whose elevated() memoizes its result.

    Tests can then detect "the elevated context" by identity.
    """

    def elevated(self):
        """Return a consistent elevated context so we can detect it."""
        try:
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
class _BaseTestCase(object):
    """Shared setup and pass-through checks for the conductor test variants.

    Subclasses mix this with test.TestCase and assign self.conductor /
    self.conductor_manager to the flavour (manager, RPC API, local API)
    under test.
    """
    def setUp(self):
        super(_BaseTestCase, self).setUp()
        # Subclasses that talk to the real DB API override this with `db`.
        self.db = None
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        # Capture notifications in-memory instead of emitting them.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)
        def fake_deserialize_context(serializer, ctxt_dict):
            # Verify the context that crossed RPC matches ours, then hand
            # back the original object so identity checks keep working.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context
        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
    def test_provider_fw_rule_get_all(self):
        # The conductor call should pass straight through to the DB API.
        fake_rules = ['a', 'b', 'c']
        self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
        db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
        self.mox.ReplayAll()
        result = self.conductor.provider_fw_rule_get_all(self.context)
        self.assertEqual(result, fake_rules)
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = arch.X86_64
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates, None)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'], None)
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 1, 'device_name': 'foo',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
fake_bdm2 = {'id': 1, 'device_name': 'foo2',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=False)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=False)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
None, False)
def test_instance_get_all_by_filters_use_slave(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=True)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None,
use_slave=True)
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host',
None, None)
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node', None)
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn(condargs)
if name == 'service_get_by_compute_host':
self.mox.StubOutWithMock(
objects.ComputeNodeList, 'get_all_by_host')
objects.ComputeNodeList.get_all_by_host(
self.context, mox.IgnoreArg()
).AndReturn(['fake-compute'])
self.mox.ReplayAll()
if db_exception:
self.assertRaises(messaging.ExpectedException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
if name == 'service_get_by_compute_host':
condargs['compute_node'] = ['fake-compute']
self.assertEqual([condargs], result)
else:
self.assertEqual(condargs, result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(host=None, topic=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_host_and_binary(self):
self._test_stubbed('service_get_by_host_and_binary',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_host_and_binary_not_found(self):
self._test_stubbed('service_get_by_host_and_binary',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def _test_object_action(self, is_classmethod, raise_exception):
class TestObject(obj_base.NovaObject):
def foo(self, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
obj_base.NovaObjectRegistry.register(TestObject)
obj = TestObject()
# NOTE(danms): After a trip over RPC, any tuple will be a list,
# so use a list here to make sure we can handle it
fake_args = []
if is_classmethod:
result = self.conductor.object_class_action(
self.context, TestObject.obj_name(), 'bar', '1.0',
fake_args, {'raise_exception': raise_exception})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', fake_args,
{'raise_exception': raise_exception})
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, True, True)
def test_object_action_copies_object(self):
class TestObject(obj_base.NovaObject):
fields = {'dict': fields.DictOfStringsField()}
def touch_dict(self):
self.dict['foo'] = 'bar'
self.obj_reset_changes()
obj_base.NovaObjectRegistry.register(TestObject)
obj = TestObject()
obj.dict = {}
obj.obj_reset_changes()
updates, result = self.conductor.object_action(
self.context, obj, 'touch_dict', tuple(), {})
# NOTE(danms): If conductor did not properly copy the object, then
# the new and reference copies of the nested dict object will be
# the same, and thus 'dict' will not be reported as changed
self.assertIn('dict', updates)
self.assertEqual({'foo': 'bar'}, updates['dict'])
def test_object_class_action_versions(self):
@obj_base.NovaObjectRegistry.register
class TestObject(obj_base.NovaObject):
VERSION = '1.10'
@classmethod
def foo(cls, context):
return cls()
versions = {
'TestObject': '1.2',
'OtherObj': '1.0',
}
with mock.patch.object(self.conductor_manager,
'_object_dispatch') as m:
m.return_value = TestObject()
m.return_value.obj_to_primitive = mock.MagicMock()
self.conductor.object_class_action_versions(
self.context, TestObject.obj_name(), 'foo', versions,
tuple(), {})
m.return_value.obj_to_primitive.assert_called_once_with(
target_version='1.2', version_manifest=versions)
def _test_expected_exceptions(self, db_method, conductor_method, errors,
*args, **kwargs):
# Tests that expected exceptions are handled properly.
for error in errors:
with mock.patch.object(db, db_method, side_effect=error):
self.assertRaises(messaging.ExpectedException,
conductor_method,
self.context, *args, **kwargs)
def test_action_event_start_expected_exceptions(self):
error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
self._test_expected_exceptions(
'action_event_start', self.conductor.action_event_start, [error],
{'foo': 'bar'})
def test_action_event_finish_expected_exceptions(self):
errors = (exc.InstanceActionNotFound(request_id='1',
instance_uuid='2'),
exc.InstanceActionEventNotFound(event='1', action_id='2'))
self._test_expected_exceptions(
'action_event_finish', self.conductor.action_event_finish,
errors, {'foo': 'bar'})
def test_instance_update_expected_exceptions(self):
errors = (exc.InvalidUUID(uuid='foo'),
exc.InstanceNotFound(instance_id=1),
exc.UnexpectedTaskStateError(instance_uuid='fake_uuid',
expected={'task_state': 'foo'},
actual={'task_state': 'bar'}))
self._test_expected_exceptions(
'instance_update', self.conductor.instance_update,
errors, None, {'foo': 'bar'}, None)
def test_instance_get_by_uuid_expected_exceptions(self):
error = exc.InstanceNotFound(instance_id=1)
self._test_expected_exceptions(
'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
[error], None, [])
def test_aggregate_host_add_expected_exceptions(self):
error = exc.AggregateHostExists(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_add', self.conductor.aggregate_host_add,
[error], {'id': 1}, None)
def test_aggregate_host_delete_expected_exceptions(self):
error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_delete', self.conductor.aggregate_host_delete,
[error], {'id': 1}, None)
def test_service_update_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_update',
self.conductor.service_update,
[error], {'id': 1}, None)
def test_service_destroy_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_destroy',
self.conductor.service_destroy,
[error], 1)
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertNotIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_instance_destroy(self):
instance = objects.Instance(id=1, uuid='fake-uuid')
@mock.patch.object(instance, 'destroy')
@mock.patch.object(obj_base, 'obj_to_primitive',
return_value='fake-result')
def do_test(mock_to_primitive, mock_destroy):
result = self.conductor.instance_destroy(self.context, instance)
mock_destroy.assert_called_once_with()
mock_to_primitive.assert_called_once_with(instance)
self.assertEqual(result, 'fake-result')
do_test()
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args,
update_cells=True)
self.assertEqual(result, 'foo')
@mock.patch.object(notifications, 'audit_period_bounds')
@mock.patch.object(notifications, 'bandwidth_usage')
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
def test_notify_usage_exists(self, mock_notify, mock_bw, mock_audit):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = objects.Instance(id=1, system_metadata={})
mock_audit.return_value = ('start', 'end')
mock_bw.return_value = 'bw_usage'
self.conductor.notify_usage_exists(self.context, instance, False, True,
system_metadata={},
extra_usage_info=dict(extra='info'))
class MatchInstance(object):
def __eq__(self, thing):
return thing.id == instance.id
notifier = self.conductor_manager.notifier
mock_audit.assert_called_once_with(False)
mock_bw.assert_called_once_with(MatchInstance(), 'start', True)
mock_notify.assert_called_once_with(notifier, self.context,
MatchInstance(),
'exists', system_metadata={},
extra_usage_info=info)
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
{'fake': 'values'})
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertIsNone(result)
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', None)
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn(test_volume_usage.fake_vol_usage)
compute_utils.usage_volume_info(
mox.IsA(objects.VolumeUsage)).AndReturn('fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst, None, False)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('conductor.%s' % self.conductor_manager.host,
msg.publisher_id)
self.assertEqual('volume.usage', msg.event_type)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor RPC API Tests.

    Runs the shared _BaseTestCase checks against the RPC client API,
    backed by a real in-process conductor service.
    """
    def setUp(self):
        super(ConductorRPCAPITestCase, self).setUp()
        # Start an actual conductor service so the RPC calls have a target.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor_manager = self.conductor_service.manager
        self.conductor = conductor_rpcapi.ConductorAPI()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor API Tests.

    Exercises the high-level conductor_api.API facade on top of a live
    in-process conductor service.
    """
    def setUp(self):
        super(ConductorAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        self.conductor_manager = self.conductor_service.manager
        self.db = None
    def test_wait_until_ready(self):
        timeouts = []
        calls = dict(count=0)
        def fake_ping(context, message, timeout):
            # Fail the first 14 pings to exercise the retry loop.
            timeouts.append(timeout)
            calls['count'] += 1
            if calls['count'] < 15:
                raise messaging.MessagingTimeout("fake")
        self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
        self.conductor.wait_until_ready(self.context)
        # The first ten attempts use a 10s timeout, after which the API
        # falls back to blocking pings (timeout=None).
        self.assertEqual(timeouts.count(10), 10)
        self.assertIn(None, timeouts)
    @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions')
    def test_object_backport_redirect(self, mock_ovo):
        # object_backport() must redirect onto object_backport_versions(),
        # translating the target version into an object-version manifest.
        mock_ovo.return_value = mock.sentinel.obj_versions
        mock_objinst = mock.Mock()
        with mock.patch.object(self.conductor,
                               'object_backport_versions') as mock_call:
            self.conductor.object_backport(mock.sentinel.ctxt,
                                           mock_objinst,
                                           mock.sentinel.target_version)
            mock_call.assert_called_once_with(mock.sentinel.ctxt,
                                              mock_objinst,
                                              mock.sentinel.obj_versions)
class ConductorLocalAPITestCase(ConductorAPITestCase):
    """Conductor LocalAPI Tests.

    Re-runs the API test suite against the in-process LocalAPI, which
    bypasses RPC entirely.
    """
    def setUp(self):
        super(ConductorLocalAPITestCase, self).setUp()
        self.conductor = conductor_api.LocalAPI()
        self.conductor_manager = self.conductor._manager._target
        # LocalAPI talks to the DB API directly; expose it for base tests.
        self.db = db
    def test_wait_until_ready(self):
        # Override test in ConductorAPITestCase: LocalAPI has no remote
        # service to wait for.
        pass
class ConductorImportTest(test.TestCase):
    """Verify conductor.API()/ComputeTaskAPI() honour the use_local flag."""
    def test_import_conductor_local(self):
        # use_local=True -> in-process (no RPC) implementations.
        self.flags(use_local=True, group='conductor')
        self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
        self.assertIsInstance(conductor.ComputeTaskAPI(),
                              conductor_api.LocalComputeTaskAPI)
    def test_import_conductor_rpc(self):
        # use_local=False -> RPC-backed implementations.
        self.flags(use_local=False, group='conductor')
        self.assertIsInstance(conductor.API(), conductor_api.API)
        self.assertIsInstance(conductor.ComputeTaskAPI(),
                              conductor_api.ComputeTaskAPI)
    def test_import_conductor_override_to_local(self):
        # An explicit use_local=True argument wins over the config option.
        self.flags(use_local=False, group='conductor')
        self.assertIsInstance(conductor.API(use_local=True),
                              conductor_api.LocalAPI)
        self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
                              conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
    """Exercise the whitelist of instance fields conductor may update."""
    def test_all_allowed_keys(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        conductor = conductor_manager.ConductorManager()
        updates = {}
        # Build a plausibly-typed value for every whitelisted field so the
        # update path accepts all of them in one call.
        for key in conductor_manager.allowed_updates:
            if key in conductor_manager.datetime_fields:
                updates[key] = timeutils.utcnow()
            elif key == 'access_ip_v4':
                updates[key] = '10.0.0.2'
            elif key == 'access_ip_v6':
                updates[key] = '2001:db8:0:1::1'
            elif key in ('instance_type_id', 'memory_mb', 'ephemeral_gb',
                         'root_gb', 'vcpus', 'power_state', 'progress'):
                updates[key] = 5
            elif key == 'system_metadata':
                updates[key] = {'foo': 'foo'}
            else:
                updates[key] = 'foo'
        def fake_save(inst):
            # id that comes back from db after updating
            inst.id = 1
        with mock.patch.object(objects.Instance, 'save',
                               side_effect=fake_save,
                               autospec=True) as mock_save:
            conductor.instance_update(ctxt, 'fake-instance', updates,
                                      'conductor')
            mock_save.assert_called_once_with(mock.ANY)
    def test_allowed_keys_are_real(self):
        # Every whitelisted key must correspond to a real column on the
        # SQLAlchemy Instance model.
        instance = models.Instance()
        keys = list(conductor_manager.allowed_updates)
        # NOTE(danms): expected_task_state is a parameter that gets
        # passed to the db layer, but is not actually an instance attribute
        del keys[keys.index('expected_task_state')]
        for key in keys:
            self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
    """Shared compute-task tests; subclasses provide self.conductor and
    self.conductor_manager (manager, local API, or RPC API variants).
    """
    def setUp(self):
        super(_BaseTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_server_actions.stub_out_action_events(self.stubs)
        def fake_deserialize_context(serializer, ctxt_dict):
            # Assert the RPC layer round-trips the caller identity intact.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context
        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
def _prepare_rebuild_args(self, update_args=None):
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': 'image_ref',
'orig_image_ref': 'orig_image_ref',
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host'}
if update_args:
rebuild_args.update(update_args)
return rebuild_args
    @mock.patch('nova.objects.Migration')
    def test_live_migrate(self, migobj):
        """Live migration builds a task, runs it, and seeds the migration
        record with status/source/destination.
        """
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        migration = migobj()
        # mox record phase: expect exactly one task execution.
        self.mox.StubOutWithMock(live_migrate.LiveMigrationTask, 'execute')
        task = self.conductor_manager._build_live_migrate_task(
            self.context, inst_obj, 'destination', 'block_migration',
            'disk_over_commit', migration)
        task.execute()
        self.mox.ReplayAll()
        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'live_migrate_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.live_migrate_instance(self.context, inst_obj,
                'destination', 'block_migration', 'disk_over_commit')
        else:
            self.conductor.migrate_server(self.context, inst_obj,
                {'host': 'destination'}, True, False, None,
                 'block_migration', 'disk_over_commit')
        # Conductor should have primed the migration object before the task.
        self.assertEqual('pre-migrating', migration.status)
        self.assertEqual('destination', migration.dest_compute)
        self.assertEqual(inst_obj.host, migration.source_compute)
    def _test_cold_migrate(self, clean_shutdown=True):
        """Drive a cold-migrate through the API/manager and verify the
        MigrationTask is built and executed with the expected spec.

        :param clean_shutdown: passed through to the migration task.
        """
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(migrate.MigrationTask, 'execute')
        inst = fake_instance.fake_db_instance(image_ref='image_ref')
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        inst_obj.system_metadata = {'image_hw_disk_bus': 'scsi'}
        flavor = flavors.get_default_flavor()
        flavor.extra_specs = {'extra_specs': 'fake'}
        filter_properties = {'limits': {},
                             'retry': {'num_attempts': 1,
                                       'hosts': [['host1', None]]}}
        request_spec = {'instance_type': obj_base.obj_to_primitive(flavor),
                        'instance_properties': {}}
        # mox record phase: image lookup, spec build, then task execution.
        utils.get_image_from_system_metadata(
            inst_obj.system_metadata).AndReturn('image')
        scheduler_utils.build_request_spec(
            self.context, 'image',
            [mox.IsA(objects.Instance)],
            instance_type=mox.IsA(objects.Flavor)).AndReturn(request_spec)
        task = self.conductor_manager._build_cold_migrate_task(
            self.context, inst_obj, flavor, filter_properties,
            request_spec, [], clean_shutdown=clean_shutdown)
        task.execute()
        self.mox.ReplayAll()
        scheduler_hint = {'filter_properties': {}}
        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'resize_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.resize_instance(
                self.context, inst_obj, {}, scheduler_hint, flavor, [],
                clean_shutdown)
        else:
            self.conductor.migrate_server(
                self.context, inst_obj, scheduler_hint,
                False, False, flavor, None, None, [],
                clean_shutdown)
def test_cold_migrate(self):
self._test_cold_migrate()
    def test_cold_migrate_forced_shutdown(self):
        """Cold migrate with clean_shutdown disabled (forced power-off)."""
        self._test_cold_migrate(clean_shutdown=False)
    @mock.patch('nova.objects.Instance.refresh')
    @mock.patch('nova.utils.spawn_n')
    def test_build_instances(self, mock_spawn, mock_refresh):
        """Two instances are scheduled to two hosts and each one is sent
        to compute via build_and_run_instance with per-host retry info.
        """
        # Run spawned work inline so the cast completes synchronously.
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        instance_type = flavors.get_default_flavor()
        instances = [objects.Instance(context=self.context,
                                      id=i,
                                      uuid=uuid.uuid4(),
                                      flavor=instance_type) for i in range(2)]
        instance_type_p = obj_base.obj_to_primitive(instance_type)
        instance_properties = obj_base.obj_to_primitive(instances[0])
        instance_properties['system_metadata'] = flavors.save_flavor_info(
            {}, instance_type)
        # mox record phase: group setup, scheduling, BDM lookups, and one
        # build_and_run_instance call per selected host.
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')
        spec = {'image': {'fake_data': 'should_pass_silently'},
                'instance_properties': instance_properties,
                'instance_type': instance_type_p,
                'num_instances': 2}
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                        [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                         {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[0].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host1',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'retry': {'num_attempts': 1,
                                             'hosts': [['host1', 'node1']]},
                                   'limits': []},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node1', limits=[])
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[1].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host2',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'limits': [],
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['host2', 'node2']]}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node2', limits=[])
        self.mox.ReplayAll()
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                instances=instances,
                image={'fake_data': 'should_pass_silently'},
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
    def test_build_instances_scheduler_failure(self):
        """NoValidHost from the scheduler puts every instance into ERROR
        via set_vm_state_and_notify instead of raising to the caller.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        exception = exc.NoValidHost(reason='fake-reason')
        # mox record phase: spec build and group setup succeed, then the
        # scheduler raises; each instance gets an error-state notification.
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        scheduler_utils.build_request_spec(self.context, image,
                mox.IgnoreArg()).AndReturn(spec)
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
            self.context, spec,
            {'retry': {'num_attempts': 1,
                       'hosts': []}}).AndRaise(exception)
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        for instance in instances:
            scheduler_utils.set_vm_state_and_notify(
                self.context, instance.uuid, 'compute_task', 'build_instances',
                updates, exception, spec, self.conductor_manager.db)
        self.mox.ReplayAll()
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
    @mock.patch('nova.utils.spawn_n')
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_build_instances_scheduler_group_failure(self, state_mock,
                                                     sig_mock, bs_mock,
                                                     spawn_mock):
        """An UnsupportedPolicyException from group setup marks every
        instance as ERROR through _set_vm_state_and_notify.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}

        # NOTE(gibi): LocalComputeTaskAPI use eventlet spawn that makes mocking
        # hard so use direct call instead.
        spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)
        bs_mock.return_value = spec
        exception = exc.UnsupportedPolicyException(reason='fake-reason')
        sig_mock.side_effect = exception
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(
                          context=self.context,
                          instances=instances,
                          image=image,
                          filter_properties={},
                          admin_password='admin_password',
                          injected_files='injected_files',
                          requested_networks=None,
                          security_groups='security_groups',
                          block_device_mapping='block_device_mapping',
                          legacy_bdm=False)
        # One error notification per instance, in order.
        calls = []
        for instance in instances:
            calls.append(mock.call(self.context, instance.uuid,
                         'build_instances', updates, exception, spec))
        state_mock.assert_has_calls(calls)
    def test_unshelve_instance_on_host(self):
        """A SHELVED (not offloaded) instance is simply started in place;
        unshelve_instance RPC must not be called.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata
        # mox record phase: only start_instance is expected; stubbing
        # unshelve_instance with no recorded call asserts it stays unused.
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'start_instance')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')
        self.conductor_manager.compute_rpcapi.start_instance(self.context,
                instance)
        self.mox.ReplayAll()
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_unshelve_offloaded_instance_glance_image_not_found(self):
        """If the shelved image vanished from glance, unshelve raises
        UnshelveException and the instance ends up in ERROR.
        """
        shelved_image_id = "image_not_found"
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        # mox record phase: image lookup raises ImageNotFound.
        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        e = exc.ImageNotFound(image_id=shelved_image_id)
        self.conductor_manager.image_api.get(
            self.context, shelved_image_id, show_deleted=False).AndRaise(e)
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_host'] = 'fake-mini'
        system_metadata['shelved_image_id'] = shelved_image_id

        self.assertRaises(
            exc.UnshelveException,
            self.conductor_manager.unshelve_instance,
            self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ERROR)
    def test_unshelve_offloaded_instance_image_id_is_none(self):
        """Volume-backed instances shelve with no image; unshelve must
        still schedule and call unshelve_instance exactly once.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        # 'shelved_image_id' is None for volumebacked instance
        instance.system_metadata['shelved_image_id'] = None

        with contextlib.nested(
            mock.patch.object(self.conductor_manager,
                              '_schedule_instances'),
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'unshelve_instance'),
        ) as (schedule_mock, unshelve_mock):
            schedule_mock.return_value = [{'host': 'fake_host',
                                           'nodename': 'fake_node',
                                           'limits': {}}]
            self.conductor_manager.unshelve_instance(self.context, instance)
            self.assertEqual(1, unshelve_mock.call_count)
    def test_unshelve_instance_schedule_and_rebuild(self):
        """An offloaded instance is rescheduled using its shelved image
        and unshelved on the chosen host with retry info in the filters.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {'retry': {'num_attempts': 1,
                                       'hosts': []}}
        system_metadata = instance.system_metadata
        # mox record phase: image fetch, scheduling, then the unshelve RPC
        # aimed at the host/node the scheduler picked.
        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                'unshelve_instance')
        self.conductor_manager.image_api.get(self.context,
                'fake_image_id', show_deleted=False).AndReturn('fake_image')
        self.conductor_manager._schedule_instances(self.context,
                'fake_image', filter_properties, instance).AndReturn(
                        [{'host': 'fake_host',
                          'nodename': 'fake_node',
                          'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image='fake_image',
                filter_properties={'limits': {},
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['fake_host',
                                                        'fake_node']]}},
                                    node='fake_node')
        self.mox.ReplayAll()
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
        """NoValidHost during unshelve scheduling leaves the instance in
        SHELVED_OFFLOADED rather than erroring it out.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        system_metadata = instance.system_metadata

        def fake_schedule_instances(context, image, filter_properties,
                                    *instances):
            raise exc.NoValidHost(reason='')

        with contextlib.nested(
            mock.patch.object(self.conductor_manager.image_api, 'get',
                              return_value='fake_image'),
            mock.patch.object(self.conductor_manager, '_schedule_instances',
                              fake_schedule_instances)
        ) as (_get_image, _schedule_instances):
            system_metadata['shelved_at'] = timeutils.utcnow()
            system_metadata['shelved_image_id'] = 'fake_image_id'
            system_metadata['shelved_host'] = 'fake-mini'
            self.conductor_manager.unshelve_instance(self.context, instance)
            _get_image.assert_has_calls([mock.call(self.context,
                                      system_metadata['shelved_image_id'],
                                      show_deleted=False)])
            self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_schedule_instances',
                       side_effect=messaging.MessagingTimeout())
    @mock.patch.object(image_api.API, 'get', return_value='fake_image')
    def test_unshelve_instance_schedule_and_rebuild_messaging_exception(
            self, mock_get_image, mock_schedule_instances):
        """A MessagingTimeout during scheduling propagates, and the
        instance is reset to SHELVED_OFFLOADED with no task state.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.assertRaises(messaging.MessagingTimeout,
                          self.conductor_manager.unshelve_instance,
                          self.context, instance)
        mock_get_image.assert_has_calls([mock.call(self.context,
                                        system_metadata['shelved_image_id'],
                                        show_deleted=False)])
        self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
        self.assertIsNone(instance.task_state)
    def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
        """Volume-backed unshelve schedules with image=None and passes
        image=None through to the unshelve RPC.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {'retry': {'num_attempts': 1,
                                       'hosts': []}}
        system_metadata = instance.system_metadata
        # mox record phase: no image lookup happens for volume-backed.
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                'unshelve_instance')
        self.conductor_manager._schedule_instances(self.context,
                None, filter_properties, instance).AndReturn(
                        [{'host': 'fake_host',
                          'nodename': 'fake_node',
                          'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image=None,
                filter_properties={'limits': {},
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['fake_host',
                                                        'fake_node']]}},
                                    node='fake_node')
        self.mox.ReplayAll()
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_rebuild_instance(self):
        """Rebuild on the instance's own host must skip the scheduler and
        forward the rebuild args unchanged to compute.
        """
        inst_obj = self._create_fake_instance_obj()
        rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations')
        ) as (rebuild_mock, select_dest_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                            instance=inst_obj,
                                            **rebuild_args)
            self.assertFalse(select_dest_mock.called)
            rebuild_mock.assert_called_once_with(self.context,
                               instance=inst_obj,
                               **rebuild_args)
    def test_rebuild_instance_with_scheduler(self):
        """Rebuild with no target host consults the scheduler (excluding
        the current host), uses the selected host, and emits the
        rebuild.scheduled notification.
        """
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        rebuild_args = self._prepare_rebuild_args({'host': None})
        expected_host = 'thebesthost'
        request_spec = {}
        # The instance's current host must be excluded from scheduling.
        filter_properties = {'ignore_hosts': [(inst_obj.host)]}
        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              return_value=[{'host': expected_host}]),
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec)
        ) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                            instance=inst_obj,
                                            **rebuild_args)
            select_dest_mock.assert_called_once_with(self.context,
                                                     request_spec,
                                                     filter_properties)
            rebuild_args['host'] = expected_host
            rebuild_mock.assert_called_once_with(self.context,
                                            instance=inst_obj,
                                            **rebuild_args)
        self.assertEqual('compute.instance.rebuild.scheduled',
                         fake_notifier.NOTIFICATIONS[0].event_type)
    def test_rebuild_instance_with_scheduler_no_host(self):
        """If the scheduler finds no valid host the NoValidHost error
        propagates and no rebuild RPC is sent.
        """
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        rebuild_args = self._prepare_rebuild_args({'host': None})
        request_spec = {}
        filter_properties = {'ignore_hosts': [(inst_obj.host)]}
        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason='')),
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec)
        ) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
            self.assertRaises(exc.NoValidHost,
                              self.conductor_manager.rebuild_instance,
                              context=self.context, instance=inst_obj,
                              **rebuild_args)
            select_dest_mock.assert_called_once_with(self.context,
                                                     request_spec,
                                                     filter_properties)
            self.assertFalse(rebuild_mock.called)
    @mock.patch('nova.utils.spawn_n')
    @mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI,
                       'rebuild_instance')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.scheduler_client.SchedulerClient,
                       'select_destinations')
    @mock.patch('nova.scheduler.utils.build_request_spec')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_rebuild_instance_with_scheduler_group_failure(self,
                                                           state_mock,
                                                           bs_mock,
                                                           select_dest_mock,
                                                           sig_mock,
                                                           rebuild_mock,
                                                           spawn_mock):
        """Group-policy failure during rebuild notifies with ACTIVE state
        and never reaches the scheduler or the rebuild RPC.
        """
        inst_obj = self._create_fake_instance_obj()
        rebuild_args = self._prepare_rebuild_args({'host': None})
        request_spec = {}
        bs_mock.return_value = request_spec

        # NOTE(gibi): LocalComputeTaskAPI use eventlet spawn that makes mocking
        # hard so use direct call instead.
        spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)

        exception = exc.UnsupportedPolicyException(reason='')
        sig_mock.side_effect = exception

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        self.assertRaises(exc.UnsupportedPolicyException,
                          self.conductor.rebuild_instance,
                          self.context,
                          inst_obj,
                          **rebuild_args)
        updates = {'vm_state': vm_states.ACTIVE, 'task_state': None}
        state_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                           'rebuild_server', updates,
                                           exception, request_spec)
        self.assertFalse(select_dest_mock.called)
        self.assertFalse(rebuild_mock.called)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
    def setUp(self):
        """Run the shared task tests directly against a ComputeTaskManager."""
        super(ConductorTaskTestCase, self).setUp()
        self.conductor = conductor_manager.ComputeTaskManager()
        # Manager and 'API' are the same object for this variant.
        self.conductor_manager = self.conductor
def test_migrate_server_fails_with_rebuild(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, flavor, None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate.LiveMigrationTask, 'execute')
    def _test_migrate_server_deals_with_expected_exceptions(self, ex,
        mock_execute, mock_set):
        """Helper: raise *ex* from the live migration task and verify the
        same exception type propagates while the instance is reset to
        ACTIVE with the MIGRATING task state expected.
        """
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        mock_execute.side_effect = ex
        # ExceptionHelper unwraps ExpectedException so assertRaises can see
        # the original exception type.
        self.conductor = utils.ExceptionHelper(self.conductor)

        self.assertRaises(type(ex),
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')

        mock_set.assert_called_once_with(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
    def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
        """InvalidCPUInfo from the live migration task is re-raised and
        the instance state/notification is restored to ACTIVE.
        """
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        # mox record phase: the task raises, then the error notification
        # is expected with the ACTIVE-state updates.
        self.mox.StubOutWithMock(live_migrate.LiveMigrationTask, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                'set_vm_state_and_notify')

        ex = exc.InvalidCPUInfo(reason="invalid cpu info.")

        task = self.conductor._build_live_migrate_task(
            self.context, inst_obj, 'destination', 'block_migration',
            'disk_over_commit', mox.IsA(objects.Migration))
        task.execute().AndRaise(ex)

        scheduler_utils.set_vm_state_and_notify(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()

        # ExceptionHelper unwraps ExpectedException for assertRaises.
        self.conductor = utils.ExceptionHelper(self.conductor)

        self.assertRaises(exc.InvalidCPUInfo,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
def test_migrate_server_deals_with_expected_exception(self):
exs = [exc.InstanceInvalidState(instance_uuid="fake", attr='',
state='', method=''),
exc.DestinationHypervisorTooOld(),
exc.HypervisorUnavailable(host='dummy'),
exc.LiveMigrationWithOldNovaNotSafe(server='dummy'),
exc.MigrationPreCheckError(reason='dummy'),
exc.InvalidSharedStorage(path='dummy', reason='dummy'),
exc.NoValidHost(reason='dummy'),
exc.ComputeServiceUnavailable(host='dummy'),
exc.InvalidHypervisorType(),
exc.InvalidCPUInfo(reason='dummy'),
exc.UnableToMigrateToSelf(instance_id='dummy', host='dummy'),
exc.InvalidLocalStorage(path='dummy', reason='dummy')]
for ex in exs:
self._test_migrate_server_deals_with_expected_exceptions(ex)
    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate.LiveMigrationTask, 'execute')
    def test_migrate_server_deals_with_unexpected_exceptions(self,
            mock_live_migrate, mock_set_state):
        """An unexpected error is wrapped in MigrationError, the instance
        goes to ERROR, and the original message is preserved in 'reason'.
        """
        expected_ex = IOError('fake error')
        mock_live_migrate.side_effect = expected_ex
        instance = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        ex = self.assertRaises(exc.MigrationError,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
        request_spec = {'instance_properties': {
                'uuid': instance['uuid'], },
        }
        mock_set_state.assert_called_once_with(self.context,
                        instance['uuid'],
                        'compute_task', 'migrate_server',
                        dict(vm_state=vm_states.ERROR,
                             task_state=inst_obj.task_state,
                             expected_task_state=task_states.MIGRATING,),
                        expected_ex, request_spec, self.conductor.db)
        self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))
    def test_set_vm_state_and_notify(self):
        """_set_vm_state_and_notify delegates straight to the scheduler
        utility with the manager's db handle appended.
        """
        self.mox.StubOutWithMock(scheduler_utils,
                                 'set_vm_state_and_notify')
        scheduler_utils.set_vm_state_and_notify(
                self.context, 1, 'compute_task', 'method', 'updates',
                'ex', 'request_spec', self.conductor.db)

        self.mox.ReplayAll()

        self.conductor._set_vm_state_and_notify(
                self.context, 1, 'method', 'updates', 'ex', 'request_spec')
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    @mock.patch.object(objects.Quotas, 'from_reservations')
    @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    @mock.patch.object(migrate.MigrationTask, 'rollback')
    def test_cold_migrate_no_valid_host_back_in_active_state(
            self, rollback_mock, notify_mock, select_dest_mock, quotas_mock,
            metadata_mock, sig_mock, brs_mock):
        """NoValidHost during cold migrate of an ACTIVE instance raises,
        notifies with ACTIVE-state updates, and rolls the task back.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst_obj = objects.Instance(
            image_ref='fake-image_ref',
            instance_type_id=flavor['id'],
            vm_state=vm_states.ACTIVE,
            system_metadata={},
            uuid='fake',
            user_id='fake')
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        metadata_mock.return_value = image
        brs_mock.return_value = request_spec
        exc_info = exc.NoValidHost(reason="")
        select_dest_mock.side_effect = exc_info
        updates = {'vm_state': vm_states.ACTIVE,
                   'task_state': None}
        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate,
                          self.context, inst_obj,
                          flavor, filter_props, [resvs],
                          clean_shutdown=True)
        metadata_mock.assert_called_with({})
        brs_mock.assert_called_once_with(self.context, image,
                                         [inst_obj],
                                         instance_type=flavor)
        quotas_mock.assert_called_once_with(self.context, [resvs],
                                            instance=inst_obj)
        sig_mock.assert_called_once_with(self.context, request_spec,
                                         filter_props)
        notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                              'migrate_server', updates,
                                              exc_info, request_spec)
        rollback_mock.assert_called_once_with()
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    @mock.patch.object(objects.Quotas, 'from_reservations')
    @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    @mock.patch.object(migrate.MigrationTask, 'rollback')
    def test_cold_migrate_no_valid_host_back_in_stopped_state(
            self, rollback_mock, notify_mock, select_dest_mock, quotas_mock,
            metadata_mock, sig_mock, brs_mock):
        """Same as the ACTIVE variant, but a STOPPED instance must be
        reported back as STOPPED in the error notification.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst_obj = objects.Instance(
            image_ref='fake-image_ref',
            vm_state=vm_states.STOPPED,
            instance_type_id=flavor['id'],
            system_metadata={},
            uuid='fake',
            user_id='fake')
        image = 'fake-image'
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict(),
                            image=image)
        filter_props = dict(context=None)
        resvs = 'fake-resvs'

        metadata_mock.return_value = image
        brs_mock.return_value = request_spec
        exc_info = exc.NoValidHost(reason="")
        select_dest_mock.side_effect = exc_info
        updates = {'vm_state': vm_states.STOPPED,
                   'task_state': None}

        self.assertRaises(exc.NoValidHost,
                           self.conductor._cold_migrate,
                           self.context, inst_obj,
                           flavor, filter_props, [resvs],
                           clean_shutdown=True)
        metadata_mock.assert_called_with({})
        brs_mock.assert_called_once_with(self.context, image,
                                         [inst_obj],
                                         instance_type=flavor)
        quotas_mock.assert_called_once_with(self.context, [resvs],
                                            instance=inst_obj)
        sig_mock.assert_called_once_with(self.context, request_spec,
                                         filter_props)
        notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                            'migrate_server', updates,
                                            exc_info, request_spec)
        rollback_mock.assert_called_once_with()
    def test_cold_migrate_no_valid_host_error_msg(self):
        """The NoValidHost raised by _cold_migrate must mention
        'cold migrate' so the operation is identifiable in the message.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst_obj = objects.Instance(
            image_ref='fake-image_ref',
            vm_state=vm_states.STOPPED,
            instance_type_id=flavor['id'],
            system_metadata={},
            uuid='fake',
            user_id='fake')
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'

        with contextlib.nested(
            mock.patch.object(utils, 'get_image_from_system_metadata',
                              return_value=image),
            mock.patch.object(scheduler_utils, 'build_request_spec',
                              return_value=request_spec),
            mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
            mock.patch.object(migrate.MigrationTask,
                              'execute',
                              side_effect=exc.NoValidHost(reason="")),
            mock.patch.object(migrate.MigrationTask, 'rollback')
        ) as (image_mock, brs_mock, set_vm_mock, task_execute_mock,
              task_rollback_mock):
            nvh = self.assertRaises(exc.NoValidHost,
                                    self.conductor._cold_migrate, self.context,
                                    inst_obj, flavor, filter_props, [resvs],
                                    clean_shutdown=True)
            self.assertIn('cold migrate', nvh.message)
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    @mock.patch('nova.scheduler.utils.build_request_spec')
    @mock.patch.object(migrate.MigrationTask, 'execute')
    @mock.patch.object(migrate.MigrationTask, 'rollback')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_cold_migrate_no_valid_host_in_group(self,
                                                 set_vm_mock,
                                                 task_rollback_mock,
                                                 task_exec_mock,
                                                 brs_mock,
                                                 image_mock):
        """UnsupportedPolicyException from the migration task propagates
        and the STOPPED-state error notification is emitted.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst_obj = objects.Instance(
            image_ref='fake-image_ref',
            vm_state=vm_states.STOPPED,
            instance_type_id=flavor['id'],
            system_metadata={},
            uuid='fake',
            user_id='fake')
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        exception = exc.UnsupportedPolicyException(reason='')

        image_mock.return_value = image
        brs_mock.return_value = request_spec
        task_exec_mock.side_effect = exception

        self.assertRaises(exc.UnsupportedPolicyException,
                          self.conductor._cold_migrate, self.context,
                          inst_obj, flavor, filter_props, [resvs],
                          clean_shutdown=True)

        updates = {'vm_state': vm_states.STOPPED, 'task_state': None}
        set_vm_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                            'migrate_server', updates,
                                            exception, request_spec)
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    @mock.patch.object(objects.Quotas, 'from_reservations')
    @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    @mock.patch.object(migrate.MigrationTask, 'rollback')
    @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
    def test_cold_migrate_exception_host_in_error_state_and_raise(
            self, prep_resize_mock, rollback_mock, notify_mock,
            select_dest_mock, quotas_mock, metadata_mock, sig_mock, brs_mock):
        """A prep_resize failure after host selection must notify, roll the
        task back and re-raise the original exception.

        NOTE: mock decorators apply bottom-up relative to the parameters.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst_obj = objects.Instance(
            image_ref='fake-image_ref',
            vm_state=vm_states.STOPPED,
            instance_type_id=flavor['id'],
            system_metadata={},
            uuid='fake',
            user_id='fake')
        image = 'fake-image'
        request_spec = dict(instance_type=dict(),
                            instance_properties=dict(),
                            image=image)
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        hosts = [dict(host='host1', nodename=None, limits={})]
        metadata_mock.return_value = image
        brs_mock.return_value = request_spec
        exc_info = test.TestingException('something happened')
        select_dest_mock.return_value = hosts
        updates = {'vm_state': vm_states.STOPPED,
                   'task_state': None}
        # prep_resize blowing up after a host was chosen is the trigger.
        prep_resize_mock.side_effect = exc_info
        self.assertRaises(test.TestingException,
                          self.conductor._cold_migrate,
                          self.context, inst_obj, flavor,
                          filter_props, [resvs],
                          clean_shutdown=True)
        # Verify the pipeline ran in full up to the failure point.
        metadata_mock.assert_called_with({})
        brs_mock.assert_called_once_with(self.context, image,
                                         [inst_obj],
                                         instance_type=flavor)
        quotas_mock.assert_called_once_with(self.context, [resvs],
                                            instance=inst_obj)
        sig_mock.assert_called_once_with(self.context, request_spec,
                                        filter_props)
        select_dest_mock.assert_called_once_with(
            self.context, request_spec, filter_props)
        prep_resize_mock.assert_called_once_with(
            self.context, image, inst_obj, flavor,
            hosts[0]['host'], [resvs],
            request_spec=request_spec,
            filter_properties=filter_props,
            node=hosts[0]['nodename'], clean_shutdown=True)
        notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                            'migrate_server', updates,
                                            exc_info, request_spec)
        rollback_mock.assert_called_once_with()
    def test_resize_no_valid_host_error_msg(self):
        """NoValidHost during a resize (flavor change) must produce a message
        mentioning 'resize' rather than 'cold migrate'."""
        flavor = flavors.get_flavor_by_name('m1.tiny')
        flavor_new = flavors.get_flavor_by_name('m1.small')
        inst_obj = objects.Instance(
            image_ref='fake-image_ref',
            vm_state=vm_states.STOPPED,
            instance_type_id=flavor['id'],
            system_metadata={},
            uuid='fake',
            user_id='fake')
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        # contextlib.nested: the "as" names line up positionally with the
        # patches above -- keep both lists in the same order.
        with contextlib.nested(
            mock.patch.object(utils, 'get_image_from_system_metadata',
                              return_value=image),
            mock.patch.object(scheduler_utils, 'build_request_spec',
                              return_value=request_spec),
            mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
            mock.patch.object(migrate.MigrationTask,
                              'execute',
                              side_effect=exc.NoValidHost(reason="")),
            mock.patch.object(migrate.MigrationTask, 'rollback')
        ) as (image_mock, brs_mock, vm_st_mock, task_execute_mock,
              task_rb_mock):
            nvh = self.assertRaises(exc.NoValidHost,
                                    self.conductor._cold_migrate, self.context,
                                    inst_obj, flavor_new, filter_props,
                                    [resvs], clean_shutdown=True)
            self.assertIn('resize', nvh.message)
    def test_build_instances_instance_not_found(self):
        """If one instance vanishes (refresh raises InstanceNotFound) the
        build must silently skip it and still build the surviving instance
        on its selected host.

        NOTE: mox record/replay is strictly order-sensitive -- the
        expectations below must stay in call order.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        self.mox.StubOutWithMock(instances[0], 'refresh')
        self.mox.StubOutWithMock(instances[1], 'refresh')
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')
        scheduler_utils.build_request_spec(self.context, image,
                mox.IgnoreArg()).AndReturn(spec)
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                        [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                         {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        # First instance disappears mid-build; second refreshes normally.
        instances[0].refresh().AndRaise(
                exc.InstanceNotFound(instance_id=instances[0].uuid))
        instances[1].refresh()
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context, instance=instances[1], host='host2',
                image={'fake-data': 'should_pass_silently'}, request_spec=spec,
                filter_properties={'limits': [],
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['host2',
                                                        'node2']]}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
                node='node2', limits=[])
        self.mox.ReplayAll()
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    def test_build_instances_info_cache_not_found(self, build_request_spec,
                                                  setup_instance_group):
        """InstanceInfoCacheNotFound on refresh must skip that instance but
        still schedule and build the remaining one."""
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        image = {'fake-data': 'should_pass_silently'}
        destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                        {'host': 'host2', 'nodename': 'node2', 'limits': []}]
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        build_request_spec.return_value = spec
        with contextlib.nested(
                mock.patch.object(instances[0], 'refresh',
                        side_effect=exc.InstanceInfoCacheNotFound(
                            instance_uuid=instances[0].uuid)),
                mock.patch.object(instances[1], 'refresh'),
                mock.patch.object(self.conductor_manager.scheduler_client,
                    'select_destinations', return_value=destinations),
                mock.patch.object(self.conductor_manager.compute_rpcapi,
                    'build_and_run_instance')
                ) as (inst1_refresh, inst2_refresh, select_destinations,
                        build_and_run_instance):
            # build_instances() is a cast, we need to wait for it to complete
            self.useFixture(cast_as_call.CastAsCall(self.stubs))
            self.conductor.build_instances(self.context,
                    instances=instances,
                    image=image,
                    filter_properties={},
                    admin_password='admin_password',
                    injected_files='injected_files',
                    requested_networks=None,
                    security_groups='security_groups',
                    block_device_mapping='block_device_mapping',
                    legacy_bdm=False)
            # NOTE(sbauza): Due to populate_retry() later in the code,
            # filter_properties is dynamically modified
            setup_instance_group.assert_called_once_with(
                self.context, spec, {'retry': {'num_attempts': 1,
                                               'hosts': []}})
            # Only the healthy second instance reaches the compute host.
            build_and_run_instance.assert_called_once_with(self.context,
                    instance=instances[1], host='host2', image={'fake-data':
                    'should_pass_silently'}, request_spec=spec,
                    filter_properties={'limits': [],
                                       'retry': {'num_attempts': 1,
                                                 'hosts': [['host2',
                                                            'node2']]}},
                    admin_password='admin_password',
                    injected_files='injected_files',
                    requested_networks=None,
                    security_groups='security_groups',
                    block_device_mapping=mock.ANY,
                    node='node2', limits=[])
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
                                  test_compute.BaseTestCase):
    """Conductor compute_task RPC namespace Tests."""

    def setUp(self):
        super(ConductorTaskRPCAPITestCase, self).setUp()
        # Run a real conductor service so RPC casts land on a live manager.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_rpcapi.ComputeTaskAPI()
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """Compute task API Tests."""

    def setUp(self):
        super(ConductorTaskAPITestCase, self).setUp()
        # Same harness as the RPC case, but exercised via the public API.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.ComputeTaskAPI()
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
    """Conductor LocalComputeTaskAPI Tests."""

    def setUp(self):
        super(ConductorLocalComputeTaskAPITestCase, self).setUp()
        # The local API dispatches straight to the manager -- no RPC hop.
        local_api = conductor_api.LocalComputeTaskAPI()
        self.conductor = local_api
        self.conductor_manager = local_api._manager._target
class ConductorV3ManagerProxyTestCase(test.NoDBTestCase):
    def test_v3_manager_proxy(self):
        """Each proxy method must forward ctxt + args to the real manager."""
        real_manager = conductor_manager.ConductorManager()
        proxy = conductor_manager._ConductorManagerV3Proxy(real_manager)
        ctxt = context.get_admin_context()

        # (method name, number of positional arguments it takes)
        cases = (
            ('provider_fw_rule_get_all', 0),
            ('object_class_action_versions', 5),
            ('object_action', 4),
            ('object_backport_versions', 2),
        )
        for name, argc in cases:
            call_args = list(range(argc))
            with mock.patch.object(real_manager, name) as forwarded:
                getattr(proxy, name)(ctxt, *call_args)
                forwarded.assert_called_once_with(ctxt, *call_args)
| 46.797699
| 79
| 0.591703
|
4a160103739de1c0fc46a0f7a7e267576486bd2f
| 4,626
|
py
|
Python
|
hendaza_custom_site/hooks.py
|
MostafaFekry/hendaza_custom_site
|
6c39145b004935e8b0c6174cf7769e72ecddd4dd
|
[
"MIT"
] | null | null | null |
hendaza_custom_site/hooks.py
|
MostafaFekry/hendaza_custom_site
|
6c39145b004935e8b0c6174cf7769e72ecddd4dd
|
[
"MIT"
] | null | null | null |
hendaza_custom_site/hooks.py
|
MostafaFekry/hendaza_custom_site
|
6c39145b004935e8b0c6174cf7769e72ecddd4dd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# Frappe app metadata consumed by the framework installer and the desk UI.
app_name = "hendaza_custom_site"
app_title = "Hendaza Custom Site"
app_publisher = "MostafaFekry"
app_description = "Custom site for Hendaza"
app_icon = "octicon octicon-globe"
app_color = "grey"
app_email = "mostafa.fekry@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/hendaza_custom_site/css/hendaza_custom_site.css"
# app_include_js = "/assets/hendaza_custom_site/js/hendaza_custom_site.js"
# include js, css files in header of web template
# web_include_css = "/assets/hendaza_custom_site/css/hendaza_custom_site.css"
# web_include_js = "/assets/hendaza_custom_site/js/hendaza_custom_site.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "hendaza_custom_site.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# website
# Hook: dotted path frappe calls to extend every rendered web page's context.
update_website_context = "hendaza_custom_site.utils.update_website_context"
# before_install = "hendaza_custom_site.install.before_install"
# after_install = "hendaza_custom_site.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "hendaza_custom_site.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "hendaza_custom_site.tasks.all"
# ],
# "daily": [
# "hendaza_custom_site.tasks.daily"
# ],
# "hourly": [
# "hendaza_custom_site.tasks.hourly"
# ],
# "weekly": [
# "hendaza_custom_site.tasks.weekly"
# ]
# "monthly": [
# "hendaza_custom_site.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "hendaza_custom_site.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "hendaza_custom_site.event.get_events"
# }
# fixtures
# Fixtures exported/synced on migrate: only the Custom Fields listed below,
# the two site languages, and the full contents of the remaining doctypes.
fixtures = [{"dt": "Custom Field", "filters": [["name", "in", [
    "Company History-old_year",
    "Company History-title",
    "Company History-column_break_4",
    "Company History-image",
    "About Us Team Member-position",
    "Item Group-light_description",
    "Item Group-page_header_background",
    "Item Group-column_break_13",
    "Item Group-territory",
    "Item Group-unit_usage",
    "Item Group-google_maps",
    "Item Group-containssb",
    "Item Group-website_share_files",
    "Item Website Specification-property_features",
    "Item Website Specification-icon",
    "Item-light_description",
    "Item-page_header_background",
    "Item-territory",
    "Item-unit_usage",
    "Item-google_maps",
    "Item-website_share_files",
    "Item Attribute-column_break_2",
    "Item Attribute-icon",
    "Web Page-light_description",
    "Web Page-page_header_background",
    "Website Slideshow Item-slider_description",
    "Website Slideshow Item-heading_title",
    "Website Slideshow Item-column_break_5",
    "Website Slideshow Item-link_title",
    "Website Slideshow Item-link_path",
    "Website Slideshow Item-link_target",
    "Website Slideshow Item-set_position"
]]]},
    {"dt": "Language", "filters": [["name", "in", [
        "en",
        "ar"
    ]]]},
    {"dt": "Website Languages", "filters": [["name", "in", [
        "en",
        "ar"
    ]]]},
    {"dt": "Top Bar Item"},
    {"dt": "Coming Soon Settings"},
    {"dt": "Website About Us Settings"},
    {"dt": "Website Contact Us Settings"},
    {"dt": "Unit Usage"},
    {"dt": "Property Features"}
]
| 26.284091
| 87
| 0.688067
|
4a160136f9b7cd3bc1045bf3cb026d28ce825d94
| 6,686
|
py
|
Python
|
tests/core_test.py
|
wendazhou/jax
|
d7894198a1ad0e54de42450c27ad5e715cb59aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-06-13T20:51:49.000Z
|
2021-06-14T02:37:06.000Z
|
tests/core_test.py
|
wendazhou/jax
|
d7894198a1ad0e54de42450c27ad5e715cb59aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/core_test.py
|
wendazhou/jax
|
d7894198a1ad0e54de42450c27ad5e715cb59aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-03-14T10:07:22.000Z
|
2019-03-14T10:07:22.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
from collections import namedtuple
from unittest import skip
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
from jax import api
from jax import core
from jax import numpy as np
from jax import test_util as jtu
from jax.api import jvp, linearize, vjp, jit
from jax.lax import UnshapedArray, ShapedArray, ConcreteArray
from jax.tree_util import tree_flatten, tree_unflatten, tree_multimap, tree_reduce
from jax.util import partial
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
# Reusable PartialVals: an unknown unshaped float and an unknown scalar,
# each paired with the unit constant (no known value).
_ = pe.PartialVal((UnshapedArray(onp.float32), core.unit))
__ = pe.PartialVal((ShapedArray((), onp.float32), core.unit))
def call(f, *args):
  """Invoke f(*args) through jit, exercising the call machinery."""
  jitted = jit(f)
  return jitted(*args)
def simple_fun(x, y):
  """Elementwise sin of the product of x and y."""
  product = x * y
  return np.sin(product)
def simple_fun_fanout(x, y):
  """Like simple_fun but fans x out into a second use."""
  product = x * y
  return np.sin(product) * x
def fun_with_call(x):
  """sin(x) routed through a single call (jit) primitive."""
  return call(np.sin, x)
def fun_with_nested_calls(x):
  """Two levels of call with closures over x captured at each level."""
  def f(y):
    y2 = np.sin(y) + 1.0 + (2.0 * x)
    @jit
    def g(z):
      # closes over y2 (inner scope) and x (outer scope) simultaneously
      return y2 * z * x + (x * y)
    return call(g, y)
  return call(f, x)
def error(*args):
  """Return a callable that fails on invocation; all arguments are ignored."""
  def _must_not_run(*inner_args):
    assert False
  return _must_not_run
def fun_with_nested_calls_2(x):
  """Deeply nested calls mixing closures, zero-arg callees and a jvp inside."""
  def bar(y):
    def baz(w):
      q = call(lambda x: y, x)       # inner lambda shadows outer x
      q = q + call(lambda: y)        # zero-argument callee
      q = q + call(lambda y: w + y, y)
      q = call(lambda w: call(np.sin, x) * y, 1.0) + q
      return q
    p, t = jvp(baz, (x + 1.0,), (y,))
    return t + (x * p)
  return call(bar, x)
def fun_call_jitted(x):
  """A call primitive whose callee is itself a jitted closure over x."""
  @jit
  def scaled(z):
    return x * z
  return call(scaled, x)
def fun_with_two_calls(x):
  """Two independent call primitives combined in one expression."""
  return call(np.sin, x) + call(np.cos, x)
def fun_with_call_closure(x):
  """Call a closure capturing x; exercises constants closed over inside call."""
  def scaled_sin(y, z):
    return (x * x) * np.sin(y) * z
  return call(scaled_sin, x, np.cos(x)) + x
def product_io_fun(x, y):
  """Structured-I/O function: dict + nested tuple in, (array, list) out."""
  y1, (y2, y3) = y
  first = np.sin(x['a'] + y2)
  second = [x['b'], (y1, y3)]
  return first, second
R = onp.random.randn  # shorthand for random float64 test inputs
# A test case: the function under test plus concrete example arguments.
TestSpec = namedtuple('TestSpec', ['fun', 'args'])
test_specs_base = [
    TestSpec(simple_fun, (R(3, 2), R(3, 2))),
    TestSpec(simple_fun_fanout, (R(3, 2), R(3, 2))),
    TestSpec(product_io_fun, ({'a': R(2, 2), 'b': R(2, 2)},
                              (R(2, 2), (R(2, 2), R(2, 2))))),
    TestSpec(fun_with_call, (R(3, 2),)),
    TestSpec(fun_with_two_calls, (R(3, 2),)),
    TestSpec(fun_with_call_closure, (R(3, 2),)),
    TestSpec(fun_call_jitted, (R(1,),)),
    TestSpec(fun_with_nested_calls, (R(),)),
    TestSpec(fun_with_nested_calls, (R(3, 2),)),
    TestSpec(fun_with_nested_calls_2, (R(1, 2),)),
]
def jvp_unlinearized(f, primals, tangents):
  """jvp computed via linearize: evaluate once, then apply the linear map."""
  primal_out, jvp_fun = linearize(f, *primals)
  return primal_out, jvp_fun(*tangents)
# Expand each base spec into jvp'd, jitted, doubly-jitted and linearize-based
# variants so every transform combination hits every test function.
test_specs = []
for ts in test_specs_base:
  test_specs.append(ts)
  test_specs.append(TestSpec(partial(jvp, ts.fun), (ts.args, ts.args)))
  test_specs.append(TestSpec(jit(ts.fun), ts.args))
  test_specs.append(TestSpec(jit(jit(ts.fun)), ts.args))
  test_specs.append(TestSpec(partial(jvp_unlinearized, ts.fun),
                             (ts.args, ts.args)))
def fwd_deriv(f):
  """Return the scalar forward-mode derivative of f (tangent seeded with 1.0)."""
  def deriv(x):
    return jvp(f, (x,), (1.0,))[1]
  return deriv
class CoreTest(jtu.JaxTestCase):
  """Core tests: pytree utilities plus jit/jvp/vjp round-trips."""

  def test_tree_multimap(self):
    xs = ({'a': 1}, [2, 3])
    ys = ({'a': 10}, [20, 30])
    ys_bad = ({'a': 10, 'b': 10}, [20, 30])
    zs = ({'a': 11}, [22, 33])
    f = lambda x, y: x + y
    assert tree_multimap(f, xs, ys) == zs
    # mismatched tree structures must be rejected
    try:
      tree_multimap(f, xs, ys_bad)
      assert False
    except (TypeError, ValueError):
      pass

  def test_tree_flatten(self):
    flat, _ = tree_flatten(({'a': 1}, [2, 3], 4))
    assert flat == [1, 2, 3, 4]

  def test_tree_unflatten(self):
    tree = [(1, 2), {"roy": (3, [4, 5, ()])}]
    flat, treedef = tree_flatten(tree)
    assert flat == [1, 2, 3, 4, 5]
    tree2 = tree_unflatten(treedef, flat)
    # round-trip must reproduce the structure node-for-node
    nodes_equal = tree_multimap(operator.eq, tree, tree2)
    assert tree_reduce(operator.and_, nodes_equal)

  @parameterized.parameters(test_specs)
  def test_jit(self, f, args):
    # jit must not change computed values
    jtu.check_eq(jit(f)(*args), f(*args))

  @parameterized.parameters(test_specs)
  def test_jvp(self, f, args):
    jtu.check_jvp(f, partial(jvp, f), args)

  def test_jvp_zeros(self):
    def foo(x):
      def bar(y):
        return np.sin(x * y)
      return jvp(bar, (3 * x,), (2 * x,))
    jtu.check_eq(jit(foo)(0.5), foo(0.5))

  @parameterized.parameters(test_specs)
  def test_jvp_linearized(self, f, args):
    jtu.check_jvp(f, partial(jvp_unlinearized, f), args)

  @parameterized.parameters(test_specs)
  def test_vjp(self, f, args):
    jtu.check_vjp(f, partial(vjp, f), args)

  def test_jvp_closure(self):
    # jvp over a function that itself computes a jvp of a closure
    def foo(x):
      def bar(y):
        return np.multiply(x, y)
      return jvp(bar, (3.0,), (1.0,))[1]
    ans = jvp(foo, (1.0,), (2.0,))
    assert ans == (1.0, 2.0), ans

  def test_jit_closure(self):
    def foo(x):
      @jit
      def bar(y):
        return x + y
      return bar(0.0)
    assert jvp(foo, (1.0,), (2.0,)) == (1.0, 2.0)

  def test_simple_jit(self):
    def foo(x):
      if x.shape == ():
        return x + 1.
      else:
        return x + 2.
    foo2 = jit(foo)
    foo3 = jit(foo2)
    x1, y1 = onp.array(1.0), onp.array(2.0)
    assert foo(x1) == y1
    assert foo2(x1) == y1
    assert foo3(x1) == y1
    x2, y2 = onp.array([1.0, 2.0]), onp.array([3.0, 4.0])
    assert onp.all(foo(x2) == y2)
    assert onp.all(foo2(x2) == y2)
    assert onp.all(foo3(x2) == y2)

  def test_product_jit(self):
    # structured (tuple/dict) inputs and outputs through jit
    def foo(x, tup):
      y, z = tup
      w = x + z
      return (w, {'x': y}), z
    foo2 = jit(foo)
    foo3 = jit(foo2)
    args = (1.0, (2.0, 3.0))
    expected_output = ((4.0, {'x': 2.0}), 3.0)
    assert foo(*args) == expected_output
    assert foo2(*args) == expected_output
    assert foo3(*args) == foo(*args)

  def test_jvp_2(self):
    # repeated forward-mode differentiation of sin at 0
    d_sin = fwd_deriv(np.sin)
    d2_sin = fwd_deriv(d_sin)
    d3_sin = fwd_deriv(d2_sin)
    assert d_sin(0.0) == 1.0
    assert d2_sin(0.0) == 0.0
    assert d3_sin(0.0) == -1.0
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
| 25.616858
| 82
| 0.616961
|
4a16025e5cd7c2112db297d1d6c67aba1d3e6d50
| 12,541
|
py
|
Python
|
golos/utils.py
|
Chainers/golos-python
|
7c06a933256c7ca0c52d4348526d3712ac00e7ab
|
[
"MIT"
] | 1
|
2018-04-11T15:44:21.000Z
|
2018-04-11T15:44:21.000Z
|
golos/utils.py
|
Chainers/steep-golos
|
7c06a933256c7ca0c52d4348526d3712ac00e7ab
|
[
"MIT"
] | null | null | null |
golos/utils.py
|
Chainers/steep-golos
|
7c06a933256c7ca0c52d4348526d3712ac00e7ab
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import logging
import os
import re
import time
from datetime import datetime
from json import JSONDecodeError
from math import log10
from urllib.parse import urlparse
import w3lib.url
from langdetect import DetectorFactory, detect
from langdetect.lang_detect_exception import LangDetectException
from toolz import update_in, assoc
logger = logging.getLogger(__name__)
# https://github.com/matiasb/python-unidiff/blob/master/unidiff/constants.py#L37
# @@ (source offset, length) (target offset, length) @@ (section header)
RE_HUNK_HEADER = re.compile(
    r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))?\ @@[ ]?(.*)$",
    flags=re.MULTILINE)
# ensure deterministic language detection
DetectorFactory.seed = 0
# below this many characters langdetect results are too noisy to trust
MIN_TEXT_LENGTH_FOR_DETECTION = 20
# unix epoch, used by epoch_seconds()
epoch = datetime(1970, 1, 1)
# Cyrillic -> latin transliteration table used by the permlink sanitizers
rus_d = {
    'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd',
    'е': 'e', 'ё': 'yo', 'ж': 'zh', 'з': 'z', 'и': 'i',
    'й': 'ij', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n',
    'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't',
    'у': 'u', 'ф': 'f', 'х': 'kh', 'ц': 'cz', 'ч': 'ch',
    'ш': 'sh', 'щ': 'shch', 'ъ': 'xx', 'ы': 'y', 'ь': 'x',
    'э': 'ye', 'ю': 'yu', 'я': 'ya',
    'А': "A", 'Б': "B", 'В': "V", 'Г': "G", 'Д': "D",
    'Е': "E", 'Ё': "yo", 'Ж': "ZH", 'З': "Z", 'И': "I",
    'Й': "IJ", 'К': "K", 'Л': "L", 'М': "M", 'Н': "N",
    'О': "O", 'П': "P", 'Р': "R", 'С': "S", 'Т': "T",
    'У': "U", 'Ф': "F", 'Х': "KH", 'Ц': "CZ", 'Ч': "CH",
    'Ш': "SH", 'Щ': "SHCH", 'Ъ': "XX", 'Ы': "Y", 'Ь': "X",
    'Э': "YE", 'Ю': "YU", 'Я': "YA",
}
def block_num_from_hash(block_hash: str) -> int:
    """Block number encoded as the first 4 bytes (8 hex digits) of a block ID."""
    leading_hex = str(block_hash)[:8]
    return int(leading_hex, 16)


def block_num_from_previous(previous_block_hash: str) -> int:
    """Number of the block that follows *previous_block_hash*."""
    return block_num_from_hash(previous_block_hash) + 1
def chunkify(iterable, chunksize=10000):
    """Yield lists of up to *chunksize* consecutive items from *iterable*.

    The final chunk may be shorter; an empty iterable yields nothing.
    """
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == chunksize:
            yield chunk
            chunk = []
    if chunk:
        yield chunk
def ensure_decoded(thing):
    """Decode a possibly single- or double-JSON-encoded value.

    Already-decoded lists/dicts pass through. Falsy input, undecodable input,
    an encoded empty string, and non-dict/non-str decodings all yield None.
    """
    if not thing:
        logger.debug('ensure_decoded thing is logically False')
        return None
    if isinstance(thing, (list, dict)):
        logger.debug('ensure_decoded thing is already decoded')
        return thing

    single_encoded_dict = double_encoded_dict = None
    try:
        single_encoded_dict = json.loads(thing)
        if isinstance(single_encoded_dict, dict):
            logger.debug('ensure_decoded thing is single encoded dict')
            return single_encoded_dict
        if isinstance(single_encoded_dict, str):
            logger.debug('ensure_decoded thing is single encoded str')
            if single_encoded_dict == "":
                logger.debug(
                    'ensure_decoded thing is single encoded str == ""')
                return None
            double_encoded_dict = json.loads(single_encoded_dict)
            logger.debug('ensure_decoded thing is double encoded')
            return double_encoded_dict
    except Exception as e:
        logger.error('ensure_decoded error', extra=dict(
            thing=thing,
            single_encoded_dict=single_encoded_dict,
            double_encoded_dict=double_encoded_dict,
            error=e))
    return None
def findkeys(node, kv):
    """Recursively yield every value stored under key *kv* in nested dicts/lists."""
    if isinstance(node, list):
        for element in node:
            yield from findkeys(element, kv)
    elif isinstance(node, dict):
        if kv in node:
            yield node[kv]
        for value in node.values():
            yield from findkeys(value, kv)


def extract_keys_from_meta(meta, keys):
    """Collect all values of *keys* found anywhere in *meta*.

    String values are appended, list/tuple values are flattened in; anything
    else is logged and skipped. *keys* may be a single string.
    """
    if isinstance(keys, str):
        keys = [keys]
    extracted = []
    for key in keys:
        for item in findkeys(meta, key):
            if isinstance(item, str):
                extracted.append(item)
            elif isinstance(item, (list, tuple)):
                extracted.extend(item)
            else:
                logger.warning('unusual item in meta: %s', item)
    return extracted
def build_comment_url(parent_permlink=None, author=None, permlink=None):
    """Join parent permlink, author and permlink into a comment URL path."""
    parts = [parent_permlink, author, permlink]
    return '/'.join(parts)
def canonicalize_url(url, **kwargs):
    """Return the canonical form of *url* via w3lib, or None when the URL is
    unparsable or lacks both a scheme and a netloc.

    Extra kwargs are passed through to w3lib.url.canonicalize_url.
    """
    try:
        canonical_url = w3lib.url.canonicalize_url(url, **kwargs)
    except Exception as e:
        logger.warning('url preparation error', extra=dict(url=url, error=e))
        return None
    if canonical_url != url:
        logger.debug('canonical_url changed %s to %s', url, canonical_url)
    try:
        parsed_url = urlparse(canonical_url)
        # reject scheme-less, host-less strings that survived canonicalization
        if not parsed_url.scheme and not parsed_url.netloc:
            _log = dict(
                url=url, canonical_url=canonical_url, parsed_url=parsed_url)
            logger.warning('bad url encountered', extra=_log)
            return None
    except Exception as e:
        logger.warning('url parse error', extra=dict(url=url, error=e))
        return None
    return canonical_url
def findall_patch_hunks(body=None):
    """Return all unified-diff hunk-header matches in *body*.

    NOTE(review): the body=None default raises TypeError inside findall if
    no body is supplied -- callers appear expected to always pass text.
    """
    return RE_HUNK_HEADER.findall(body)
def detect_language(text):
    """Best-effort language code for *text* via langdetect.

    Returns None when the text is missing/too short or detection fails.
    """
    if not text or len(text) < MIN_TEXT_LENGTH_FOR_DETECTION:
        logger.debug('not enough text to perform langdetect')
        return None
    try:
        return detect(text)
    except LangDetectException as e:
        logger.warning(e)
        return None
def is_comment(item):
    """Quick truthy check whether *item* is a reply to another post.

    Accepts a Post object or a raw blockchain comment dict; a reply has a
    permlink starting with 're-' and a non-empty parent_author.
    """
    return item['permlink'].startswith("re-") and item['parent_author']
def time_elapsed(posting_time):
    """Return the timedelta from *posting_time* to the current UTC time.

    *posting_time* may be a datetime or a blockchain time string (which is
    parsed via parse_time).
    """
    # isinstance instead of type()==str: idiomatic and handles str subclasses.
    if isinstance(posting_time, str):
        posting_time = parse_time(posting_time)
    return datetime.utcnow() - posting_time
def parse_time(block_time):
    """Parse a blockchain time string into a naive datetime object."""
    fmt = '%Y-%m-%dT%H:%M:%S'
    return datetime.strptime(block_time, fmt)
def time_diff(time1, time2):
    """timedelta between two blockchain time strings (time1 - time2)."""
    return parse_time(time1) - parse_time(time2)
def keep_in_dict(obj, allowed_keys=()):
    """Prune a class or dictionary of all but *allowed_keys*.

    Accepts a dict or any object with __dict__; returns a new dict.
    (Default changed from a shared mutable list to an immutable tuple;
    membership semantics are identical.)
    """
    items = obj.items() if isinstance(obj, dict) else obj.__dict__.items()
    return {k: v for k, v in items if k in allowed_keys}


def remove_from_dict(obj, remove_keys=()):
    """Prune a class or dictionary of the specified *remove_keys*.

    Accepts a dict or any object with __dict__; returns a new dict.
    """
    items = obj.items() if isinstance(obj, dict) else obj.__dict__.items()
    return {k: v for k, v in items if k not in remove_keys}
def construct_identifier(*args, username_prefix='@'):
    """Create '<prefix><author>/<permlink>' from a post dict or two arguments.

    Examples:
    ::
        construct_identifier('username', 'permlink')
        construct_identifier({'author': 'username', 'permlink': 'permlink'})
    """
    if len(args) == 1:
        post = args[0]
        author = post['author']
        permlink = post['permlink']
    elif len(args) == 2:
        author, permlink = args
    else:
        raise ValueError('construct_identifier() received unparsable arguments')

    fields = dict(prefix=username_prefix, author=author, permlink=permlink)
    return "{prefix}{author}/{permlink}".format(**fields)
def json_expand(json_op, key_name='json'):
    """ Convert a string json object to Python dict in an op. """
    # update_in/assoc come from toolz: each returns a shallow copy of
    # json_op with key_name decoded, or (on bad JSON) set to an empty dict.
    if type(json_op) == dict and key_name in json_op and json_op[key_name]:
        try:
            return update_in(json_op, [key_name], json.loads)
        except JSONDecodeError:
            return assoc(json_op, key_name, {})
    # non-dict input or missing/empty payload passes through unchanged
    return json_op
def sanitize_permlink(permlink):
    """Normalize *permlink* to lowercase ASCII: separators become '-',
    Cyrillic is transliterated, everything else is dropped."""
    permlink = permlink.strip()
    # Raw strings: "\s"/"\w"/"\." in plain literals are invalid escape
    # sequences (DeprecationWarning in py3.6+, SyntaxError later).
    permlink = re.sub(r"_|\s|\.", "-", permlink)
    permlink = re.sub(r"[^\w-]", "", permlink)
    pattern = re.compile('|'.join(rus_d.keys()))
    permlink = pattern.sub(lambda x: rus_d[x.group()], permlink)
    permlink = re.sub(r"[^a-zA-Z0-9-]", "", permlink)
    return permlink.lower()


def sanitize_permlink_category(permlink):
    """Like sanitize_permlink, but prefixes 'ru--' when transliteration
    actually occurred (Golos category naming convention)."""
    permlink = permlink.strip()
    permlink = re.sub(r"_|\s|\.", "-", permlink)
    permlink = re.sub(r"[^\w-]", "", permlink)
    pattern = re.compile('|'.join(rus_d.keys()))
    new_permlink = pattern.sub(lambda x: rus_d[x.group()], permlink)
    if new_permlink != permlink:
        permlink = 'ru--%s' % new_permlink
    permlink = re.sub(r"[^a-zA-Z0-9-]", "", permlink)
    return permlink.lower()


def derive_permlink(title, parent_permlink=None):
    """Build a permlink: 're-<parent>-<utc stamp>' for replies, else from title."""
    if parent_permlink:
        permlink = "re-" + parent_permlink + "-" + fmt_time(time.time())
    else:
        permlink = title
    return sanitize_permlink(permlink)


def derive_permlink_category(title, parent_permlink=None):
    """Category variant of derive_permlink.

    Reuses fmt_time() instead of duplicating its strftime format inline,
    keeping the timestamp format defined in exactly one place.
    """
    if parent_permlink:
        permlink = "re-" + parent_permlink + "-" + fmt_time(time.time())
    else:
        permlink = title
    return sanitize_permlink_category(permlink)
def resolve_identifier(identifier):
    """Split '[@]author/permlink' into (author, permlink).

    Raises ValueError when *identifier* does not match the expected shape.
    """
    # Raw string: "\w"/"\-"/"\." in a plain literal are invalid escapes.
    match = re.match(r"@?([\w\-\.]*)/([\w\-]*)", identifier)
    if match is None:
        raise ValueError("Invalid identifier")
    return match.group(1), match.group(2)
def fmt_time(t):
    """Format a unix timestamp as a permlink-safe UTC stamp (%Y%m%dt%H%M%S%Z)."""
    stamp = datetime.utcfromtimestamp(t)
    return stamp.strftime("%Y%m%dt%H%M%S%Z")


def fmt_time_string(t):
    """Parse a '%Y-%m-%dT%H:%M:%S' time string into a naive datetime."""
    return datetime.strptime(t, '%Y-%m-%dT%H:%M:%S')
def fmt_time_from_now(secs=0):
    """Graphene-formatted UTC time *secs* seconds from now.

    :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`)
    :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`)
    :rtype: str
    """
    target = time.time() + int(secs)
    return datetime.utcfromtimestamp(target).strftime('%Y-%m-%dT%H:%M:%S')
def env_unlocked():
    """Whether the wallet password was provided via the UNLOCK env variable.

    Returns the variable's value when set, else False.
    """
    return os.environ.get('UNLOCK', False)
# todo remove these
def strfage(time, fmt=None):
    """Render an age as '<d> days <h> hours <m> minutes <s> seconds'.

    Accepts a timedelta, a datetime, or an ISO time string (the latter two
    are converted to an age relative to utcnow). *fmt* is accepted for API
    compatibility but unused.
    """
    if not hasattr(time, "days"):  # dirty hack: datetime/str -> timedelta
        now = datetime.utcnow()
        if isinstance(time, str):
            time = datetime.strptime(time, '%Y-%m-%dT%H:%M:%S')
        time = now - time

    parts = {"days": time.days}
    parts["hours"], rem = divmod(time.seconds, 3600)
    parts["minutes"], parts["seconds"] = divmod(rem, 60)

    # build the template from the smallest unit up, skipping zero leaders
    template = "{seconds} seconds"
    if parts["minutes"]:
        template = "{minutes} minutes " + template
    if parts["hours"]:
        template = "{hours} hours " + template
    if parts["days"]:
        template = "{days} days " + template
    return template.format(**parts)
def strfdelta(tdelta, fmt):
    """Format a timedelta using a template with {days}/{hours}/{minutes}/{seconds}.

    Returns None for falsy or non-timedelta input.
    """
    if not tdelta or not hasattr(tdelta, "days"):  # dirty hack
        return None
    values = {"days": tdelta.days}
    values["hours"], remainder = divmod(tdelta.seconds, 3600)
    values["minutes"], values["seconds"] = divmod(remainder, 60)
    return fmt.format(**values)
def is_valid_account_name(name):
    """Match Golos account-name rules: a-z start, then 2-15 of [a-z0-9.-].

    Returns the re.Match object (truthy) or None, like re.match.
    """
    # Raw string: '\-' in a plain literal is an invalid escape sequence.
    return re.match(r'^[a-z][a-z0-9\-.]{2,15}$', name)
def epoch_seconds(date: datetime):
return (date - epoch).total_seconds()
def calculate_score(S: int, T: int, score: int, created_tm: datetime):
# implemented libraries/plugins/tags/tags_plugin.cpp from Node sources, method calculate_score
if isinstance(score, str):
try:
score = int(score)
except ValueError:
score = 0
mod_score = score / S
order = log10(max(abs(mod_score), 1))
sign = 1 if mod_score > 0 else -1 if mod_score < 0 else 0
return sign * order + epoch_seconds(created_tm) / T
def calculate_hot(score: int, created_tm: datetime):
return calculate_score(10000000, 10000, score, created_tm)
def calculate_trending(score: int, created_tm: datetime):
    """Score a post for the 'trending' ranking (long time window)."""
    scale, window = 10000000, 480000
    return calculate_score(scale, window, score, created_tm)
| 29.438967
| 99
| 0.597879
|
4a1602ade40568248d2086c08d7611531250080c
| 846
|
py
|
Python
|
leetcode/medium/Trees/BinTreeFromPre&Inorder.py
|
cheshtaaagarrwal/DS-Algos
|
d64f07355a0ea4342e868a359f34be28c183f8ff
|
[
"MIT"
] | null | null | null |
leetcode/medium/Trees/BinTreeFromPre&Inorder.py
|
cheshtaaagarrwal/DS-Algos
|
d64f07355a0ea4342e868a359f34be28c183f8ff
|
[
"MIT"
] | null | null | null |
leetcode/medium/Trees/BinTreeFromPre&Inorder.py
|
cheshtaaagarrwal/DS-Algos
|
d64f07355a0ea4342e868a359f34be28c183f8ff
|
[
"MIT"
] | 1
|
2021-10-11T23:11:55.000Z
|
2021-10-11T23:11:55.000Z
|
# Given preorder and inorder traversal of a tree, construct the binary tree.
# Note:
# You may assume that duplicates do not exist in the tree.
# For example, given
# preorder = [3,9,20,15,7]
# inorder = [9,3,15,20,7]
# Return the following binary tree:
# 3
# / \
# 9 20
# / \
# 15 7
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.left = None
        self.right = None
        self.val = x
class Solution:
    """Reconstruct a binary tree from its preorder and inorder traversals.

    Assumes both traversals describe the same tree and contain no
    duplicate values (per the problem statement).
    """
    def buildTree(self, preorder: 'list[int]', inorder: 'list[int]') -> TreeNode:
        """Return the root of the rebuilt tree (``None`` for empty input).

        Consumes ``preorder`` from the front, as the original did.  A
        value->index map over ``inorder`` replaces the repeated
        ``list.index`` calls and list slicing of the naive version,
        avoiding the quadratic scan-and-copy work.
        """
        position = {val: i for i, val in enumerate(inorder)}

        def build(lo, hi):
            # Rebuild the subtree covering inorder[lo:hi+1].
            if lo > hi:
                return None
            rootval = preorder.pop(0)
            root = TreeNode(rootval)
            mid = position[rootval]
            root.left = build(lo, mid - 1)
            root.right = build(mid + 1, hi)
            return root

        return build(0, len(inorder) - 1)
| 22.864865
| 81
| 0.591017
|
4a16032bd9e701fdf452266a77a991d4788d993e
| 257
|
py
|
Python
|
twist_plotter/utils/math_tools.py
|
alexemm/DataMonitor
|
6560ac38a1cee4a324e521c6d529b9a09f41ade9
|
[
"MIT"
] | null | null | null |
twist_plotter/utils/math_tools.py
|
alexemm/DataMonitor
|
6560ac38a1cee4a324e521c6d529b9a09f41ade9
|
[
"MIT"
] | null | null | null |
twist_plotter/utils/math_tools.py
|
alexemm/DataMonitor
|
6560ac38a1cee4a324e521c6d529b9a09f41ade9
|
[
"MIT"
] | null | null | null |
from Equation import Expression
import numpy as np
def text2lambda(text):
    """Parse *text* into a callable expression of the single variable 'x'."""
    compiled = Expression(text, 'x')
    return compiled
def create_data_from_function(f, begin, end, steps=69):
    """Sample *f* at *steps* evenly spaced points on [begin, end].

    Returns the ``(x, y)`` pair as numpy arrays.
    """
    xs = np.linspace(begin, end, steps)
    ys = np.array([f(value) for value in xs])
    return xs, ys
| 18.357143
| 55
| 0.680934
|
4a16033401d9d62994a137225ad1681f90724f01
| 13,099
|
py
|
Python
|
sopel/modules/admin.py
|
torstehu/sopel
|
514d6b8c70b5853d96dd2093d87eecfd8b176c62
|
[
"EFL-2.0"
] | null | null | null |
sopel/modules/admin.py
|
torstehu/sopel
|
514d6b8c70b5853d96dd2093d87eecfd8b176c62
|
[
"EFL-2.0"
] | null | null | null |
sopel/modules/admin.py
|
torstehu/sopel
|
514d6b8c70b5853d96dd2093d87eecfd8b176c62
|
[
"EFL-2.0"
] | null | null | null |
# coding=utf-8
"""
admin.py - Sopel Admin Module
Copyright 2010-2011, Sean B. Palmer (inamidst.com) and Michael Yanovich
(yanovich.net)
Copyright © 2012, Elad Alfassa, <elad@fedoraproject.org>
Copyright 2013, Ari Koivula <ari@koivu.la>
Copyright 2019, Florian Strzelecki, https://github.com/Exirel
Licensed under the Eiffel Forum License 2.
https://sopel.chat
"""
from __future__ import unicode_literals, absolute_import, print_function, division
from sopel.config.types import (
StaticSection, ValidatedAttribute, FilenameAttribute
)
import sopel.module
class AdminSection(StaticSection):
    """Configuration section ``[admin]`` for this module."""
    hold_ground = ValidatedAttribute('hold_ground', bool, default=False)
    """Auto re-join on kick"""
    auto_accept_invite = ValidatedAttribute('auto_accept_invite', bool,
                                            default=True)
    """Auto-join channels when invited"""
def configure(config):
    """
    | name | example | purpose |
    | ---- | ------- | ------- |
    | hold\\_ground | False | Auto-rejoin the channel after being kicked. |
    | auto\\_accept\\_invite | True | Auto-join channels when invited. |
    """
    config.define_section('admin', AdminSection)
    admin = config.admin
    admin.configure_setting('hold_ground',
                            "Automatically re-join after being kicked?")
    admin.configure_setting('auto_accept_invite',
                            'Automatically join channels when invited?')
def setup(bot):
    """Ensure the ``[admin]`` config section is defined when the module loads."""
    bot.config.define_section('admin', AdminSection)
class InvalidSection(Exception):
    """Raised when a requested config section does not exist."""
    def __init__(self, section):
        message = 'Section [{}] does not exist.'.format(section)
        super(InvalidSection, self).__init__(self, message)
        self.section = section
class InvalidSectionOption(Exception):
    """Raised when a config option does not exist within its section."""
    def __init__(self, section, option):
        message = 'Section [{}] does not have option \'{}\'.'.format(section, option)
        super(InvalidSectionOption, self).__init__(self, message)
        self.section = section
        self.option = option
def _get_config_channels(channels):
"""List"""
for channel_info in channels:
if ' ' in channel_info:
yield channel_info.split(' ', 1)
else:
yield (channel_info, None)
def _set_config_channels(bot, channels):
bot.config.core.channels = [
' '.join([part for part in items if part])
for items in channels.items()
]
bot.config.save()
def _join(bot, channel, key=None, save=True):
    """Join ``channel`` (optionally keyed) and, unless ``save`` is False,
    record it in the configured channel list when new or changed."""
    if not channel:
        return
    if key:
        bot.join(channel, key)
    else:
        bot.join(channel)
    if save:
        known = dict(_get_config_channels(bot.config.core.channels))
        # Persist only when the channel is new or its key changed.
        if channel not in known or known[channel] != key:
            known[channel] = key
            _set_config_channels(bot, known)
def _part(bot, channel, msg=None, save=True):
    """Leave ``channel`` (optionally with a part message) and, unless
    ``save`` is False, drop it from the saved channel list."""
    bot.part(channel, msg or None)
    if save:
        known = dict(_get_config_channels(bot.config.core.channels))
        if channel in known:
            known.pop(channel)
            _set_config_channels(bot, known)
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('join')
@sopel.module.priority('low')
@sopel.module.example('.join #example key', user_help=True)
@sopel.module.example('.join #example', user_help=True)
def join(bot, trigger):
    """Join the specified channel. This is an admin-only command."""
    # group(3) is the channel, group(4) the optional key.
    _join(bot, trigger.group(3), trigger.group(4))
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('tmpjoin')
@sopel.module.priority('low')
@sopel.module.example('.tmpjoin #example or .tmpjoin #example key')
def temporary_join(bot, trigger):
    """Like ``join``, without saving. This is an admin-only command.
    Unlike the ``join`` command, ``tmpjoin`` won't remember the channel upon
    restarting the bot.
    """
    # Same as join(), but skip persisting the channel to the config.
    _join(bot, trigger.group(3), trigger.group(4), save=False)
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('part')
@sopel.module.priority('low')
@sopel.module.example('.part #example')
def part(bot, trigger):
    """Part the specified channel. This is an admin-only command."""
    # Everything after the first space is the (optional) part message.
    target, _sep, reason = trigger.group(2).partition(' ')
    _part(bot, target, reason)
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('tmppart')
@sopel.module.priority('low')
@sopel.module.example('.tmppart #example')
def temporary_part(bot, trigger):
    """Like ``part``, without saving. This is an admin-only command.
    Unlike the ``part`` command, ``tmppart`` will rejoin the channel upon
    restarting the bot.
    """
    target, _sep, reason = trigger.group(2).partition(' ')
    _part(bot, target, reason, save=False)
@sopel.module.require_privmsg
@sopel.module.require_owner
@sopel.module.commands('restart')
@sopel.module.priority('low')
def restart(bot, trigger):
    """Restart the bot. This is an owner-only command."""
    reason = trigger.group(2)
    if not reason:
        reason = 'Restart on command from %s' % trigger.nick
    bot.restart(reason)
@sopel.module.require_privmsg
@sopel.module.require_owner
@sopel.module.commands('quit')
@sopel.module.priority('low')
def quit(bot, trigger):
    """Quit from the server. This is an owner-only command."""
    reason = trigger.group(2)
    if not reason:
        reason = 'Quitting on command from %s' % trigger.nick
    bot.quit(reason)
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('say', 'msg')
@sopel.module.priority('low')
@sopel.module.example('.say #YourPants Does anyone else smell neurotoxin?')
def say(bot, trigger):
    """
    Send a message to a given channel or nick. Can only be done in privmsg by
    an admin.
    """
    raw = trigger.group(2)
    if raw is None:
        return
    # First word is the destination; the rest is the message.
    channel, _sep, message = raw.partition(' ')
    message = message.strip()
    if channel and message:
        bot.say(message, channel)
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('me')
@sopel.module.priority('low')
def me(bot, trigger):
    """
    Send an ACTION (/me) to a given channel or nick. Can only be done in
    privmsg by an admin.
    """
    raw = trigger.group(2)
    if raw is None:
        return
    # First word is the destination; the rest is the action text.
    channel, _sep, action = raw.partition(' ')
    action = action.strip()
    if channel and action:
        bot.action(action, channel)
@sopel.module.event('INVITE')
@sopel.module.priority('low')
def invite_join(bot, trigger):
    """Join a channel Sopel is invited to, if the inviter is an admin."""
    # Also honours the auto_accept_invite config option for non-admins.
    if not (trigger.admin or bot.config.admin.auto_accept_invite):
        return
    bot.join(trigger.args[1])
@sopel.module.event('KICK')
@sopel.module.priority('low')
def hold_ground(bot, trigger):
    """
    This function monitors all kicks across all channels Sopel is in. If it
    detects that it is the one kicked it'll automatically join that channel.
    WARNING: This may not be needed and could cause problems if Sopel becomes
    annoying. Please use this with caution.
    """
    if not bot.config.admin.hold_ground:
        return
    # args[1] is the nick that was kicked; rejoin only when it is us.
    if trigger.args[1] == bot.nick:
        bot.join(trigger.sender)
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('mode')
@sopel.module.priority('low')
def mode(bot, trigger):
    """Set a user mode on Sopel. Can only be done in privmsg by an admin."""
    # Local renamed so it does not shadow this function's own name.
    requested = trigger.group(3)
    bot.write(('MODE', bot.nick + ' ' + requested))
def parse_section_option_value(config, trigger):
    """Parse trigger for set/unset to get relevant config elements.
    :param config: Sopel's config
    :param trigger: IRC line trigger
    :return: A tuple with ``(section, section_name, static_sec, option, value)``
    :raises InvalidSection: section does not exist
    :raises InvalidSectionOption: option does not exist for section
    :raises ValueError: the command arguments are missing or malformed
    The ``value`` is optional and can be returned as ``None`` if omitted from command.
    """
    match = trigger.group(3)
    if match is None:
        raise ValueError  # Invalid command
    # Get section and option from first argument.
    arg1 = match.split('.')
    if len(arg1) == 1:
        # Bare "option" defaults to the [core] section.
        section_name, option = "core", arg1[0]
    elif len(arg1) == 2:
        section_name, option = arg1
    else:
        raise ValueError  # invalid command format
    section = getattr(config, section_name, False)
    if not section:
        raise InvalidSection(section_name)
    # StaticSection subclasses declare options as class attributes; ad-hoc
    # sections are validated against the raw config parser instead.
    static_sec = isinstance(section, StaticSection)
    if static_sec and not hasattr(section, option):
        raise InvalidSectionOption(section_name, option)  # Option not found in section
    if not static_sec and not config.parser.has_option(section_name, option):
        raise InvalidSectionOption(section_name, option)  # Option not found in section
    # The value is everything after the first run of spaces in the arg text.
    delim = trigger.group(2).find(' ')
    # Skip preceding whitespaces, if any.
    while delim > 0 and delim < len(trigger.group(2)) and trigger.group(2)[delim] == ' ':
        delim = delim + 1
    value = trigger.group(2)[delim:]
    # No space found (-1) or the text ended in spaces: no value was given.
    if delim == -1 or delim == len(trigger.group(2)):
        value = None
    return (section, section_name, static_sec, option, value)
@sopel.module.require_privmsg("This command only works as a private message.")
@sopel.module.require_admin("This command requires admin privileges.")
@sopel.module.commands('set')
@sopel.module.example('.set core.owner Me')
def set_config(bot, trigger):
    """See and modify values of Sopel's config object.
    Trigger args:
        arg1 - section and option, in the form "section.option"
        arg2 - value
    If there is no section, section will default to "core".
    If value is not provided, the current value will be displayed.
    """
    try:
        section, section_name, static_sec, option, value = parse_section_option_value(bot.config, trigger)
    except ValueError:
        bot.reply('Usage: {}set section.option [value]'.format(bot.config.core.help_prefix))
        return
    except (InvalidSection, InvalidSectionOption) as exc:
        bot.say(exc.args[1])
        return
    # Display current value if no value is given
    if not value:
        # Never echo secrets back to the user.
        if option.endswith("password") or option.endswith("pass"):
            value = "(password censored)"
        else:
            value = getattr(section, option)
        bot.reply("%s.%s = %s (%s)" % (section_name, option, value, type(value).__name__))
        return
    # Owner-related settings cannot be modified interactively. Any changes to these
    # settings must be made directly in the config file.
    if section_name == 'core' and option in ['owner', 'owner_account']:
        bot.say("Changing '{}.{}' requires manually editing the configuration file."
                .format(section_name, option))
        return
    # Otherwise, set the value to one given
    if static_sec:
        # Validated sections parse the string through the attribute
        # descriptor before assignment.
        descriptor = getattr(section.__class__, option)
        try:
            if isinstance(descriptor, FilenameAttribute):
                # FilenameAttribute.parse also takes the config — presumably
                # to resolve relative paths; confirm against sopel's config types.
                value = descriptor.parse(bot.config, descriptor, value)
            else:
                value = descriptor.parse(value)
        except ValueError as exc:
            bot.say("Can't set attribute: " + str(exc))
            return
    setattr(section, option, value)
    bot.say("OK. Set '{}.{}' successfully.".format(section_name, option))
@sopel.module.require_privmsg("This command only works as a private message.")
@sopel.module.require_admin("This command requires admin privileges.")
@sopel.module.commands('unset')
@sopel.module.example('.unset core.owner')
def unset_config(bot, trigger):
    """Unset value of Sopel's config object.
    Unsetting a value will reset it to the default specified in the config
    definition.
    Trigger args:
        arg1 - section and option, in the form "section.option"
    If there is no section, section will default to "core".
    """
    try:
        section, section_name, static_sec, option, value = parse_section_option_value(bot.config, trigger)
    except ValueError:
        bot.reply('Usage: {}unset section.option [value]'.format(bot.config.core.help_prefix))
        return
    except (InvalidSection, InvalidSectionOption) as exc:
        bot.say(exc.args[1])
        return
    # 'unset' takes no value; anything after the option is a usage error.
    if value:
        bot.reply('Invalid command; no value should be provided to unset.')
        return
    try:
        # Assigning None resets the option to its default...
        setattr(section, option, None)
        bot.say("OK. Unset '{}.{}' successfully.".format(section_name, option))
    except ValueError:
        # ...unless the option is required, in which case it rejects None.
        bot.reply('Cannot unset {}.{}; it is a required option.'.format(section_name, option))
@sopel.module.require_privmsg
@sopel.module.require_admin
@sopel.module.commands('save')
@sopel.module.example('.save')
def save_config(bot, trigger):
    """Save state of Sopel's config object to the configuration file."""
    config = bot.config
    config.save()
| 32.584577
| 125
| 0.672876
|
4a1603c7ec5f97c7265df5fbfd5aa9d8c6aa9fed
| 170
|
py
|
Python
|
tests/pipelines/pype/relativenesting/nested/arb/relnestarbstep.py
|
mofm/pypyr
|
f417f69ba9a607d8a93019854105cfbc4dc9c36d
|
[
"Apache-2.0"
] | 31
|
2017-03-24T11:27:34.000Z
|
2020-05-27T20:06:28.000Z
|
tests/pipelines/pype/relativenesting/nested/arb/relnestarbstep.py
|
mofm/pypyr
|
f417f69ba9a607d8a93019854105cfbc4dc9c36d
|
[
"Apache-2.0"
] | 89
|
2017-04-12T09:50:32.000Z
|
2020-08-13T13:18:36.000Z
|
tests/pipelines/pype/relativenesting/nested/arb/relnestarbstep.py
|
mofm/pypyr
|
f417f69ba9a607d8a93019854105cfbc4dc9c36d
|
[
"Apache-2.0"
] | 6
|
2017-06-04T14:19:59.000Z
|
2020-02-10T13:16:40.000Z
|
"""Arbitrary testing step that adds arb_in to out list in context."""
def run_step(context):
    """Append the context's 'arb_in' value to its 'out' list."""
    arb_value = context['arb_in']
    context['out'].append(arb_value)
| 24.285714
| 69
| 0.664706
|
4a1603f78c7ce7159693d881d42e230ca6155e5f
| 560
|
py
|
Python
|
symphony/cli/pyinventory/common/constant.py
|
englishthomas/magma-1
|
a67e255c9d4d6367c0a6186becee85643f9ebe7a
|
[
"BSD-3-Clause"
] | null | null | null |
symphony/cli/pyinventory/common/constant.py
|
englishthomas/magma-1
|
a67e255c9d4d6367c0a6186becee85643f9ebe7a
|
[
"BSD-3-Clause"
] | null | null | null |
symphony/cli/pyinventory/common/constant.py
|
englishthomas/magma-1
|
a67e255c9d4d6367c0a6186becee85643f9ebe7a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "2.6.1"  # package version string
# Search page sizes — presumably max results fetched per query; confirm
# against the callers of the search APIs.
EQUIPMENTS_TO_SEARCH = 10
LOCATIONS_TO_SEARCH = 5
# Role identifiers — NOTE(review): values look like a server-side role enum
# (0 = regular user, 3 = superuser); verify against the backend.
USER_ROLE = 0
SUPERUSER_ROLE = 3
SCHEMA_FILE_NAME = "survey_schema.json"  # survey schema file name
# Maps each simple survey question type to the response property it requires.
SIMPLE_QUESTION_TYPE_TO_REQUIRED_PROPERTY_NAME = {
    "DATE": "dateData",
    "BOOL": "boolData",
    "EMAIL": "emailData",
    "TEXT": "textData",
    "FLOAT": "floatData",
    "INTEGER": "intData",
    "PHONE": "phoneData",
}
| 25.454545
| 58
| 0.698214
|
4a1604322cf217a63f9c8443682c004d3158cd2c
| 10,968
|
py
|
Python
|
youwol/routers/environment/upload_assets/upload.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
youwol/routers/environment/upload_assets/upload.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | 1
|
2022-03-14T09:40:15.000Z
|
2022-03-14T09:40:15.000Z
|
youwol/routers/environment/upload_assets/upload.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
import asyncio
import json
from typing import Mapping, Dict, cast
from aiohttp import FormData, ClientSession
from fastapi import HTTPException
from youwol.backends.treedb.models import PathResponse
from youwol.environment.clients import RemoteClients, LocalClients
from youwol.environment.youwol_environment import YouwolEnvironment
from youwol_utils import to_json
from youwol.routers.commons import Label
from youwol.routers.commons import local_path, ensure_path
from youwol.routers.environment.upload_assets.data import UploadDataTask
from youwol.routers.environment.upload_assets.flux_project import UploadFluxProjectTask
from youwol.routers.environment.upload_assets.models import UploadTask
from youwol.routers.environment.upload_assets.package import UploadPackageTask
from youwol.routers.environment.upload_assets.story import UploadStoryTask
from youwol_utils import decode_id, JSON
from youwol_utils.clients.assets.assets import AssetsClient
from youwol_utils.clients.assets_gateway.assets_gateway import AssetsGatewayClient
from youwol_utils.clients.treedb.treedb import TreeDbClient
from youwol_utils.context import Context
from youwol_utils.utils_paths import parse_json
async def synchronize_permissions_metadata_symlinks(
        asset_id: str,
        tree_id: str,
        assets_gtw_client: AssetsGatewayClient,
        context: Context
):
    """Run the three remote-sync steps for one asset concurrently:
    borrowed tree items, access permissions and metadata."""
    tasks = [
        create_borrowed_items(asset_id=asset_id, tree_id=tree_id,
                              assets_gtw_client=assets_gtw_client, context=context),
        synchronize_permissions(assets_gtw_client=assets_gtw_client,
                                asset_id=asset_id, context=context),
        synchronize_metadata(asset_id=asset_id,
                             assets_gtw_client=assets_gtw_client, context=context),
    ]
    await asyncio.gather(*tasks)
async def synchronize_permissions(assets_gtw_client: AssetsGatewayClient, asset_id: str, context: Context):
    """Copy the local access policy of *asset_id* to the remote environment.

    Reads the access info from the local assets-gateway, then pushes the
    default ('*') permission and every exposing-group permission remotely,
    all in parallel.
    """
    async with context.start(
            action="synchronize_permissions",
            with_attributes={
                'assetId': asset_id
            }
    ) as ctx:
        env = await context.get('env', YouwolEnvironment)
        local_assets_gtw = LocalClients.get_assets_gateway_client(env=env)
        access_info = await local_assets_gtw.get_asset_access(asset_id=asset_id)
        await ctx.info(
            labels=[str(Label.RUNNING)],
            text="Permissions retrieved",
            data={"access_info": access_info}
        )
        owner_info = access_info["ownerInfo"]
        uploads = [
            assets_gtw_client.put_asset_access(
                asset_id=asset_id, group_id='*', body=owner_info["defaultAccess"])
        ]
        uploads.extend(
            assets_gtw_client.put_asset_access(
                asset_id=asset_id, group_id=group['groupId'], body=group['access'])
            for group in owner_info["exposingGroups"]
        )
        await asyncio.gather(*uploads)
async def create_borrowed_items(asset_id: str, tree_id: str, assets_gtw_client: AssetsGatewayClient, context: Context):
    """Recreate remotely every local 'borrowed' tree item referencing *asset_id*
    (symlink-like entries in the explorer tree)."""
    env = await context.get('env', YouwolEnvironment)
    async with context.start(
            action="create_borrowed_items",
            with_attributes={
                'assetId': asset_id,
                'treeId': tree_id
            }
    ) as ctx:
        # Read the local treedb documents directly from the docdb file.
        items_treedb = parse_json(env.pathsBook.local_treedb_docdb)
        tree_items = [item for item in items_treedb['documents'] if item['related_id'] == asset_id]
        # 'metadata' is stored as a JSON string; only borrowed links are mirrored.
        borrowed_items = [item for item in tree_items if json.loads(item['metadata'])['borrowed']]
        await asyncio.gather(*[
            create_borrowed_item(item=item, borrowed_tree_id=tree_id, assets_gtw_client=assets_gtw_client,
                                 context=ctx)
            for item in borrowed_items
        ])
async def create_borrowed_item(borrowed_tree_id: str, item: Mapping[str, any], assets_gtw_client: AssetsGatewayClient,
                               context: Context):
    """Ensure one borrowed tree item exists remotely, creating it if missing.

    If the item is already present remotely nothing is done; otherwise its
    local explorer path is resolved, recreated remotely, and
    *borrowed_tree_id* is borrowed into the matching folder (or drive root).
    Note: ``any`` in the signature is the builtin, used loosely as ``Any``.
    """
    async with context.start(
            action="create_borrowed_items",
            with_attributes={
                'borrowed_tree_id': borrowed_tree_id,
                'tree_id': item["item_id"]
            }
    ) as ctx:
        tree_id = item["item_id"]
        try:
            await assets_gtw_client.get_tree_item(item_id=tree_id)
            # Already present remotely: nothing to create.
            return
        except HTTPException as e:
            # 404 means "missing, create below"; anything else propagates.
            if e.status_code != 404:
                raise e
        path_item = await local_path({"treeId": tree_id}, context=ctx)
        await ctx.info(
            labels=[Label.RUNNING],
            text="Borrowed tree item not found, start creation",
            data={"treeItemPath": to_json(path_item)}
        )
        await ensure_path(path_item, assets_gtw_client)
        # Destination defaults to the drive root; use the first folder of the
        # resolved path when one exists.
        parent_id = path_item.drive.driveId
        if len(path_item.folders) > 0:
            parent_id = path_item.folders[0].folderId
        await assets_gtw_client.borrow_tree_item(tree_id=borrowed_tree_id,
                                                 body={
                                                     "itemId": tree_id,
                                                     "destinationFolderId": parent_id
                                                 }
                                                 )
        await ctx.info(text="Borrowed item created")
async def synchronize_metadata(asset_id: str, assets_gtw_client: AssetsGatewayClient, context: Context):
    """Push the local asset metadata to the remote environment.

    Uploads the local metadata document and any image referenced locally but
    missing remotely; images are fetched from the local HTTP server, then
    re-posted to the remote assets-gateway.
    """
    env = await context.get('env', YouwolEnvironment)
    async with context.start(
            action="synchronize_metadata",
            with_attributes={
                'asset_id': asset_id
            }
    ) as ctx:
        local_assets_gtw: AssetsGatewayClient = LocalClients.get_assets_gateway_client(env=env)
        local_metadata, remote_metadata = await asyncio.gather(
            local_assets_gtw.get_asset_metadata(asset_id=asset_id),
            assets_gtw_client.get_asset_metadata(asset_id=asset_id)
        )
        # Images present locally but not remotely must be re-uploaded.
        missing_images_urls = [p for p in local_metadata['images'] if p not in remote_metadata['images']]
        full_urls = [f"http://localhost:{env.http_port}{url}" for url in missing_images_urls]
        filenames = [url.split('/')[-1] for url in full_urls]
        await ctx.info(
            labels=[str(Label.RUNNING)],
            text="Synchronise metadata",
            data={
                'local_metadata': local_metadata,
                'remote_metadata': remote_metadata,
                'missing images': full_urls
            }
        )
        async def download_img(session: ClientSession, url: str):
            # Fetch one image from the local server; returns None on non-200.
            async with await session.get(url=url) as resp:
                if resp.status == 200:
                    return await resp.read()
        async with ClientSession() as http_session:
            images_data = await asyncio.gather(*[download_img(http_session, url) for url in full_urls])
            forms = []
            for filename, value in zip(filenames, images_data):
                form_data = FormData()
                form_data.add_field(name='file', value=value, filename=filename)
                forms.append(form_data)
            # Upload the metadata document and all missing images in parallel.
            await asyncio.gather(
                assets_gtw_client.update_asset(asset_id=asset_id, body=local_metadata),
                *[
                    assets_gtw_client.post_asset_image(asset_id=asset_id, filename=name, data=form)
                    for name, form in zip(filenames, forms)
                ]
            )
async def upload_asset(
        body: JSON,
        context: Context
):
    """Upload one local asset (data / flux-project / story / package) to the
    remote environment, then synchronize its borrowed tree items,
    permissions and metadata.

    ``body`` must carry the 'assetId' of a local asset.  Raises
    ``RuntimeError`` when the asset or its tree item cannot be found locally.
    """
    # One upload-task class per asset kind.
    upload_factories: Dict[str, any] = {
        "data": UploadDataTask,
        "flux-project": UploadFluxProjectTask,
        "story": UploadStoryTask,
        "package": UploadPackageTask
    }
    asset_id = body['assetId']
    async with context.start(
            action="upload_asset",
            with_attributes={
                'asset_id': asset_id
            }
    ) as ctx:
        env = await context.get('env', YouwolEnvironment)
        local_treedb: TreeDbClient = LocalClients.get_treedb_client(env=env)
        local_assets: AssetsClient = LocalClients.get_assets_client(env=env)
        raw_id = decode_id(asset_id)
        # Fetch asset + tree item together; exceptions are returned, not raised.
        asset, tree_item = await asyncio.gather(
            local_assets.get(asset_id=asset_id),
            local_treedb.get_item(item_id=asset_id),
            return_exceptions=True
        )
        if isinstance(asset, HTTPException) and asset.status_code == 404:
            await ctx.error(text="Can not find the asset in the local assets store")
            raise RuntimeError("Can not find the asset in the local assets store")
        if isinstance(tree_item, HTTPException) and tree_item.status_code == 404:
            await ctx.error(text="Can not find the tree item in the local treedb store")
            raise RuntimeError("Can not find the tree item in the local treedb store")
        if isinstance(asset, Exception) or isinstance(tree_item, Exception):
            raise RuntimeError("A problem occurred while fetching the local asset/tree items")
        asset = cast(Dict, asset)
        tree_item = cast(Dict, tree_item)
        # Build the kind-specific upload task.
        factory: UploadTask = upload_factories[asset['kind']](
            raw_id=raw_id,
            asset_id=asset_id,
            context=ctx
        )
        local_data = await factory.get_raw()
        try:
            path_item = await local_treedb.get_path(item_id=tree_item['itemId'])
        except HTTPException as e:
            if e.status_code == 404:
                await ctx.error(text=f"Can not get path of item with id '{tree_item['itemId']}'",
                                data={"tree_item": tree_item, "error_detail": e.detail})
            raise e
        await ctx.info(
            text="Data retrieved",
            data={"path_item": path_item, "raw data": local_data}
        )
        assets_gtw_client = await RemoteClients.get_assets_gateway_client(context=ctx)
        # Make sure the folder hierarchy exists remotely before writing.
        await ensure_path(path_item=PathResponse(**path_item), assets_gateway_client=assets_gtw_client)
        try:
            await assets_gtw_client.get_asset_metadata(asset_id=asset_id)
            await ctx.info(
                text="Asset already found in deployed environment"
            )
            # Asset exists remotely: update its raw content in place.
            await factory.update_raw(data=local_data, folder_id=tree_item['folderId'])
        except HTTPException as e:
            # Only a 404 (asset missing remotely) triggers creation.
            if e.status_code != 404:
                raise e
            await ctx.info(
                labels=[Label.RUNNING],
                text="Project not already found => start creation"
            )
            await factory.create_raw(data=local_data, folder_id=tree_item['folderId'])
        await synchronize_permissions_metadata_symlinks(
            asset_id=asset_id,
            tree_id=tree_item['itemId'],
            assets_gtw_client=assets_gtw_client,
            context=ctx
        )
        return {}
| 41.078652
| 119
| 0.625182
|
4a1604d7185920fec98335756e83ee84346d2470
| 56,396
|
py
|
Python
|
CFM_main/fcts_snowpackflow.py
|
mvdebolskiy/CommunityFirnModel
|
5380479cdf776132d549f48e9d71b674564ad9cc
|
[
"MIT"
] | 21
|
2019-03-28T13:56:51.000Z
|
2022-01-28T12:39:10.000Z
|
CFM_main/fcts_snowpackflow.py
|
mvdebolskiy/CommunityFirnModel
|
5380479cdf776132d549f48e9d71b674564ad9cc
|
[
"MIT"
] | 2
|
2021-06-10T06:53:49.000Z
|
2022-01-12T22:07:02.000Z
|
CFM_main/fcts_snowpackflow.py
|
mvdebolskiy/CommunityFirnModel
|
5380479cdf776132d549f48e9d71b674564ad9cc
|
[
"MIT"
] | 12
|
2017-10-09T08:16:25.000Z
|
2021-12-11T03:51:40.000Z
|
#!/usr/bin/env python
'''
This script contains all the functions required to make the pereferential flow scheme of snowpack work
https://models.slf.ch/p/snowpack/source/tree/HEAD/branches/dev/snowpack/snowpackCore/ReSolver1d.cc
'''
import numpy as np
from constants import *
from scipy.sparse import diags as diags
from scipy.linalg import solve as solve
from scipy.linalg import solve_banded as solve_banded
import math
from constants import *
def NPtrid(a,b,c,d):
    '''
    Solve the tridiagonal system A x = d via a dense factorization.

    ``b`` and ``d`` have length n; the sub-diagonal ``a`` and the
    super-diagonal ``c`` have length n-1.  Inputs should contain floats.
    - [b0 c0 0. 0. ...]
    - [a0 b1 c1 0. ...]
    A = [...............]
    - [. 0. 0. an-1 bn]
    Slower than TDMAsolver; kept for reference/validation.
    '''
    matrix = diags([a, b, c], [-1, 0, 1]).toarray()
    return solve(matrix, d)
def TDMAsolver(a,b,c,d):
    '''
    Thomas algorithm for a tridiagonal system A x = d; much faster than
    NPtrid.

    ``b`` and ``d`` have length n, the sub-diagonal ``a`` and the
    super-diagonal ``c`` length n-1 (floats, not integers).
    - [b0 c0 0. 0. ...]
    - [a0 b1 c1 0. ...]
    A = [...............]
    - [. 0. 0. an-1 bn]
    Returns the solution array, or -1 when a zero pivot was encountered.
    Sources:
    https://gist.github.com/cbellei/8ab3ab8551b8dfc8b081c518ccd9ada9
    https://en.wikibooks.org/wiki/Algorithm_Implementation/Linear_Algebra/Tridiagonal_matrix_algorithm#Python
    '''
    n = len(d)
    sub, diag, sup, rhs = map(np.array, (a, b, c, d))
    zero_pivot = 0.0  # set to 1 whenever a pivot of 0 shows up
    if diag[n - 1] == 0.:
        zero_pivot = 1
    # Forward elimination: fold the sub-diagonal into diag and rhs.
    for k in range(1, n):
        if diag[k - 1] == 0.:
            zero_pivot = 1
        factor = sub[k - 1] / diag[k - 1]
        diag[k] = diag[k] - factor * sup[k - 1]
        rhs[k] = rhs[k] - factor * rhs[k - 1]
    # Back substitution, reusing the diag array for the solution.
    sol = diag
    sol[-1] = rhs[-1] / diag[-1]
    for k in range(n - 2, -1, -1):
        sol[k] = (rhs[k] - sup[k] * sol[k + 1]) / diag[k]
    if zero_pivot == 0:
        return sol
    return -1
def splitCFM(rhoC,dzC,TzC,massC,lwcC,Plwc_memC,r2C,vert_res):
    '''
    F for fine grid, C for coarse grid.

    Split every CFM layer thicker than vert_res into sublayers of at most
    vert_res thickness.  Additive quantities (thickness, mass, LWC,
    PLWC_mem) are divided evenly over the sublayers; intensive quantities
    (density, temperature, squared grain radius) are copied unchanged.

    Returns (split_list, rhoF, dzF, TzF, massF, LWCF, PLWC_memF, r2F),
    where split_list[i] is the number of sublayers layer i was split into.

    Performance fix: sublayer arrays are accumulated in Python lists and
    concatenated once at the end, instead of growing numpy arrays with
    np.append/np.concatenate inside the loop (which was quadratic).
    '''
    split_counts = []   # sublayers per coarse layer
    dz_parts = []       # per-layer arrays, concatenated once below
    mass_parts = []
    lwc_parts = []
    plwc_parts = []
    rho_parts = []
    Tz_parts = []
    r2_parts = []
    for ii in range(len(dzC)):
        if dzC[ii] > vert_res:
            # Round up so that every sublayer is at most vert_res thick.
            nb_sublayers = math.ceil(dzC[ii] / vert_res)
        else:
            nb_sublayers = 1
        split_counts.append(nb_sublayers)
        ones = np.ones(nb_sublayers)
        # Additive variables: share the coarse value between sublayers.
        dz_parts.append(dzC[ii] / nb_sublayers * ones)
        mass_parts.append(massC[ii] / nb_sublayers * ones)
        lwc_parts.append(lwcC[ii] / nb_sublayers * ones)
        plwc_parts.append(Plwc_memC[ii] / nb_sublayers * ones)
        # Non-additive variables: each sublayer keeps the coarse value.
        rho_parts.append(rhoC[ii] * ones)
        Tz_parts.append(TzC[ii] * ones)
        r2_parts.append(r2C[ii] * ones)

    def _cat(parts):
        # np.concatenate rejects an empty list; fall back to an empty array
        # (matches the original's behaviour for empty input).
        return np.concatenate(parts) if parts else np.array([])

    split_list = np.asarray(split_counts, dtype=float)
    dzF = _cat(dz_parts)
    massF = _cat(mass_parts)
    LWCF = _cat(lwc_parts)
    PLWC_memF = _cat(plwc_parts)
    rhoF = _cat(rho_parts)
    TzF = _cat(Tz_parts)
    r2F = _cat(r2_parts)
    return split_list,rhoF,dzF,TzF,massF,LWCF,PLWC_memF,r2F
def combineCFM(split_list,rhoF,dzF,TzF,massF,lwcF,Plwc_memF,r2F,refrozenF):
    '''
    Recombine the fine-grid (F) sublayers back onto the coarse CFM grid (C).

    Inverse of the split function: split_list[cc] is the number of consecutive F
    sublayers that together form CFM layer cc. Additive variables (dz, mass, lwc,
    Plwc_mem, refrozen) are summed over each group of sublayers; intensive
    variables (rho, Tz, r2) are averaged. Note that the RE routine normally only
    uses (not modifies) dz and r2, but they are combined anyway for completeness.

    Parameters: split_list (sublayer count per C layer) plus the F-grid vectors.
    Returns: rhoC, dzC, TzC, massC, lwcC, Plwc_memC, r2C, refrozenC on the C grid.
    '''
    nlayers = len(split_list)
    # Pre-allocate the coarse-grid vectors: avoids the O(n^2) cost of growing
    # arrays with np.append inside the loop (behavior is unchanged).
    dzC = np.zeros(nlayers)        # thickness [m]
    massC = np.zeros(nlayers)      # mass [kg]
    lwcC = np.zeros(nlayers)       # LWC [m]
    Plwc_memC = np.zeros(nlayers)  # PLWC_mem [m]
    rhoC = np.zeros(nlayers)       # density [kg m-3]
    TzC = np.zeros(nlayers)        # temperature [K]
    r2C = np.zeros(nlayers)        # squared grain radius [m2]
    refrozenC = np.zeros(nlayers)  # refrozen water per layer [mWE]
    jj = 0 # index of the first F sublayer belonging to the current C layer
    for cc, number in enumerate(split_list):
        jjlast = int(jj+number) # one past the last F sublayer of C layer cc
        ### Combine values of the variables: caution to the difference between additive and non-additive ###
        # Additive variables: sum the values of all the sublayers we combine #
        dzC[cc] = np.sum(dzF[jj:jjlast])
        massC[cc] = np.sum(massF[jj:jjlast])
        lwcC[cc] = np.sum(lwcF[jj:jjlast])
        Plwc_memC[cc] = np.sum(Plwc_memF[jj:jjlast])
        refrozenC[cc] = np.sum(refrozenF[jj:jjlast])
        # Non-additive variables: take the mean over the sublayers #
        rhoC[cc] = np.mean(rhoF[jj:jjlast])
        TzC[cc] = np.mean(TzF[jj:jjlast])
        r2C[cc] = np.mean(r2F[jj:jjlast])
        jj = jjlast # first sublayer of the next C layer
    return rhoC, dzC, TzC, massC, lwcC, Plwc_memC, r2C, refrozenC
def restrictdom(self):
    '''
    Restrict the column to its flow-active upper part to save computational time.

    The cut is placed one layer below the deepest layer that is either below the
    pore close-off density (830 kg m-3) or holds liquid water, so no layer that
    can interact with the flow routine is excluded. The deeper, permanently
    impermeable part of the column is reattached afterwards by lengthendom.
    Returns the shortened (rho, dz, Tz, mass, LWC, PLWC_mem, r2) vectors.
    '''
    rhobottom = 830.
    deepest_porous = 0 # deepest layer below close-off density
    deepest_wet = 0    # deepest layer holding liquid water
    if np.any(self.rho < rhobottom):
        deepest_porous = np.where(self.rho < rhobottom)[0][-1]
    if np.any(self.LWC > 0):
        deepest_wet = np.where(self.LWC > 0)[0][-1]
    # Keep everything down to one layer below the deepest flow-active layer.
    cut = max(deepest_porous, deepest_wet) + 2
    return (self.rho[0:cut], self.dz[0:cut], self.Tz[0:cut], self.mass[0:cut],
            self.LWC[0:cut], self.PLWC_mem[0:cut], self.r2[0:cut])
def lengthendom(self,rho_short,dz_short,Tz_short,mass_short,lwc_short,Plwc_mem_short,r2_short):
    '''
    Reattach the deep, flow-inactive part of the column to the restricted domain.

    Counterpart of restrictdom: the flow routine only operated on the upper
    *_short vectors, every self.* value below them was untouched and is simply
    concatenated back. Must be called before the flow routine but AFTER melting.
    Returns the full-column (rho, dz, Tz, mass, lwc, Plwc_mem, r2) vectors.
    '''
    nshort = len(dz_short) # number of layers handled by the flow routine
    # Everything from index nshort downwards kept its pre-flow self.* value.
    rho_full = np.concatenate((rho_short, self.rho[nshort:]))
    dz_full = np.concatenate((dz_short, self.dz[nshort:]))
    Tz_full = np.concatenate((Tz_short, self.Tz[nshort:]))
    mass_full = np.concatenate((mass_short, self.mass[nshort:]))
    lwc_full = np.concatenate((lwc_short, self.LWC[nshort:]))
    r2_full = np.concatenate((r2_short, self.r2[nshort:]))
    Plwc_mem_full = np.concatenate((Plwc_mem_short, self.PLWC_mem[nshort:]))
    return (rho_full, dz_full, Tz_full, mass_full, lwc_full, Plwc_mem_full, r2_full)
def Msatexcess(dz,rho,Mtheta,Mtheta_sat,crtn_theta,rhoimp,totrunoff):
    '''
    For MFdom (matrix flow domain).
    Where the water content of a layer exceeds saturation, redistribute the excess
    into the layers below (in priority) and then into the layers above. Whatever
    finds no pore space anywhere is added to the runoff total.

    Parameters
    ----------
    dz, rho : layer thickness [m] and density [kg m-3]
    Mtheta, Mtheta_sat : MFdom volumetric water content and its value at saturation [/]
    crtn_theta : minimum water content kept for numerical stability [/]
    rhoimp : impermeability (pore close-off) density threshold [kg m-3]
    totrunoff : running total of lateral runoff [m]

    Returns
    -------
    Mtheta, Mthetar, MeffSat, Mlwc, totrunoff : updated MFdom variables
    '''
    waterexcess = np.where(Mtheta > Mtheta_sat) # spot layers where we exceed saturation
    # Flag layers that cannot receive water: at/above close-off density or >=95% saturated
    ice1 = np.zeros_like(dz)
    sat1 = np.zeros_like(dz)
    ice1[np.where(rho>=rhoimp)[0]] = 1
    sat1[np.where(Mtheta/Mtheta_sat>=0.95)[0]] = 1
    icesat = ice1+sat1
    if np.any(icesat==0):
        lowest = np.where(icesat == 0)[0][-1] # deepest layer still able to receive water
    elif np.all(icesat>0):
        lowest = 0 # no receiving layer at all -> downward search is disabled immediately
    for index in waterexcess[0]:
        lwcexc = 1.001*(Mtheta[index]-Mtheta_sat[index]) * dz[index] # excess water to move, with safety margin [m]
        lwcexc = min((Mtheta[index]-crtn_theta/10)*dz[index],lwcexc) # keep the minimum theta required for numerical stability
        Mtheta[index] -= lwcexc/dz[index] # remove the excess; it still has to be redistributed in the column
        tobelow = 1 # first try to distribute into the layers below
        bb = 1
        if (index+bb)>lowest or (index+bb>len(dz)-1): # no usable layer below -> skip the downward pass
            tobelow = 0
        while lwcexc > 0. and tobelow == 1: # distribute downwards until exhausted or bottom reached
            if rho[index+bb]<rhoimp: # only transfer into layers below pore close-off density
                transf = np.minimum(lwcexc,(0.98*(Mtheta_sat[index+bb]-Mtheta[index+bb])*dz[index+bb])) # don't oversaturate the receiving layer (2% margin)
                transf = np.maximum(transf,0.) # guard against negative transfers
            elif rho[index+bb]>=rhoimp: # impermeable layer: no transfer
                transf = 0.
            Mtheta[index+bb] += transf/dz[index+bb] # add the water
            bb += 1 # go one layer down
            lwcexc -= transf # that part of the excess has been placed
            if (index+bb)>lowest or (index+bb>len(dz)-1): # reached the deepest usable layer
                tobelow = 0
        toabove = 1 # leftover excess: same process upwards
        aa = np.maximum(1,index-lowest) # start looking for space above the aquifer
        if index-aa < 0:
            toabove = 0
        while lwcexc > 0. and toabove == 1:
            if rho[index-aa]<rhoimp: # only transfer into layers below pore close-off density
                transf = np.minimum(lwcexc,(0.98*(Mtheta_sat[index-aa]-Mtheta[index-aa])*dz[index-aa]))
                transf = np.maximum(transf,0.)
            elif rho[index-aa]>=rhoimp: # impermeable layer: no transfer
                transf = 0.
            Mtheta[index-aa] += transf/dz[index-aa]
            aa += 1
            lwcexc -= transf
            if index-aa < 0: # reached the surface
                toabove = 0
        if lwcexc > 0: # excess that found no pore space anywhere
            totrunoff += lwcexc # is added to runoff
    ### Update of thetar ###
    Mthetar = np.minimum((np.ones_like(Mtheta)*0.02),0.9*Mtheta) # residual water content [/], Wever 2014 (10)
    ### Update of effSat and lwc
    MeffSat = (Mtheta-Mthetar)/(Mtheta_sat-Mthetar)
    Mlwc = Mtheta*dz
    return Mtheta,Mthetar,MeffSat,Mlwc,totrunoff
def Psatexcess(dz,rho,Ptheta,Ptheta_sat,crtn_theta,rhoimp,totrunoff):
    '''
    PFdom (preferential flow domain) counterpart of Msatexcess.
    Where PFdom water content exceeds saturation, the excess is moved into the
    layers below (in priority) then above; leftover water is added to runoff.
    Unlike Msatexcess, the downward pass also refuses to fill the very last layer
    (assumed to sit on the underlying ice sheet).
    Returns updated Ptheta, PeffSat, Plwc and totrunoff.
    '''
    waterexcess = np.where(Ptheta > Ptheta_sat) # spot layers where we exceed saturation
    # Flag layers that cannot receive water: at/above close-off density or >=95% saturated
    ice1 = np.zeros_like(dz)
    sat1 = np.zeros_like(dz)
    ice1[np.where(rho>=rhoimp)[0]] = 1
    sat1[np.where(Ptheta/Ptheta_sat>=0.95)[0]] = 1
    icesat = ice1+sat1
    if np.any(icesat==0):
        lowest = np.where(icesat == 0)[0][-1] # deepest layer still able to receive water
    elif np.all(icesat>0):
        lowest = 0 # no receiving layer at all -> downward search is disabled immediately
    for index in waterexcess[0]:
        lwcexc = 1.001*(Ptheta[index]-Ptheta_sat[index]) * dz[index] # excess water to move, with safety margin [m]
        lwcexc = min((Ptheta[index]-crtn_theta/10)*dz[index],lwcexc) # keep the minimum theta required for numerical stability
        Ptheta[index] -= lwcexc/dz[index] # remove the excess; it still has to be redistributed in the column
        tobelow = 1 # first try to distribute into the layers below
        bb = 1
        if (index+bb)>lowest or (index+bb>len(dz)-1): # no usable layer below -> skip the downward pass
            tobelow = 0
        while lwcexc > 0. and tobelow == 1: # distribute downwards until exhausted or bottom reached
            if rho[index+bb]<rhoimp: # only transfer into layers below pore close-off density
                transf = np.minimum(lwcexc,(0.99*(Ptheta_sat[index+bb]-Ptheta[index+bb])*dz[index+bb])) # don't oversaturate the receiving layer (1% margin)
                transf = np.maximum(transf,0.) # guard against negative transfers
            elif rho[index+bb]>=rhoimp: # impermeable layer: no transfer
                transf = 0.
            Ptheta[index+bb] += transf/dz[index+bb] # add the water
            bb += 1 # go one layer down
            lwcexc -= transf # that part of the excess has been placed
            if index+bb > len(dz)-2: # stop before the last layer: it is supposed to be the surface of the ice sheet and receives no water
                tobelow = 0
        toabove = 1 # leftover excess: same process upwards
        aa = np.maximum(1,index-lowest) # start looking for space above the aquifer
        if index-aa < 0:
            toabove = 0
        while lwcexc > 0. and toabove == 1:
            if rho[index-aa]<rhoimp: # only transfer into layers below pore close-off density
                transf = np.minimum(lwcexc,(0.99*(Ptheta_sat[index-aa]-Ptheta[index-aa])*dz[index-aa]))
                transf = np.maximum(transf,0.)
            elif rho[index-aa]>=rhoimp: # impermeable layer: no transfer
                transf = 0.
            Ptheta[index-aa] += transf/dz[index-aa]
            aa += 1
            lwcexc -= transf
            if index-aa < 0: # reached the surface
                toabove = 0
        if lwcexc > 0: # excess that found no pore space anywhere
            totrunoff += lwcexc # is added to runoff
    ### Update of effSat and lwc
    PeffSat = Ptheta/Ptheta_sat
    Plwc = Ptheta*dz
    return Ptheta,PeffSat,Plwc,totrunoff
def Micedryer(dz,rho,Mtheta,Mtheta_sat,crtn_theta,rhoimp,totrunoff):
    '''
    Dry the MFdom of ice layers: layers denser than rhoimp should not hold matrix
    water. Their water (beyond the numerical-stability minimum crtn_theta/10) is
    moved into permeable layers below (in priority) then above, as in Msatexcess;
    whatever finds no pore space is added to runoff.
    Returns updated Mtheta, Mthetar, MeffSat, Mlwc and totrunoff.
    '''
    icelay = np.where(rho>rhoimp)[0] # spot ice layers
    wetlay = np.where(Mtheta>crtn_theta/10)[0] # spot layers above the numerical-stability minimum
    ice_to_dry = np.intersect1d(icelay,wetlay) # ice layers that still hold MFdom water
    # Flag layers that cannot receive water: at/above close-off density or >=95% saturated
    ice1 = np.zeros_like(dz)
    sat1 = np.zeros_like(dz)
    ice1[np.where(rho>=rhoimp)[0]] = 1
    sat1[np.where(Mtheta/Mtheta_sat>=0.95)[0]] = 1
    icesat = ice1+sat1
    if np.any(icesat==0):
        lowest = np.where(icesat == 0)[0][-1] # deepest layer still able to receive water
    elif np.all(icesat>0):
        lowest = 0 # no receiving layer at all -> downward search is disabled immediately
    for index in ice_to_dry:
        lwcexc = (Mtheta[index]-crtn_theta/10) * dz[index] # water to evacuate, keeping the stability minimum in place
        Mtheta[index] -= lwcexc/dz[index] # dry the layer; lwcexc still has to be placed in the column
        tobelow = 1 # first try to distribute into the layers below
        bb = 1
        if (index+bb)>lowest or (index+bb>len(dz)-1): # no usable layer below -> skip the downward pass
            tobelow = 0
        while lwcexc > 0. and tobelow == 1: # distribute downwards until exhausted or bottom reached
            if rho[index+bb]<rhoimp: # only permeable layers receive water
                transf = np.minimum(lwcexc,(0.99*(Mtheta_sat[index+bb]-Mtheta[index+bb])*dz[index+bb])) # don't oversaturate the receiving layer (1% margin)
                transf = np.maximum(transf,0.) # guard against negative transfers
            elif rho[index+bb]>=rhoimp: # impermeable layer: no transfer
                transf = 0.
            Mtheta[index+bb] += transf/dz[index+bb] # add the water
            bb += 1 # go one layer down
            lwcexc -= transf # that part has been placed
            if index+bb > len(dz)-1: # reached the bottom
                tobelow = 0
        toabove = 1 # leftover water: same process upwards
        aa = np.maximum(1,index-lowest) # start looking for space above the aquifer
        if index-aa < 0:
            toabove = 0
        while lwcexc > 0. and toabove == 1:
            if rho[index-aa]<rhoimp: # only permeable layers receive water
                transf = np.minimum(lwcexc,(0.99*(Mtheta_sat[index-aa]-Mtheta[index-aa])*dz[index-aa]))
                transf = np.maximum(transf,0.)
            elif rho[index-aa]>=rhoimp: # impermeable layer: no transfer
                transf = 0.
            Mtheta[index-aa] += transf/dz[index-aa]
            aa += 1
            lwcexc -= transf
            if index-aa < 0: # reached the surface
                toabove = 0
        if lwcexc > 0: # no pore space found anywhere
            totrunoff += lwcexc # it is added to runoff
    ### Update of thetar ###
    Mthetar = np.minimum((np.ones_like(Mtheta)*0.02),0.9*Mtheta) # residual water content [/], Wever 2014 (10)
    ### Update of effSat and lwc
    MeffSat = (Mtheta-Mthetar)/(Mtheta_sat-Mthetar)
    Mlwc = Mtheta*dz
    return Mtheta,Mthetar,MeffSat,Mlwc,totrunoff
def Picedryer(dz,rho,Ptheta,Ptheta_sat,crtn_theta,rhoPdr,totrunoff):
    '''
    PFdom counterpart of Micedryer: make sure layers denser than the drying
    threshold rhoPdr hold no preferential-flow water (beyond the numerical-
    stability minimum). Their water is moved into permeable layers below (in
    priority) then above; leftover water is added to runoff.
    Returns updated Ptheta, PeffSat, Plwc and totrunoff.
    '''
    icelay = np.where(rho>rhoPdr)[0] # spot layers denser than the drying threshold
    wetlay = np.where(Ptheta>crtn_theta/10)[0] # spot layers above the numerical-stability minimum
    ice_to_dry = np.intersect1d(icelay,wetlay) # dense layers that still hold PFdom water
    # Flag layers that cannot receive water: dense or >=95% saturated
    ice1 = np.zeros_like(dz)
    sat1 = np.zeros_like(dz)
    ice1[np.where(rho>=rhoPdr)[0]] = 1
    sat1[np.where(Ptheta/Ptheta_sat>=0.95)[0]] = 1
    icesat = ice1+sat1
    if np.any(icesat==0):
        lowest = np.where(icesat == 0)[0][-1] # deepest layer still able to receive water
    elif np.all(icesat>0):
        lowest = 0 # no receiving layer at all -> downward search is disabled immediately
    for index in ice_to_dry:
        lwcexc = (Ptheta[index]-crtn_theta/10) * dz[index] # water to evacuate, keeping the stability minimum in place
        Ptheta[index] -= lwcexc/dz[index] # dry the layer; lwcexc still has to be placed in the column
        if np.any(rho[index:]<rhoPdr): # NOTE(review): when no permeable layer exists at/below index, the upward pass is skipped too and lwcexc goes straight to runoff -- confirm this is intended
            tobelow = 1 # first try to distribute into the layers below
            bb = 1
            if (index+bb)>lowest or (index+bb>len(dz)-1): # no usable layer below -> skip the downward pass
                tobelow = 0
            while lwcexc > 0. and tobelow == 1: # distribute downwards until exhausted or bottom reached
                if rho[index+bb]<rhoPdr: # only permeable layers receive water
                    transf = np.minimum(lwcexc,(0.9*(Ptheta_sat[index+bb]-Ptheta[index+bb])*dz[index+bb])) # don't oversaturate the receiving layer (10% margin)
                    transf = np.maximum(transf,0.) # guard against negative transfers
                elif rho[index+bb]>=rhoPdr: # dense layer: no transfer
                    transf = 0.
                Ptheta[index+bb] += transf/dz[index+bb] # add the water
                bb += 1 # go one layer down
                lwcexc -= transf # that part has been placed
                if index+bb > len(dz)-1: # reached the bottom
                    tobelow = 0
            toabove = 1 # leftover water: same process upwards
            aa = np.maximum(1,index-lowest) # start looking for space above the aquifer
            if index-aa < 0:
                toabove = 0
            while lwcexc > 0. and toabove == 1:
                if rho[index-aa]<rhoPdr: # only permeable layers receive water
                    transf = np.minimum(lwcexc,(0.9*(Ptheta_sat[index-aa]-Ptheta[index-aa])*dz[index-aa]))
                    transf = np.maximum(transf,0.)
                elif rho[index-aa]>=rhoPdr: # dense layer: no transfer
                    transf = 0.
                Ptheta[index-aa] += transf/dz[index-aa]
                aa += 1
                lwcexc -= transf
                if index-aa < 0: # reached the surface
                    toabove = 0
        if lwcexc > 0: # no pore space found anywhere
            totrunoff += lwcexc # it is added to runoff
    ### Update of effSat and lwc
    PeffSat = Ptheta/Ptheta_sat
    Plwc = Ptheta*dz
    return Ptheta,PeffSat,Plwc,totrunoff
def entrysuction(dz,Mtheta,Mthetar,Mthetar_old,MeffSat,Mtheta_sat,Mlwc,Ptheta,PeffSat,Plwc,Ptheta_sat,crtn_theta,aquif,MSat_westag):
    '''
    Water-entry-suction transfer from MFdom into PFdom (Wever 2016).
    When the pressure in an MFdom layer exceeds the water entry suction of the
    layer below -- expressed here as the more intuitive effective-saturation
    threshold MSat_westag -- water penetrates from MFdom of layer ii into PFdom of
    layer ii+1. If, after the transfer, the PFdom saturation of the layer below is
    still lower than the MFdom saturation of the layer above, the two saturations
    are equalised (Wever 2016 eqs. 3-5, corrected). Layers at/below the aquifer
    index are excluded. Mthetar_old is accepted for interface consistency but not
    used. Returns updated Mtheta, Mthetar, MeffSat, Mlwc, Ptheta, PeffSat, Plwc.
    '''
    entry = np.where(MeffSat[0:aquif-1]>MSat_westag[0:aquif-1])[0] # layers exceeding the water-entry-suction saturation threshold
    for ii in entry:
        transfer = dz[ii]*(MSat_westag[ii]*(Mtheta_sat[ii]-Mthetar[ii])+Mthetar[ii]) # NOTE(review): intent per original comment is "amount so that MeffSat[ii]==MSat_we[ii+1]", but the expression is the water amount AT the threshold content, not the excess above it -- confirm
        transfer = min(transfer,0.95*(Ptheta_sat[ii+1]-Ptheta[ii+1])*dz[ii+1]) # don't oversaturate the receiving PFdom layer
        transfer = min(transfer,(Mtheta[ii]-crtn_theta/10)*dz[ii]) # preserve numerical stability in the source layer
        Mtheta[ii] -= transfer/dz[ii] # remove from MFdom (converted to water content)
        ## Update values of the source layer ##
        Mthetar[ii] = min((0.02),0.9*Mtheta[ii]) # residual water content, Wever 2014 (10)
        if Mtheta[ii]<Mthetar[ii]+1e-6: # keep (Mtheta - Mthetar) strictly positive for numerical stability
            if Mtheta[ii]>crtn_theta/10:
                Mthetar[ii] = Mtheta[ii] - crtn_theta/10
            if Mtheta[ii]<=crtn_theta/10:
                Mthetar[ii] = 0
        MeffSat[ii] = (Mtheta[ii]-Mthetar[ii])/(Mtheta_sat[ii]-Mthetar[ii])
        Mlwc[ii] = Mtheta[ii]*(dz[ii]) # update lwc
        Ptheta[ii+1] += transfer/dz[ii+1] # transfer towards PFdom of the layer below
        PeffSat[ii+1] = Ptheta[ii+1] / Ptheta_sat[ii+1]
        Plwc[ii+1] = Ptheta[ii+1]*dz[ii+1]
        ## If MFdom saturation of layer ii is still above PFdom saturation of layer ii+1, equalise (Wever 2016) ##
        if (PeffSat[ii+1] < MeffSat[ii]) and (Mtheta[ii]>crtn_theta/10):
            # Wever 2016, equations (3),(4),(5) -> corrected versions (there were some errors in (5))
            lwctot = Mtheta[ii]*dz[ii] + Ptheta[ii+1]*dz[ii+1] # total water shared between the two layers, conserved [m]
            Mtheta[ii] = (dz[ii+1]*(Mthetar[ii]*Ptheta_sat[ii+1])+lwctot*(Mtheta_sat[ii]-Mthetar[ii])) / (dz[ii]*(Mtheta_sat[ii]-Mthetar[ii])+dz[ii+1]*Ptheta_sat[ii+1])
            if Mtheta[ii] < crtn_theta/10: # never drop below the stability minimum
                Mtheta[ii] = crtn_theta/10
            Ptheta[ii+1] = (lwctot-Mtheta[ii]*dz[ii])/dz[ii+1] # PFdom gets the remainder (mass conservation)
            ## Update all variables ##
            Mthetar[ii] = min((0.02),0.9*Mtheta[ii])
            if Mtheta[ii]<Mthetar[ii]+1e-6:
                if Mtheta[ii]>crtn_theta/10:
                    Mthetar[ii] = Mtheta[ii] - crtn_theta/10
                if Mtheta[ii]<=crtn_theta/10:
                    Mthetar[ii] = 0
            MeffSat[ii] = (Mtheta[ii]-Mthetar[ii])/(Mtheta_sat[ii]-Mthetar[ii])
            Mlwc[ii] = Mtheta[ii]*(dz[ii]) # update Mlwc
            PeffSat[ii+1] = Ptheta[ii+1] / Ptheta_sat[ii+1]
            Plwc[ii+1] = Ptheta[ii+1]*dz[ii+1]
    return Mtheta,Mthetar,MeffSat,Mlwc,Ptheta,PeffSat,Plwc
def layerequaliser_eq(dz,Mtheta,Mthetar,Mthetar_old,MeffSat,Mtheta_sat,Mlwc,Ptheta,PeffSat,Plwc,Ptheta_sat,crtn_theta,aquif):
    '''
    Equalise MFdom and PFdom effective saturations within individual layers.

    Whenever the effective saturation of the matrix-flow domain exceeds the one of
    the preferential-flow domain in the same layer (above the aquifer), water is
    exchanged until both saturations match (Wever 2016 eqs. 3-5, corrected;
    confirmed by N. Wever, 27 Sept 2018,
    https://models.slf.ch/p/snowpack/source/tree/HEAD/branches/dev/snowpack/snowpackCore/ReSolver1d.cc).
    A while loop is needed because the residual content Mthetar depends on Mtheta
    and may change after an exchange, shifting the equalisation target; iteration
    stops once the two saturations agree within 1e-5.

    Parameters: per-layer vectors of both flow domains; crtn_theta is the
    numerical-stability minimum water content; aquif excludes aquifer layers;
    Mthetar_old is accepted for interface consistency but not used.
    Returns updated Mtheta, Mthetar, MeffSat, Mlwc, Ptheta, PeffSat, Plwc.
    '''
    highsat = np.where(MeffSat[0:aquif]>PeffSat[0:aquif])[0] # layers where MeffSat exceeds PeffSat
    okres = np.where(Mtheta[0:aquif]>0.02)[0] # only layers above the residual water content can give water away
    toequalise = np.intersect1d(highsat,okres)
    for ii in toequalise:
        while (abs(PeffSat[ii]-MeffSat[ii])>1e-5): # iterate because Mthetar may change after each exchange
            # Wever 2016, equations (3),(4),(5) -> corrected versions (there were some errors in (5))
            lwctot = Mlwc[ii] + Plwc[ii] # total water of the layer, conserved by the exchange [m]
            Mtheta[ii] = (dz[ii]*(Mthetar[ii]*Ptheta_sat[ii])+lwctot*(Mtheta_sat[ii]-Mthetar[ii])) / (dz[ii]*(Mtheta_sat[ii]-Mthetar[ii])+dz[ii]*Ptheta_sat[ii])
            if Mtheta[ii] < crtn_theta/10: # never drop below the numerical-stability minimum
                Mtheta[ii] = crtn_theta/10
            Ptheta[ii] = (lwctot-Mtheta[ii]*dz[ii])/dz[ii] # PFdom receives the remainder (mass conservation)
            ## Update all variables ##
            Mthetar[ii] = min((0.02),0.9*Mtheta[ii]) # residual water content, Wever 2014 (10)
            if Mtheta[ii]<Mthetar[ii]+1e-6: # keep (Mtheta - Mthetar) strictly positive
                if Mtheta[ii]>crtn_theta/10:
                    Mthetar[ii] = Mtheta[ii] - crtn_theta/10
                if Mtheta[ii]<=crtn_theta/10:
                    Mthetar[ii] = 0
            MeffSat[ii] = (Mtheta[ii]-Mthetar[ii])/(Mtheta_sat[ii]-Mthetar[ii])
            Mlwc[ii] = Mtheta[ii]*(dz[ii]) # update Mlwc
            PeffSat[ii] = Ptheta[ii] / Ptheta_sat[ii]
            Plwc[ii] = Ptheta[ii]*dz[ii]
    return Mtheta,Mthetar,MeffSat,Mlwc,Ptheta,PeffSat,Plwc
def PFleave(dz,rho,Tz,Mtheta,Mthetar,Mthetar_old,MeffSat,Mtheta_sat,Mlwc,Ptheta,PeffSat,Plwc,Ptheta_sat,crtn_theta,rhoimp,aquif,PSatlim):
    '''
    Backflow from PFdom to MFdom when PFdom saturation reaches the threshold PSatlim.
    First, as much water as each layer's cold content can accommodate is moved back
    to MFdom. Second, if PSatlim is still exceeded, MFdom and PFdom saturations are
    equalised (Wever 2016 eqs. 3-5, corrected). Saturations may still differ
    slightly at the end because Mthetar can change after the exchange. Layers
    at/below the aquifer index or denser than rhoimp are excluded. Mthetar_old is
    accepted for interface consistency but not used.
    Returns updated Mtheta, Mthetar, MeffSat, Mlwc, Ptheta, PeffSat, Plwc.
    '''
    ### First : calculate cold content of every layer ###
    ## Layers mass ##
    mass = rho*dz
    ## Calculate the refreezing potential in every layer ##
    cold_content = CP_I * mass * (T_MELT - Tz) # heat required to bring each layer to 273 K [J]
    cold_content_sum = cold_content.cumsum(axis=0) # cumulative cold content from the surface [J] (not used below)
    refreeze_mass_pot = cold_content / LF_I # meltwater mass the cold content could refreeze [kg]
    refreeze_mass_pot = np.maximum(refreeze_mass_pot,0.)
    refreeze_vol_pot = refreeze_mass_pot/1000. # same potential expressed as water volume [m]
    backflowlayers = np.where(PeffSat[0:aquif] > PSatlim)[0] # PFdom layers above the backflow threshold
    for ii in backflowlayers:
        if rho[ii] < rhoimp: # no transfer into the MFdom of ice layers
            transf = refreeze_vol_pot[ii] # amount the cold content can accommodate
            transf = min(transf,(Mtheta_sat[ii]-Mtheta[ii])*dz[ii]) # no oversaturation of MFdom
            transf = min(transf,(Ptheta[ii]-crtn_theta/10)*dz[ii]) # preserve numerical stability in PFdom
            transf = max(transf,0.) # no negative transfer
            Ptheta[ii] -= transf/dz[ii]
            Mtheta[ii] += transf/dz[ii]
            ## Update all variables
            Mthetar[ii] = min((0.02),0.9*Mtheta[ii]) # residual water content, Wever 2014 (10)
            if Mtheta[ii]<Mthetar[ii]+1e-6: # keep (Mtheta - Mthetar) strictly positive
                if Mtheta[ii]>crtn_theta/10:
                    Mthetar[ii] = Mtheta[ii] - crtn_theta/10
                if Mtheta[ii]<=crtn_theta/10:
                    Mthetar[ii] = 0
            MeffSat[ii] = (Mtheta[ii]-Mthetar[ii])/(Mtheta_sat[ii]-Mthetar[ii])
            Mlwc[ii] = Mtheta[ii]*(dz[ii]) # update Mlwc
            PeffSat[ii] = Ptheta[ii] / Ptheta_sat[ii]
            Plwc[ii] = Ptheta[ii]*dz[ii]
            if (PeffSat[ii]>PSatlim and PeffSat[ii]>MeffSat[ii]): # threshold still exceeded -> equalise saturations
                while (abs(PeffSat[ii]-MeffSat[ii])>1e-5): # iterate because Mthetar may change after each exchange
                    # Wever 2016, equations (3),(4),(5) -> corrected versions (there were some errors in (5))
                    lwctot = Mlwc[ii] + Plwc[ii] # total water of the layer, conserved by the exchange [m]
                    Mtheta[ii] = (dz[ii]*(Mthetar[ii]*Ptheta_sat[ii])+lwctot*(Mtheta_sat[ii]-Mthetar[ii])) / (dz[ii]*(Mtheta_sat[ii]-Mthetar[ii])+dz[ii]*Ptheta_sat[ii])
                    Ptheta[ii] = (lwctot-Mtheta[ii]*dz[ii])/dz[ii] # PFdom receives the remainder (mass conservation)
                    ## Update all variables
                    Mthetar[ii] = min((0.02),0.9*Mtheta[ii])
                    if Mtheta[ii]<Mthetar[ii]+1e-6:
                        if Mtheta[ii]>crtn_theta/10:
                            Mthetar[ii] = Mtheta[ii] - crtn_theta/10
                        if Mtheta[ii]<=crtn_theta/10:
                            Mthetar[ii] = 0
                    MeffSat[ii] = (Mtheta[ii]-Mthetar[ii])/(Mtheta_sat[ii]-Mthetar[ii])
                    Mlwc[ii] = Mtheta[ii]*(dz[ii]) # update Mlwc
                    PeffSat[ii] = Ptheta[ii] / Ptheta_sat[ii]
                    Plwc[ii] = Ptheta[ii]*dz[ii]
    return Mtheta,Mthetar,MeffSat,Mlwc,Ptheta,PeffSat,Plwc
def PFleaveheat(dz,rho,Tz,Mtheta,Mthetar,Mthetar_old,MeffSat,Mtheta_sat,Mlwc,Ptheta,PeffSat,Plwc,Ptheta_sat,crtn_theta,kth,bigF,bigN,aquif,rhoimp,deltatime):
    '''
    Mimic refreezing inside the preferential flow domain by moving water back to
    the matrix domain at the rate heat would be extracted from the flow paths
    (Wever 2016, section 2.3, eqs. 6-7 -- note that (7) is misformulated in the
    paper and the corrected form is used here). bigN is the tuning parameter for
    the number of preferential flow paths per unit area, bigF the PFdom volume
    fraction and kth the thermal conductivity. Only subfreezing layers above the
    aquifer, below the impermeability density and holding more than the
    numerical-stability minimum of PFdom water are affected. Mthetar_old is
    accepted for interface consistency but not used.
    Returns updated Mtheta, Mthetar, MeffSat, Mlwc, Ptheta, PeffSat, Plwc.
    '''
    subfreezing = np.where(Tz[0:aquif]<273.15)[0]
    holding_water = np.where(Ptheta[0:aquif]>crtn_theta/10)[0]
    for ii in np.intersect1d(subfreezing,holding_water):
        if rho[ii]>=rhoimp: # no transfer into the matrix of ice layers
            continue
        # Heat flux towards one flow path, Wever 2016 (6)
        bigQ = kth[ii]*abs(Tz[ii]-273.15)/(((1+bigF[ii])/(2*math.pi))**0.5-(bigF[ii]/math.pi)**0.5)
        # Corrected Wever 2016 (7), expressed as a water amount rather than a volumetric content
        moved = dz[ii] * 2*bigN*((math.pi*bigF[ii])**0.5*bigQ*deltatime)/(LF_I*RHO_W_KGM)
        moved = min(moved,(Mtheta_sat[ii]-Mtheta[ii])*dz[ii]) # never oversaturate the matrix domain
        moved = min(moved,(Ptheta[ii]-crtn_theta/10)*dz[ii]) # keep the numerical-stability minimum in PFdom
        moved = max(moved,0.) # no negative transfer
        Ptheta[ii] -= moved/dz[ii]
        Mtheta[ii] += moved/dz[ii]
        ## Refresh the dependent variables of both domains ##
        Mthetar[ii] = min((0.02),0.9*Mtheta[ii]) # residual water content, Wever 2014 (10)
        if Mtheta[ii]<Mthetar[ii]+1e-6: # keep (Mtheta - Mthetar) strictly positive
            if Mtheta[ii]>crtn_theta/10:
                Mthetar[ii] = Mtheta[ii] - crtn_theta/10
            if Mtheta[ii]<=crtn_theta/10:
                Mthetar[ii] = 0
        MeffSat[ii] = (Mtheta[ii]-Mthetar[ii])/(Mtheta_sat[ii]-Mthetar[ii])
        Mlwc[ii] = Mtheta[ii]*dz[ii]
        PeffSat[ii] = Ptheta[ii]/Ptheta_sat[ii]
        Plwc[ii] = Ptheta[ii]*dz[ii]
    return Mtheta,Mthetar,MeffSat,Mlwc,Ptheta,PeffSat,Plwc
def Mrefreezing(dz,zstep,rho,grain,Tz,Mthetar_old,Mlwc,lwc_min_fr,Ptheta,PeffSat,Plwc,h_e,bigF,mu,crtn_theta,rhoimp,totrefrozen_lwc,refrozenlay,totrunoff):
    '''
    Refreeze MFdom water according to the cold content of every layer and update
    density, temperature and all hydraulic properties accordingly.

    The refrozen volume in each layer is limited by (i) the water available above
    lwc_min_fr, (ii) the cold content, and (iii) the pore space (density must not
    exceed RHO_I). After refreezing, the van Genuchten parameters and the
    saturated water contents of both domains are recomputed; if MFdom water then
    exceeds saturation, Msatexcess redistributes it, and likewise Psatexcess for
    PFdom. zstep and Mthetar_old are accepted for interface consistency but not
    used here.

    Returns the updated state: rho, Tz, Mhead, Mtheta, Mthetar, MeffSat, Mlwc,
    Mtheta_sat, Ptheta, PeffSat, Plwc, Ptheta_sat, Ksat, theta_sat, alpha_vG,
    n_vG, m_vG, Sc, totrefrozen_lwc, refrozenlay, totrunoff.
    '''
    ### Layers mass ###
    mass = rho*dz
    ### Calculate the refreezing potential in every layer ###
    cold_content = CP_I * mass * (T_MELT - Tz) # heat required to bring each layer to 273 K [J]
    cold_content_sum = cold_content.cumsum(axis=0) # cumulative cold content from the surface [J] (not used below)
    refreeze_mass_pot = cold_content / LF_I # meltwater mass the cold content could refreeze [kg]
    refreeze_mass_pot = np.maximum(refreeze_mass_pot,0.)
    refreeze_mass_pot_sum = refreeze_mass_pot.cumsum(axis=0) # cumulative refreezable mass [kg] (not used below)
    rho_pot = (mass + refreeze_mass_pot) / dz # density of the boxes if the full potential refroze [kg m-3]
    porosity_pot = 1 - rho_pot / RHO_I # porosity of the boxes if the full potential refroze [/]
    porespace_vol_pot = porosity_pot * dz # corresponding pore space [m] (not used below)
    refreeze_vol_pot = refreeze_mass_pot/1000. # refreezable potential as water volume [m]
    refreeze_vol_pot_sum = refreeze_vol_pot.cumsum(axis=0) # cumulative refreezable volume [m] (not used below)
    ### Refreezing process ###
    refrozen_vol = np.minimum(np.maximum((Mlwc-lwc_min_fr),0),refreeze_vol_pot) # volume refrozen, limited by available lwc and cold content [mWE]
    porlimit = np.zeros_like(refreeze_mass_pot) # third limit: avoid exceeding RHO_I (negative porosity)
    excrefr = np.where(porosity_pot<0)[0] # layers where the potential refreeze would exceed RHO_I
    if np.any(refrozen_vol < 0):
        print('Negative refrozen_vol before excrefr')
    for ii in excrefr:
        porlimit[ii] = 1e-3*((RHO_I)*dz[ii]-mass[ii]) # [mWE] refreeze allowed by pore space (1e-3 converts kg to mWE); safety margin provided by the min value of porosity_refr below
        if porlimit[ii] < refrozen_vol[ii]: # cold content and available lwc would push the layer past RHO_I
            refrozen_vol[ii] = min(refrozen_vol[ii],porlimit[ii]) # limit the volume that will refreeze
            if refrozen_vol[ii] < 0:
                print('Error due to a negative refrozen_vol')
    if np.any(refrozen_vol < 0):
        print('Negative refrozen_vol after excrefr')
    refrozen_mass = 1000*refrozen_vol # corresponding refrozen water mass [kg]
    Mlwc = Mlwc-refrozen_vol # remaining MFdom water [m]
    refreeze_vol_pot = refreeze_vol_pot-refrozen_vol # leftover refreezing potential (>0 when lwc was the limiting factor) [m]
    refreeze_mass_pot = 1000*refreeze_vol_pot # leftover potential [kg]
    cold_content = refreeze_mass_pot*LF_I # remaining cold content [J]
    totrefrozen_lwc += np.sum(refrozen_vol) # [mWE]
    refrozenlay += refrozen_vol # per-layer refrozen totals [mWE]
    lat_heat = refrozen_mass*LF_I # latent heat released in every layer [J] (not used below)
    if np.any(refrozen_mass < 0):
        print('Error due to a negative refrozen_mass')
    mass = mass + refrozen_mass # new mass: the refrozen water is added to the ice matrix
    Tz = T_MELT - cold_content/(CP_I*mass) # remaining cold content sets the new temperature [K]
    rho = mass/dz
    Mtheta = Mlwc/dz
    ### Calculate pore space available in every layer --> water content at saturation
    porosity = 1 - rho/RHO_I # definition of porosity [/]
    porespace_vol = porosity * dz # pore space of each layer [m] (not used below)
    porosity_refr = porosity*RHO_I/RHO_W_KGM # liquid volume the pores can hold once refrozen, Wever 2014 (9) [/]
    porosity_refr = np.maximum(porosity_refr,17e-3) # keep room for the numerical-stability minimum in both domains; 17e-3 is equivalent to a density of 900.0
    porespace_refr_vol = porosity_refr*dz # available pore space of each layer [m] (not used below)
    ### Re-execute all necessary calculations ###
    theta_sat = porosity_refr # volumetric water content at saturation [/]
    alpha_vG = 4.4e6*(rho/(2*grain))**(-0.98) # van Genuchten alpha, Hirashima 2014 (5); typical value ~35.0
    n_vG = 1 + 2.7e-3*(rho/(2*grain))**(0.61) # van Genuchten n, Hirashima 2014 (6); typical value ~4.0
    m_vG = 1 - 1/n_vG # van Genuchten m, Wever 2014 (8); typical value ~0.75
    Sc = (1 + (alpha_vG*h_e)**n_vG)**(-m_vG) # saturation at the cut-off point [/], Ippisch et al. 2006 eq (11)
    Ksat = RHO_W_KGM*GRAVITY/mu * 3.0*(grain)**2*np.exp(-0.013*rho) # hydraulic conductivity at saturation (>0) [m s-1], Calonne et al. 2012; see Wever 2015 (7) and D'Amboise 2017 (10)
    Mtheta_sat = (1-bigF)*theta_sat # MFdom share of the saturated content
    Ptheta_sat = bigF*theta_sat # PFdom share of the saturated content
    if np.any(Mtheta > Mtheta_sat): # theta_sat decreased -> MFdom may now be oversaturated (e.g. water left in a layer that reached RHO_I-1e-3)
        Mtheta,Mthetar,MeffSat,Mlwc,totrunoff = Msatexcess(dz,rho,Mtheta,Mtheta_sat,crtn_theta,rhoimp,totrunoff)
    ## Update of Mthetar as in Wever 2014 ##
    Mthetar = np.minimum((np.ones_like(Mtheta)*0.02),0.9*Mtheta) # residual water content [/]
    ## Update of effSat and head ##
    MeffSat = (Mtheta-Mthetar)/(Mtheta_sat-Mthetar)
    Mhead = -1*1/alpha_vG * ((Sc * MeffSat)**(-1/m_vG)-1)**(1/n_vG) # pressure head [m], Wever 2014 (3)
    PeffSat = Ptheta/Ptheta_sat # Ptheta_sat decreased while Ptheta did not -> PeffSat increased
    # The PF domain might now be oversaturated; rare, but handled just in case.
    if np.any(PeffSat>1):
        Ptheta,PeffSat,Plwc,totrunoff = Psatexcess(dz,rho,Ptheta,Ptheta_sat,crtn_theta,rhoimp,totrunoff)
    return rho,Tz,Mhead,Mtheta,Mthetar,MeffSat,Mlwc,Mtheta_sat,Ptheta,PeffSat,Plwc,Ptheta_sat,Ksat,theta_sat,alpha_vG,n_vG,m_vG,Sc,totrefrozen_lwc,refrozenlay,totrunoff
def runoff(dz,rho,Mhead,Mtheta,Mthetar,Mthetar_old,MeffSat,Mlwc,Mtheta_sat,theta_min_fr,crtn_theta,slope,rhoimp,aquif,deltatime,totrunoff):
    '''
    Greenland-specific lateral runoff, Zuo and Oerlemans 1996 (21).
    Water in excess of the residual content drains laterally with a characteristic
    timescale that shortens with surface slope. Only layers wetter than
    theta_min_fr and above the aquifer lose water (CAUTION: the theta_min_fr
    screening might be changed). Mhead, Mthetar_old, rho and rhoimp are accepted
    for interface consistency but not used in the current formulation.
    Returns updated Mtheta, Mthetar, MeffSat, Mlwc and totrunoff.
    '''
    # Characteristic runoff timescale, Zuo and Oerlemans 1996 [s]
    c1Z = 1.5*24*3600
    c2Z = 25*24*3600
    c3Z = 140 # [/]
    tcarac = c1Z + c2Z*np.exp(-1*c3Z*slope)
    drained = np.zeros_like(dz) # lwc removed from each layer by lateral runoff [m]
    for idx in np.where(Mtheta>theta_min_fr)[0]: # every layer wet enough to drain
        if idx >= aquif: # no runoff inside the aquifer
            continue
        mobile = (Mtheta[idx]*dz[idx])-(Mthetar[idx]*dz[idx]) # water above the residual content [m]
        drained[idx] = deltatime*mobile/tcarac # Zuo and Oerlemans 1996 (21), Lefebvre 2003 (1), Langen 2017 (13) [m]
        drained[idx] = min(drained[idx],mobile) # never drain Mtheta below Mthetar
        drained[idx] = min(drained[idx],(Mtheta[idx]-crtn_theta/10)*dz[idx]) # preserve numerical stability
        drained[idx] = max(drained[idx],0.) # no negative runoff
        Mtheta[idx] -= drained[idx]/dz[idx] # remove the runoff from the MFdom
        ## Refresh the dependent MFdom variables ##
        Mthetar[idx] = np.minimum(0.02,0.9*Mtheta[idx]) # residual water content, Wever 2014 (10)
        Mlwc[idx] = Mtheta[idx]*dz[idx]
        MeffSat[idx] = (Mtheta[idx]-Mthetar[idx])/(Mtheta_sat[idx]-Mthetar[idx])
    totrunoff += sum(drained) # accumulate the total runoff
    return Mtheta,Mthetar,MeffSat,Mlwc,totrunoff
def Prefreezing(dz,rho,grain,Tz,Mthetar_old,Mtheta,Mlwc,lwc_min_fr,Ptheta,PeffSat,Plwc,bigF,h_e,mu,crtn_theta,dryfront,totrefrozen_lwc,refrozenlay,rhoimp,totrunoff):
    '''
    Refreeze preferential-flow water (Plwc) above the dry front.

    Not used in the preferential flow scheme of Wever 2016 but might be a
    good idea to use.  Proceeds to refreezing of Plwc down to dryfront
    according to the cold content of every layer, then adjusts porosity,
    temperature, density and all hydraulic properties accordingly.
    Also needs the variables Mtheta and dryfront as input parameters
    (vs refreezing()).

    All arrays are layer-wise columns; rho, Tz, Plwc, Ptheta and
    refrozenlay are modified in place over [0:dryfront+1].
    Relies on module-level constants CP_I, T_MELT, LF_I, RHO_I,
    RHO_W_KGM, GRAVITY and on the sibling function Msatexcess.
    '''
    ### Layers mass ###
    mass = rho[0:dryfront+1]*dz[0:dryfront+1]
    ### Calculate the refreezing potential in every layer ###
    cold_content = CP_I * mass[0:dryfront+1] * (T_MELT - Tz[0:dryfront+1]) # cold content of each box, i.e. how much heat to bring it to 273K [J]
    cold_content_sum = cold_content.cumsum(axis=0) # cumulative cold content, starting from the surface [J] (kept for diagnostics; unused below)
    refreeze_mass_pot = cold_content / LF_I # how much mass of the meltwater could be refrozen due to cold content [kg]
    refreeze_mass_pot = np.maximum(refreeze_mass_pot,0.) # clamp: layers at/above T_MELT cannot refreeze anything
    refreeze_mass_pot_sum = refreeze_mass_pot.cumsum(axis=0) # cumulative refreezable meltwater mass [kg] (unused below)
    rho_pot = (mass[0:dryfront+1] + refreeze_mass_pot[0:dryfront+1]) / dz[0:dryfront+1] # density of the boxes if the refreezemass refroze [kg/m3]
    porosity_pot = 1 - rho_pot / RHO_I # porosity of the boxes if the refreezemass refroze [/]
    porespace_vol_pot = porosity_pot * dz[0:dryfront+1] # pore space if the refreezemass refroze [m] (unused below)
    refreeze_vol_pot = refreeze_mass_pot/1000. # how many meters of meltwater could be refrozen due to cold content [m]
    refreeze_vol_pot_sum = refreeze_vol_pot.cumsum(axis=0) # cumulative refreezable meltwater volume [m] (unused below)
    ### Refreezing process ###
    # Refreeze what is available above lwc_min_fr, limited by cold content.
    refrozen_vol = np.minimum(np.maximum(Plwc[0:dryfront+1]-lwc_min_fr[0:dryfront+1],0),refreeze_vol_pot) # Volume of water refrozen [mWE]
    porlimit = np.zeros_like(refreeze_mass_pot) # Now we want to avoid exceeding RHO_I <-> reaching negative porosity
    excrefr = np.where(porosity_pot<0)[0] # Layers where full refreezing would exceed ice density
    if np.any(refrozen_vol < 0):
        print('Negative refrozen_vol before excrefr')
    for ii in excrefr:
        # Limit of possible refreeze due to pore space availability [mWE];
        # the safety margin is provided by the min value for porosity_refr below.
        porlimit[ii] = 1e-3*((RHO_I)*dz[ii]-mass[ii])
        if porlimit[ii] < refrozen_vol[ii]: # cold content and available LWC would push the layer past RHO_I
            refrozen_vol[ii] = min(refrozen_vol[ii],porlimit[ii]) # limit the volume we will refreeze
            if refrozen_vol[ii] < 0:
                print('Error due to a negative refrozen_vol')
    if np.any(refrozen_vol < 0):
        print('Negative refrozen_vol after excrefr')
    refrozen_mass = 1000*refrozen_vol # Corresponding mass of water refrozen [kg]
    Plwc[0:dryfront+1] = Plwc[0:dryfront+1]-refrozen_vol # New value of lwc [m]
    refreeze_vol_pot = refreeze_vol_pot-refrozen_vol # what can still be refrozen (!=0 if lwc is the limiting factor) [m]
    refreeze_mass_pot = 1000*refreeze_vol_pot # what can still be refrozen (!=0 if lwc is the limiting factor) [kg]
    cold_content = refreeze_mass_pot*LF_I # remaining cold content [J]
    totrefrozen_lwc += np.sum(refrozen_vol) # [mWE]
    refrozenlay[0:dryfront+1] += refrozen_vol # [mWE]
    lat_heat = refrozen_mass*LF_I # latent heat released in every layer [J] (informational; Tz is updated from remaining cold content instead)
    if np.any(refrozen_mass < 0):
        print('Error due to a negative refrozen_mass')
    mass = mass + refrozen_mass # new mass: the refrozen water is added to the ice matrix
    Tz[0:dryfront+1] = T_MELT - cold_content/(CP_I*mass) # remaining cold content == energy to raise the new mass from new Tz to T_MELT [K]
    rho[0:dryfront+1] = mass/dz[0:dryfront+1]
    Ptheta[0:dryfront+1] = Plwc[0:dryfront+1]/dz[0:dryfront+1]
    ### Calculate pore space available in every layer --> water content at saturation
    porosity = 1 - rho/RHO_I # Definition of porosity [/]
    porespace_vol = porosity * dz # Pore space of each layer [m] (unused below)
    porosity_refr = porosity*RHO_I/RHO_W_KGM # space available for liq water volume once refrozen, Wever 2014 (9) [/]
    # Alternative (disabled): porosity_refr = np.maximum(porosity_refr,1e-4)  # 1e-4 is equivalent to 916.9 density
    porosity_refr = np.maximum(porosity_refr,17e-3) # allow space for minimum water content required in both domains for numerical stability, 17e-3 is equivalent to 900.0 density
    porespace_refr_vol = porosity_refr*dz # Available pore space of each layer [m] (unused below)
    ### Re-execute all necessary calculations ###
    theta_sat = porosity_refr # volumetric water content in saturated conditions [/]
    Mtheta_sat = (1-bigF)*theta_sat
    Ptheta_sat = bigF*theta_sat
    alpha_vG = 4.4e6*(rho/(2*grain))**(-0.98) # Hirashima 2014 (5) ; typical value ~35.0
    n_vG = 1 + 2.7e-3*(rho/(2*grain))**(0.61) # Hirashima 2014 (6) ; typical value ~4.0
    m_vG = 1 - 1/n_vG # Wever 2014 (8) ; typical value ~0.75
    Sc = (1 + (alpha_vG*h_e)**n_vG)**(-m_vG) # Saturation at cut-off point [/], see Ippisch et al., 2006 eq(11)
    Ksat = RHO_W_KGM*GRAVITY/mu * 3.0*(grain)**2*np.exp(-0.013*rho) # Hydraulic conductivity at saturation (>0) [m s-1], Calonne et al. 2012, see Wever 2015 (7) and D'Amboise 2017 (10)
    Mtheta_sat = (1-bigF)*theta_sat
    Ptheta_sat = bigF*theta_sat
    if np.any(Mtheta > Mtheta_sat): # saturation exceeds maximum -> redistribute water
        # NOTE(review): theta_sat (total) is passed here, whereas the sibling
        # call after refreezing passes Mtheta_sat -- confirm which is intended.
        Mtheta,Mthetar,MeffSat,Mlwc,totrunoff = Msatexcess(dz,rho,Mtheta,theta_sat,crtn_theta,rhoimp,totrunoff) # could happen if water left in a layer that reached RHO_I-1e-3
    ## Update of Mthetar as in Wever 2014 ##
    Mthetar = np.minimum((np.ones_like(Mtheta)*0.02),0.9*Mtheta) # residual water content [/], Wever 2014 (10)
    ## Update of effSat and head ##
    MeffSat = (Mtheta-Mthetar)/(Mtheta_sat-Mthetar)
    Mhead = -1*1/alpha_vG * ((Sc * MeffSat)**(-1/m_vG)-1)**(1/n_vG) # [m] Wever 2014 (3)
    PeffSat = Ptheta/Ptheta_sat # may have changed since theta_sat was decreased -> Ptheta_sat decreased
    return rho,Tz,Mhead,Mtheta,Mthetar,MeffSat,Mlwc,Mtheta_sat,Ptheta,PeffSat,Plwc,Ptheta_sat,Ksat,theta_sat,alpha_vG,n_vG,m_vG,Sc,totrefrozen_lwc,refrozenlay,totrunoff
def distribute_tostore(dz,rho,tostore,Mlwc,Plwc,rhoimp,bigF,totrunoff):
    '''
    Distribute a volume of excess water into the column from the bottom up.

    For each layer (skipping ice layers denser than rhoimp) the
    preferential-flow fraction bigF of the refreezing-adjusted pore space
    is filled first, then the matrix-flow fraction, moving upward layer by
    layer.  Water that cannot be stored anywhere once the surface is
    reached is added to the runoff total.

    Fix vs previous version: the available space of a layer is clamped at
    zero before comparing with tostore.  Previously an over-filled layer
    (Plwc or Mlwc beyond its share of the pore space) produced a NEGATIVE
    transfer, draining water out of that layer and inflating tostore.
    Also drops the unused porespace_vol intermediate.

    Returns (Mlwc, Plwc, totrunoff); Mlwc and Plwc are updated in place.
    '''
    ### Calculate pore space available in every layer --> water content at saturation
    porosity = 1 - rho/RHO_I # Definition of porosity [/]
    porosity_refr = porosity*RHO_I/RHO_W_KGM # space available for liq water volume once refrozen, Wever 2014 (9) [/]
    porosity_refr = np.maximum(porosity_refr,17e-3) # keep room for the minimum water content needed for numerical stability (17e-3 <-> 900.0 kg m-3)
    porespace_refr_vol = porosity_refr*dz # Available pore space of each layer [m]
    spaceavail = 0.999*porespace_refr_vol # safety margin to avoid calculation problems for the head
    jj = len(dz)-1 # start filling from the bottom layer
    while tostore > 0: # as long as there is water to store, continue the distribution
        if rho[jj] < rhoimp: # don't put water in ice layers
            # Fill the PFdom share of the porosity first; clamp at 0 so an
            # already over-filled layer can never give water back to tostore.
            toPF = min(max(bigF[jj]*spaceavail[jj]-Plwc[jj], 0.), tostore)
            Plwc[jj] += toPF
            tostore -= toPF # tostore has been (partly) emptied
            # Then fill the MFdom share of the porosity (same clamping).
            toMF = min(max((1-bigF[jj])*spaceavail[jj]-Mlwc[jj], 0.), tostore)
            Mlwc[jj] += toMF
            tostore -= toMF # tostore has been (partly) emptied
        jj -= 1 # go to the layer above
        if jj == -1: # we reached the surface
            totrunoff += tostore # put the rest of tostore as runoff
            tostore = 0. # nothing to store anymore
    return(Mlwc,Plwc,totrunoff)
| 58.868476
| 196
| 0.623821
|
4a1606b551d3612fb9dabfbd89b60be051f406be
| 3,857
|
py
|
Python
|
alpaca_trade_api/models/public_stream.py
|
164747/alpaca-trade-api-python
|
94134a2ea226eab653329e1f1790e1b1d9e74f16
|
[
"Apache-2.0"
] | null | null | null |
alpaca_trade_api/models/public_stream.py
|
164747/alpaca-trade-api-python
|
94134a2ea226eab653329e1f1790e1b1d9e74f16
|
[
"Apache-2.0"
] | null | null | null |
alpaca_trade_api/models/public_stream.py
|
164747/alpaca-trade-api-python
|
94134a2ea226eab653329e1f1790e1b1d9e74f16
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import typing
import pytz
from pydantic import BaseModel, Field
from alpaca_trade_api.models import public_rest
class SocketBase(BaseModel):
    """Common fields of every message on the Alpaca market-data stream.

    Field aliases are the single-letter keys used on the wire.
    """
    symbol: str = Field(alias='S')      # ticker symbol
    event_type: str = Field(alias='T')  # message type tag (e.g. 't', 'q', 'b')
class Trade(SocketBase):
    """A single trade message (event_type 't') from the Alpaca stream."""
    exchange_id: str = Field(alias='x')   # code of the exchange where the trade printed
    trade_id: int = Field(alias='i')
    tape: str = Field(alias='z')          # consolidated tape identifier
    price: float = Field(alias='p')
    size: int = Field(alias='s')
    trade_conditions: typing.List[str] = Field(alias='c', default=None)  # optional condition flags
    utc: datetime.datetime = Field(alias='t')  # trade timestamp

    class Config:
        # Example payload as received from the websocket; used for schema docs.
        schema_extra = {
            'example': {
                "T": "t",
                "i": 96921,
                "S": "AAPL",
                "x": "D",
                "p": 126.55,
                "s": 1,
                "t": "2021-02-22T15:51:44.208Z",
                "c": [
                    "@",
                    "I"
                ],
                "z": "C"
            }
        }

    @property
    def age(self) -> datetime.timedelta:
        """Elapsed time since the trade timestamp.

        Assumes ``utc`` is parsed as timezone-aware (the wire format carries
        a 'Z' suffix) -- subtraction from an aware now() requires it.
        """
        return datetime.datetime.now(pytz.utc) - self.utc

    @property
    def trade_item(self) -> public_rest.TradeItem:
        """Convert this stream message into the REST-side TradeItem model."""
        return public_rest.TradeItem(**self.dict())
class Quote(SocketBase):
    """A quote message (event_type 'q'); bid and/or ask side may be missing."""
    bid_exchange_id: typing.Optional[str] = Field(alias='bx', default=None)
    bid_price: typing.Optional[float] = Field(alias='bp', default=None)
    bid_size: typing.Optional[int] = Field(alias='bs', default=None)
    ask_exchange_id: typing.Optional[str] = Field(alias='ax', default=None)
    ask_price: typing.Optional[float] = Field(alias='ap', default=None)
    ask_size: typing.Optional[int] = Field(alias='as', default=None)
    quote_conditions: typing.Optional[typing.List[str]] = Field(alias='c', default=None)
    utc: datetime.datetime = Field(alias='t')  # quote timestamp
    tape: typing.Optional[str] = Field(alias='z', default=None)

    class Config:
        # Example payload as received from the websocket; used for schema docs.
        schema_extra = {'example': {
            "T": "q",
            "S": "AMD",
            "bx": "U",
            "bp": 87.66,
            "bs": 1,
            "ax": "Q",
            "ap": 87.68,
            "as": 4,
            "t": "2021-02-22T15:51:45.335689322Z",
            "c": [
                "R"
            ],
            "z": "C"
        }}

    def __str__(self):
        # Compact "size:price -- size:price" rendering (bid side first).
        return f'{self.bid_size}:{self.bid_price} -- {self.ask_size}:{self.ask_price}'

    @property
    def is_complete(self) -> bool:
        """True when both sides of the quote are present."""
        return self.bid_price is not None and self.ask_price is not None

    @property
    def age(self) -> datetime.timedelta:
        """Elapsed time since the quote timestamp (``utc`` must be tz-aware)."""
        return datetime.datetime.now(pytz.utc) - self.utc

    @property
    def payback(self) -> float:
        """sqrt(bid/ask); < 1 whenever a spread exists.

        NOTE(review): the intended financial meaning of this ratio is not
        evident from this file -- confirm with callers before relying on it.
        Raises TypeError/ZeroDivisionError on incomplete quotes; check
        ``is_complete`` first.
        """
        return (self.bid_price / self.ask_price) ** .5

    @property
    def middle_price(self) -> float:
        """Arithmetic mid-point of bid and ask (requires a complete quote)."""
        return (self.ask_price + self.bid_price) / 2.0
class Bar(SocketBase):
    """An aggregate (minute) bar message (event_type 'b')."""
    volume: int = Field(alias='v')                       # volume within the bar
    volume_today: int = Field(alias='av', default=None)  # accumulated day volume
    official_open_price: float = Field(alias='op', default=None)
    vol_weight_price: float = Field(alias='vw', default=None)  # volume-weighted price
    open_price: float = Field(alias='o')
    close_price: float = Field(alias='c')
    high_price: float = Field(alias='h')
    low_price: float = Field(alias='l')
    # NOTE(review): "avg_prive" looks like a typo for "avg_price", but the
    # attribute name is public API -- renaming would break callers.
    avg_prive: float = Field(alias='a', default=None)
    utc_start: datetime.datetime = Field(alias='t')  # bar start timestamp

    class Config:
        # Example payload as received from the websocket; used for schema docs.
        schema_extra = {'example': {
            "T": "b",
            "S": "SPY",
            "o": 388.985,
            "h": 389.13,
            "l": 388.975,
            "c": 389.12,
            "v": 49378,
            "t": "2021-02-22T19:15:00Z"
        }}

    @property
    def rest_bar(self) -> public_rest.Bar:
        """Convert this stream bar into the REST-side Bar model."""
        return public_rest.Bar(v=self.volume, o=self.open_price, c=self.close_price, h=self.high_price,
                               l=self.low_price,
                               t=self.utc_start)
| 30.132813
| 103
| 0.542909
|
4a16073c8890124ff7a3544859d5dbb11598f7b6
| 14,969
|
py
|
Python
|
compressor/management/commands/compress.py
|
andriyor/django-compressor
|
be782489af21b51cad2e8a60dd6b0a3bed20bff3
|
[
"MIT"
] | null | null | null |
compressor/management/commands/compress.py
|
andriyor/django-compressor
|
be782489af21b51cad2e8a60dd6b0a3bed20bff3
|
[
"MIT"
] | null | null | null |
compressor/management/commands/compress.py
|
andriyor/django-compressor
|
be782489af21b51cad2e8a60dd6b0a3bed20bff3
|
[
"MIT"
] | null | null | null |
# flake8: noqa
import os
import sys
import concurrent.futures
from threading import Lock
from collections import OrderedDict, defaultdict
from fnmatch import fnmatch
from importlib import import_module
import django
from django.core.management.base import BaseCommand, CommandError
import django.template
from django.template import Context
from django.utils.encoding import smart_str
from django.template.loader import get_template # noqa Leave this in to preload template locations
from django.template import engines
from compressor.cache import get_offline_hexdigest, write_offline_manifest, get_offline_manifest
from compressor.conf import settings
from compressor.exceptions import (OfflineGenerationError, TemplateSyntaxError,
TemplateDoesNotExist)
from compressor.utils import get_mod_func
# Serializes access to the shared offline-manifest dict from the worker
# threads spawned in Command.compress() (see Command._compress_template).
offline_manifest_lock = Lock()
class Command(BaseCommand):
    """Offline-compress every ``{% compress %}`` block found in the templates.

    Walks all templates visible to the configured loaders, renders each
    compress node for every offline context, and writes the results into
    the offline manifest so requests never compress on the fly.
    """
    help = "Compress content outside of the request/response cycle"

    def add_arguments(self, parser):
        """Register the command-line options for this management command."""
        parser.add_argument('--extension', '-e', action='append', dest='extensions',
                            help='The file extension(s) to examine (default: ".html", '
                                 'separate multiple extensions with commas, or use -e '
                                 'multiple times)')
        parser.add_argument('-f', '--force', default=False, action='store_true',
                            help="Force the generation of compressed content even if the "
                                 "COMPRESS_ENABLED setting is not True.", dest='force')
        parser.add_argument('--follow-links', default=False, action='store_true',
                            help="Follow symlinks when traversing the COMPRESS_ROOT "
                                 "(which defaults to STATIC_ROOT). Be aware that using this "
                                 "can lead to infinite recursion if a link points to a parent "
                                 "directory of itself.", dest='follow_links')
        parser.add_argument('--engine', default=[], action="append",
                            help="Specifies the templating engine. jinja2 and django are "
                                 "supported. It may be a specified more than once for "
                                 "multiple engines. If not specified, django engine is used.",
                            dest="engines")

    def get_loaders(self):
        """Return the flat list of template loaders, unwrapping cached loaders."""
        template_source_loaders = []
        for e in engines.all():
            if hasattr(e, 'engine'):
                template_source_loaders.extend(
                    e.engine.get_template_loaders(e.engine.loaders))
        loaders = []
        # If template loader is CachedTemplateLoader, return the loaders
        # that it wraps around. So if we have
        # TEMPLATE_LOADERS = (
        #     ('django.template.loaders.cached.Loader', (
        #         'django.template.loaders.filesystem.Loader',
        #         'django.template.loaders.app_directories.Loader',
        #     )),
        # )
        # The loaders will return django.template.loaders.filesystem.Loader
        # and django.template.loaders.app_directories.Loader
        # The cached Loader and similar ones include a 'loaders' attribute
        # so we look for that.
        for loader in template_source_loaders:
            if hasattr(loader, 'loaders'):
                loaders.extend(loader.loaders)
            else:
                loaders.append(loader)
        return loaders

    def __get_parser(self, engine):
        """Build the offline parser for the requested templating engine.

        Raises OfflineGenerationError for anything other than 'jinja2' or
        'django'.
        """
        charset = (
            settings.FILE_CHARSET if settings.is_overridden('FILE_CHARSET')
            else 'utf-8'
        )
        if engine == "jinja2":
            from compressor.offline.jinja2 import Jinja2Parser
            env = settings.COMPRESS_JINJA2_GET_ENVIRONMENT()
            parser = Jinja2Parser(charset=charset, env=env)
        elif engine == "django":
            from compressor.offline.django import DjangoParser
            parser = DjangoParser(charset=charset)
        else:
            raise OfflineGenerationError("Invalid templating engine specified.")
        return parser

    def compress(self, engine, extensions, verbosity, follow_links, log):
        """
        Searches templates containing 'compress' nodes and compresses them
        "offline" -- outside of the request/response cycle.

        The result is cached with a cache-key derived from the content of the
        compress nodes (not the content of the possibly linked files!).

        Returns (offline_manifest, block_count, results); node rendering is
        fanned out to a small thread pool per offline context.
        """
        if not self.get_loaders():
            raise OfflineGenerationError("No template loaders defined. You "
                                         "must set TEMPLATE_LOADERS in your "
                                         "settings or set 'loaders' in your "
                                         "TEMPLATES dictionary.")
        templates = set()
        if engine == 'django':
            # Collect every directory the django loaders can serve from, then
            # walk them for files matching the requested extensions.
            paths = set()
            for loader in self.get_loaders():
                try:
                    module = import_module(loader.__module__)
                    get_template_sources = getattr(module,
                                                   'get_template_sources', None)
                    if get_template_sources is None:
                        get_template_sources = loader.get_template_sources
                    paths.update(smart_str(origin) for origin in get_template_sources(''))
                except (ImportError, AttributeError, TypeError):
                    # Yeah, this didn't work out so well, let's move on
                    pass
            if not paths:
                raise OfflineGenerationError("No template paths found. None of "
                                             "the configured template loaders "
                                             "provided template paths. See "
                                             "https://docs.djangoproject.com/en/2.1/topics/templates/ "
                                             "for more information on template "
                                             "loaders.")
            if verbosity >= 2:
                log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
            for path in paths:
                for root, dirs, files in os.walk(path, followlinks=follow_links):
                    templates.update(os.path.relpath(os.path.join(root, name), path)
                                     for name in files if not name.startswith('.') and
                                     any(fnmatch(name, "*%s" % glob) for glob in extensions))
        elif engine == 'jinja2':
            env = settings.COMPRESS_JINJA2_GET_ENVIRONMENT()
            if env and hasattr(env, 'list_templates'):
                templates |= set([env.loader.get_source(env, template)[1] for template in
                                  env.list_templates(filter_func=lambda _path:
                                                     os.path.splitext(_path)[-1] in extensions)])
        if not templates:
            raise OfflineGenerationError("No templates found. Make sure your "
                                         "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                         "settings are correct.")
        if verbosity >= 2:
            log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

        # The offline context may be a dict, a list of dicts, or a dotted
        # path to a generator function returning contexts.
        contexts = settings.COMPRESS_OFFLINE_CONTEXT
        if isinstance(contexts, str):
            try:
                module, function = get_mod_func(contexts)
                contexts = getattr(import_module(module), function)()
            except (AttributeError, ImportError, TypeError) as e:
                raise ImportError("Couldn't import offline context function %s: %s" %
                                  (settings.COMPRESS_OFFLINE_CONTEXT, e))
        elif not isinstance(contexts, (list, tuple)):
            contexts = [contexts]

        parser = self.__get_parser(engine)
        # Parse every template once; unreadable/broken ones are logged and skipped.
        fine_templates = []
        if verbosity >= 1:
            log.write("Compressing... ")
        for template_name in templates:
            try:
                template = parser.parse(template_name)
                template.template_name = template_name
                fine_templates.append(template)
            except IOError:  # unreadable file -> ignore
                if verbosity >= 1:
                    log.write("Unreadable template at: %s\n" % template_name)
                continue
            except TemplateSyntaxError as e:  # broken template -> ignore
                if verbosity >= 1:
                    log.write("Invalid template %s: %s\n" % (template_name, smart_str(e)))
                continue
            except TemplateDoesNotExist:  # non existent template -> ignore
                if verbosity >= 1:
                    log.write("Non-existent template at: %s\n" % template_name)
                continue
            except UnicodeDecodeError:
                if verbosity >= 1:
                    log.write("UnicodeDecodeError while trying to read "
                              "template %s\n" % template_name)
                continue

        contexts_count = 0
        nodes_count = 0
        offline_manifest = OrderedDict()
        errors = []
        for context_dict in contexts:
            # Map each template to its compress nodes and the contexts under
            # which each node must be rendered.
            compressor_nodes = OrderedDict()
            for template in fine_templates:
                context = Context(parser.get_init_context(context_dict))
                try:
                    nodes = list(parser.walk_nodes(template, context=context))
                except (TemplateDoesNotExist, TemplateSyntaxError) as e:
                    # Could be an error in some base template
                    if verbosity >= 1:
                        log.write("Error parsing template %s: %s\n" %
                                  (template.template_name, smart_str(e)))
                    continue
                if nodes:
                    template_nodes = compressor_nodes.setdefault(template, OrderedDict())
                    for node in nodes:
                        nodes_count += 1
                        template_nodes.setdefault(node, []).append(context)
            # Render this context's nodes concurrently; shutdown(wait=True)
            # joins all workers before moving to the next context.
            pool = concurrent.futures.ThreadPoolExecutor(max_workers=4)
            for template, nodes in compressor_nodes.items():
                template._log = log
                template._log_verbosity = verbosity
                pool.submit(self._compress_template, offline_manifest, nodes, parser, template, errors)
            pool.shutdown(wait=True)
            contexts_count += 1

        # If errors exist, raise the first one in the list
        if errors:
            raise errors[0]
        elif not nodes_count:
            raise OfflineGenerationError(
                "No 'compress' template tags found in templates."
                "Try running compress command with --follow-links and/or"
                "--extension=EXTENSIONS")
        if verbosity >= 1:
            log.write("done\nCompressed %d block(s) from %d template(s) for %d context(s).\n" %
                      (len(offline_manifest), nodes_count, contexts_count))
        return offline_manifest, len(offline_manifest), offline_manifest.values()

    @staticmethod
    def _compress_template(offline_manifest, nodes, parser, template, errors):
        """Render one template's compress nodes into the shared manifest.

        Runs on a worker thread; failures are reported through ``errors``
        (raising here would be swallowed by the executor).

        NOTE(review): the ``continue``/``return`` paths below leave a
        ``context.push()`` without its matching ``pop()`` -- each Context is
        apparently short-lived per run, but confirm before reusing contexts.
        """
        for node, node_contexts in nodes.items():
            for context in node_contexts:
                context.push()
                if not parser.process_template(template, context):
                    continue
                parser.process_node(template, context, node)
                rendered = parser.render_nodelist(template, context, node)
                key = get_offline_hexdigest(rendered)
                # Atomically check if the key exists in offline manifest.
                # If it doesn't, set a placeholder key (None). This is to prevent
                # concurrent _compress_template instances from rendering the
                # same node, and then writing to the same file.
                with offline_manifest_lock:
                    if key in offline_manifest:
                        continue
                    offline_manifest[key] = None
                try:
                    result = parser.render_node(template, context, node)
                except Exception as e:
                    errors.append(CommandError("An error occurred during rendering %s: "
                                               "%s" % (template.template_name, smart_str(e))))
                    del offline_manifest[key]
                    return
                # Store URL-placeholder form so COMPRESS_URL can be swapped later.
                result = result.replace(
                    settings.COMPRESS_URL, settings.COMPRESS_URL_PLACEHOLDER
                )
                offline_manifest[key] = result
                context.pop()

    def handle_extensions(self, extensions=('html',)):
        """
        organizes multiple extensions that are separated with commas or
        passed by using --extension/-e multiple times.

        for example: running 'django-admin compress -e js,txt -e xhtml -a'
        would result in an extension list: ['.js', '.txt', '.xhtml']

        Note: despite the examples below, the return value is a *set* of
        dot-prefixed extensions (unordered, duplicates removed).

        >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
        ['.html', '.js']
        >>> handle_extensions(['.html, txt,.tpl'])
        ['.html', '.tpl', '.txt']
        """
        ext_list = []
        for ext in extensions:
            ext_list.extend(ext.replace(' ', '').split(','))
        for i, ext in enumerate(ext_list):
            if not ext.startswith('.'):
                ext_list[i] = '.%s' % ext_list[i]
        return set(ext_list)

    def handle(self, **options):
        """Django entry point; delegates to handle_inner."""
        self.handle_inner(**options)

    def handle_inner(self, **options):
        """Validate settings, run compress() for every engine, write the manifest.

        Returns (total_block_count, list_of_results) for callers/tests.
        """
        if not settings.COMPRESS_ENABLED and not options.get("force"):
            raise CommandError(
                "Compressor is disabled. Set the COMPRESS_ENABLED "
                "setting or use --force to override.")
        if not settings.COMPRESS_OFFLINE:
            if not options.get("force"):
                raise CommandError(
                    "Offline compression is disabled. Set "
                    "COMPRESS_OFFLINE or use the --force to override.")

        log = options.get("log", sys.stdout)
        verbosity = options.get("verbosity", 1)
        follow_links = options.get("follow_links", False)
        extensions = self.handle_extensions(options.get("extensions") or ["html"])
        engines = [e.strip() for e in options.get("engines", [])] or ["django"]

        # Merge the manifests of all requested engines into one file.
        final_offline_manifest = {}
        final_block_count = 0
        final_results = []
        for engine in engines:
            offline_manifest, block_count, results = self.compress(engine, extensions, verbosity, follow_links, log)
            final_results.extend(results)
            final_block_count += block_count
            final_offline_manifest.update(offline_manifest)
        write_offline_manifest(final_offline_manifest)
        return final_block_count, final_results
# Skip Django's system-check framework when this command runs (Django reads
# this class attribute on BaseCommand before executing the command).
Command.requires_system_checks = False
| 45.776758
| 116
| 0.56637
|
4a16079010024c573b51432c99c0b72c13d6606c
| 3,420
|
py
|
Python
|
paperlib/get_vta.py
|
guimarais/Hmode_Figures
|
964b93a56421fa2594687e2eebcd66762ae4b170
|
[
"MIT"
] | null | null | null |
paperlib/get_vta.py
|
guimarais/Hmode_Figures
|
964b93a56421fa2594687e2eebcd66762ae4b170
|
[
"MIT"
] | null | null | null |
paperlib/get_vta.py
|
guimarais/Hmode_Figures
|
964b93a56421fa2594687e2eebcd66762ae4b170
|
[
"MIT"
] | null | null | null |
import dd
import numpy as np
from ipfnpytools.trz_to_rhop import trz_to_rhop
class objview(object):
    """Minimal attribute-style view over a dict.

    ``objview({'a': 1}).a == 1``; the mapping is adopted directly as the
    instance ``__dict__``, so no data is copied and later changes to the
    dict are visible through the attributes.
    """
    def __init__(self, d):
        self.__dict__=d
def get_vta(shotnr, tBegin=0.0, tEnd=10.0, magdiag='EQH', verbose=False):
    """Reads the VTA (Vertical Thomson Array) shotfile and maps data to rho_pol.

    The Edge and Core systems have different time bases so they are treated
    as independent systems.

    Bug fix vs previous version: ``magdiag`` was accepted and documented but
    the equilibrium was hard-coded to 'FPP' in both trz_to_rhop calls; the
    argument is now honoured (previous callers relying on the default
    mapping should pass magdiag='FPP' explicitly).

    Parameters
    -----------
    shotnr: int
        Number of the shot
    tBegin: float
        Beginning of the time window for the data block
    tEnd: float
        End of the time window for the data block
    magdiag: string ('EQH', 'EQI', 'FPP', 'IDE')
        String for the magnetic equilibria to be used in mapping data to rho poloidal
    verbose: bool
        Flag to pass to trz_to_rhop to print the rho_pol progress

    Returns
    -----------
    vta: object
        Object containing data from the core and edge VTA. Data is already
        sorted by ascending rho_pol values. For each system, Core (c) and
        Edge (e), the vta object has the following elements:
        time_#: Timebase of the e/c system.
        rho_#: Rho_pol of the e/c system.
        ne_#: Density of the e/c system.
        Te_#: Temperature of the e/c system.

    Example
    -----------
    vta = get_vta(30733, tBegin=1.0, tEnd=1.5, magdiag='FPP')
    """
    # Read all core/edge signals from the VTA shotfile.
    vta = dd.shotfile('VTA', shotnr)
    ne_c = vta('Ne_c', tBegin=tBegin, tEnd=tEnd)
    te_c = vta('Te_c', tBegin=tBegin, tEnd=tEnd)
    r_c = vta('R_core', tBegin=tBegin, tEnd=tEnd)
    z_c = vta('Z_core', tBegin=tBegin, tEnd=tEnd)
    ne_e = vta('Ne_e', tBegin=tBegin, tEnd=tEnd)
    te_e = vta('Te_e', tBegin=tBegin, tEnd=tEnd)
    r_e = vta('R_edge', tBegin=tBegin, tEnd=tEnd)
    z_e = vta('Z_edge', tBegin=tBegin, tEnd=tEnd)
    vta.close()

    # Adjust R and Z dimensions to use afterwards in 'trz_to_rhop'.
    # NOTE(review): the edge system tiles R over len(z_e) but Z over
    # len(ne_e) -- kept exactly as before; confirm shapes against the
    # trz_to_rhop contract.
    ## Edge
    rmap_e = np.tile(r_e.data, [len(z_e.data), 1]).T
    zmap_e = np.tile(z_e.data, [len(ne_e.data), 1])
    ## Core
    zmap_c = np.tile(z_c.data, [len(ne_c.data), 1])
    rmap_c = np.tile(r_c.data, [len(z_c.data), 1]).T

    # Convert R and Z to rho_pol using the requested equilibrium.
    rho_e = trz_to_rhop(ne_e.time, rmap_e, zmap_e, shot=shotnr, eq=magdiag,
                        squeeze=True, verbose=verbose)
    rho_c = trz_to_rhop(ne_c.time, rmap_c, zmap_c, shot=shotnr, eq=magdiag,
                        squeeze=True, verbose=verbose)

    def _sort_by_rho(rho, ne, te):
        # Sort every time slice of the profiles by ascending rho_pol,
        # applying the same permutation to ne and Te.
        rr = np.zeros_like(rho)
        nn = np.zeros_like(rho)
        tt = np.zeros_like(rho)
        for i in range(len(rho)):
            order = np.argsort(rho[i])
            rr[i, :] = rho[i, order]
            nn[i, :] = ne[i, order]
            tt[i, :] = te[i, order]
        return rr, nn, tt

    rr_e, nn_e, tt_e = _sort_by_rho(rho_e, ne_e.data, te_e.data)
    rr_c, nn_c, tt_c = _sort_by_rho(rho_c, ne_c.data, te_c.data)

    return objview({'time_e': np.array(ne_e.time),
                    'rho_e': np.array(rr_e),
                    'ne_e': np.array(nn_e),
                    'Te_e': np.array(tt_e),
                    'time_c': np.array(ne_c.time),
                    'rho_c': np.array(rr_c),
                    'ne_c': np.array(nn_c),
                    'Te_c': np.array(tt_c)})
| 35.257732
| 175
| 0.589766
|
4a1607ee1443c07a6a7d62a28a959a6d488b4074
| 9,545
|
py
|
Python
|
mms/model_service_worker.py
|
zmhassan/mxnet-model-server
|
a491823309a7ef2845797512fe6b01f7eab03ce6
|
[
"Apache-2.0"
] | null | null | null |
mms/model_service_worker.py
|
zmhassan/mxnet-model-server
|
a491823309a7ef2845797512fe6b01f7eab03ce6
|
[
"Apache-2.0"
] | null | null | null |
mms/model_service_worker.py
|
zmhassan/mxnet-model-server
|
a491823309a7ef2845797512fe6b01f7eab03ce6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
ModelServiceWorker is the worker that is started by the MMS front-end.
Communication message format: binary encoding
"""
# pylint: disable=redefined-builtin
import logging
import os
import multiprocessing
import platform
import socket
import sys
import signal
from mms.arg_parser import ArgParser
from mms.model_loader import ModelLoaderFactory
from mms.protocol.otf_message_handler import retrieve_msg, create_load_model_response
from mms.service import emit_metrics
# Worker-level tunables.
MAX_FAILURE_THRESHOLD = 5      # NOTE(review): not referenced in this module chunk -- confirm callers
SOCKET_ACCEPT_TIMEOUT = 30.0   # presumably seconds to wait on socket accept -- confirm usage
DEBUG = False                  # extra diagnostics toggle
class MXNetModelServiceWorker(object):
"""
Backend worker to handle Model Server's python service code
"""
def __init__(self, s_type=None, s_name=None, host_addr=None, port_num=None,
             model_request=None, preload_model=False, tmp_dir="/tmp"):
    """Configure the backend worker's listening socket (unix or tcp).

    :param s_type: "unix" (requires s_name) or "tcp" (requires port_num)
    :param s_name: unix-socket path (unix mode only)
    :param host_addr: tcp bind address; defaults to 127.0.0.1
    :param port_num: tcp port (tcp mode only)
    :param model_request: load-model request stored for later use
    :param preload_model: if True, keep one loaded service across load requests
    :param tmp_dir: directory for the stdout/stderr fifos
    :raises ValueError: on a missing socket name/port or unknown s_type
    :raises RuntimeError: if the unix socket path is already in use
    """
    # Constrain MXNet's threading unless the caller configured it explicitly.
    if os.environ.get("OMP_NUM_THREADS") is None:
        os.environ["OMP_NUM_THREADS"] = "1"
    if os.environ.get("MXNET_USE_OPERATOR_TUNING") is None:
        # work around issue: https://github.com/apache/incubator-mxnet/issues/12255
        os.environ["MXNET_USE_OPERATOR_TUNING"] = "0"
    self.sock_type = s_type
    if s_type == "unix":
        if s_name is None:
            raise ValueError("Wrong arguments passed. No socket name given.")
        self.sock_name, self.port = s_name, -1
        try:
            # Remove a stale socket file from a previous run.
            os.remove(s_name)
        except OSError:
            if os.path.exists(s_name):
                raise RuntimeError("socket already in use: {}.".format(s_name))
    elif s_type == "tcp":
        self.sock_name = host_addr if host_addr is not None else "127.0.0.1"
        if port_num is None:
            raise ValueError("Wrong arguments passed. No socket port given.")
        self.port = port_num
    else:
        raise ValueError("Invalid socket type provided")
    logging.info("Listening on port: %s", s_name)
    socket_family = socket.AF_INET if s_type == "tcp" else socket.AF_UNIX
    self.sock = socket.socket(socket_family, socket.SOCK_STREAM)
    self.preload = preload_model
    self.service = None                      # loaded model service, set by load_model()
    self.model_meta_data = model_request
    self.out = self.err = None               # fifo paths, created lazily in _create_io_files()
    self.tmp_dir = tmp_dir
    self.socket_name = s_name                # None in tcp mode
def load_model(self, load_model_request=None):
    """
    Expected command
    {
        "command" : "load", string
        "modelPath" : "/path/to/model/file", string
        "modelName" : "name", string
        "gpu" : None if CPU else gpu_id, int
        "handler" : service handler entry point if provided, string
        "batchSize" : batch size, int
    }

    :param load_model_request: dict with byte-string values as above
    :return: (status message, HTTP-style status code); 200 on success,
             507 when the process runs out of memory
    """
    try:
        model_dir = load_model_request["modelPath"].decode("utf-8")
        model_name = load_model_request["modelName"].decode("utf-8")
        handler = load_model_request["handler"].decode("utf-8")
        batch_size = 1
        if "batchSize" in load_model_request:
            batch_size = int(load_model_request["batchSize"])
        gpu = None
        if "gpu" in load_model_request:
            gpu = int(load_model_request["gpu"])
        io_fd = None
        if "ioFileDescriptor" in load_model_request:
            # Create the fifos that will carry this worker's stdout/stderr.
            io_fd = load_model_request.get("ioFileDescriptor").decode("utf-8")
            self._create_io_files(self.tmp_dir, io_fd)
        # With preload enabled, reuse the already-loaded service.
        if self.service is None or self.preload is False:
            model_loader = ModelLoaderFactory.get_model_loader(model_dir)
            self.service = model_loader.load(model_name, model_dir, handler, gpu, batch_size)
        logging.info("Model %s loaded io_fd=%s", model_name, str(io_fd))
        return "loaded model {}. [PID]:{}".format(model_name, os.getpid()), 200
    except MemoryError:
        return "System out of memory", 507
def _create_io_files(self, tmp_dir, io_fd):
    """Create the named FIFO pair used to forward this worker's stdout/stderr."""
    self.out = "{}/{}-stdout".format(tmp_dir, io_fd)
    self.err = "{}/{}-stderr".format(tmp_dir, io_fd)
    # TODO: Windows support -- os.mkfifo is POSIX only.
    for fifo_path in (self.out, self.err):
        os.mkfifo(fifo_path)
def _remap_io(self):
    """Redirect this process' stdout/stderr into the FIFOs created by
    _create_io_files() so the frontend can stream worker output.
    """
    # NOTE(review): the file objects are intentionally left open for the
    # process lifetime; dup2 clones their descriptors onto fds 1 and 2.
    out_fd = open(self.out, "w")
    err_fd = open(self.err, "w")
    os.dup2(out_fd.fileno(), sys.stdout.fileno())
    os.dup2(err_fd.fileno(), sys.stderr.fileno())
def handle_connection(self, cl_socket):
    """
    Serve inference (b'I') and load-model (b'L') commands on a client socket.

    Loops until the socket or a handler raises; a failed model load raises
    RuntimeError after the response has been sent back to the frontend.
    :param cl_socket: connected client socket
    :return:
    """
    cl_socket.setblocking(True)
    while True:
        cmd, msg = retrieve_msg(cl_socket)
        if cmd == b'I':
            resp = self.service.predict(msg)
            # BUGFIX: socket.send() may perform a partial write and silently
            # truncate large responses; sendall() retries until complete.
            cl_socket.sendall(resp)
        elif cmd == b'L':
            result, code = self.load_model(msg)
            resp = bytearray()
            resp += create_load_model_response(code, result)
            cl_socket.sendall(resp)
            # Redirect stdout/stderr into the FIFOs created during load.
            self._remap_io()
            if code != 200:
                raise RuntimeError("{} - {}".format(code, result))
        else:
            raise ValueError("Received unknown command: {}".format(cmd))

        # Flush any metrics collected by the service during this command.
        if self.service is not None and self.service.context is not None \
                and self.service.context.metrics is not None:
            emit_metrics(self.service.context.metrics.store)
def sigterm_handler(self):
    """Best-effort removal of the unix socket and the stdout/stderr FIFOs."""
    for path in (self.socket_name, self.out, self.err):
        try:
            os.remove(path)
        except OSError:
            # The file may never have been created -- ignore.
            pass
def start_worker(self, cl_socket):
    """
    Serve one client connection inside a forked worker process.

    Installs a SIGTERM cleanup handler, then blocks in handle_connection().
    This method never returns: the process always terminates via os._exit().
    :param cl_socket: accepted client socket inherited from the parent
    :return:
    """
    self.sock.close()  # close listening socket in the fork
    try:
        signal.signal(signal.SIGTERM, lambda signum, frame: self.sigterm_handler())
        self.handle_connection(cl_socket)
    except Exception:  # pylint: disable=broad-except
        logging.error("Backend worker process die.", exc_info=True)
    finally:
        try:
            # Remove the stdout/stderr FIFOs created for this worker.
            os.remove(self.out)
            os.remove(self.err)
        finally:
            # Always close the client socket and hard-exit the fork;
            # os._exit() skips interpreter cleanup shared with the parent.
            cl_socket.shutdown(socket.SHUT_RDWR)
            cl_socket.close()
            os._exit(1)
def run_server(self):
    """
    Bind the listening socket (unix or tcp), then accept connections
    forever, forking one worker process per accepted client.
    :return:
    """
    if self.sock_type == "unix":
        self.sock.bind(self.sock_name)
    else:
        self.sock.bind((self.sock_name, int(self.port)))

    self.sock.listen(128)
    logging.info("[PID] %d", os.getpid())
    logging.info("MXNet worker started.")
    logging.info("Python runtime: %s", platform.python_version())

    while True:
        if self.service is None and self.preload is True:
            # Lazy loading the models
            self.load_model(self.model_meta_data)

        (cl_socket, _) = self.sock.accept()
        # workaround error(35, 'Resource temporarily unavailable') on OSX
        cl_socket.setblocking(True)

        logging.info("Connection accepted: %s.", cl_socket.getsockname())
        p = multiprocessing.Process(target=self.start_worker, args=(cl_socket,))
        p.start()
        cl_socket.close()  # close accepted socket in the parent
if __name__ == "__main__":
    # Remove mms dir from python path to avoid module name conflict.
    mms_path = os.path.dirname(os.path.realpath(__file__))
    while mms_path in sys.path:
        sys.path.remove(mms_path)

    sock_type = None
    socket_name = None

    # noinspection PyBroadException
    try:
        logging.basicConfig(stream=sys.stdout, format="%(message)s", level=logging.INFO)
        logging.info("model_service_worker started with args: %s", " ".join(sys.argv[1:]))

        # Build the initial load-model request from the CLI arguments.
        model_req = dict()
        args = ArgParser.model_service_worker_args().parse_args()
        socket_name = args.sock_name
        sock_type = args.sock_type
        host = args.host
        port = args.port
        model_req["handler"] = args.handler.encode('utf-8')
        model_req["modelPath"] = args.model_path.encode('utf-8')
        model_req["modelName"] = args.model_name.encode('utf-8')

        worker = MXNetModelServiceWorker(sock_type, socket_name, host, port, model_req,
                                         args.preload_model, args.tmp_dir)
        # run_server() loops forever; reaching the handlers below means failure.
        worker.run_server()
    except socket.timeout:
        logging.error("Backend worker did not receive connection in: %d", SOCKET_ACCEPT_TIMEOUT)
    except Exception:  # pylint: disable=broad-except
        logging.error("Backend worker process die.", exc_info=True)
    finally:
        # Remove the stale unix socket file before exiting with error status.
        if sock_type == 'unix' and os.path.exists(socket_name):
            os.remove(socket_name)
        exit(1)
| 38.333333
| 107
| 0.608486
|
4a1608fb506ae3b138329b1b05400e02c1797868
| 8,589
|
py
|
Python
|
tests/test_04_dxf_high_level_structs/test_406_blocks_section.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
tests/test_04_dxf_high_level_structs/test_406_blocks_section.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
tests/test_04_dxf_high_level_structs/test_406_blocks_section.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011-2019, Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
from ezdxf.tools.test import load_entities
from ezdxf.sections.blocks import BlocksSection
from ezdxf.lldxf.tagwriter import TagCollector
from ezdxf.entities import factory
from ezdxf.lldxf.const import BLK_NON_CONSTANT_ATTRIBUTES
@pytest.fixture
def dxf12():
    """Provide a fresh DXF R12 document."""
    doc_ = ezdxf.new('R12')
    return doc_
@pytest.fixture
def blocks(dxf12):
    """BlocksSection loaded from the TESTBLOCKS fixture text."""
    entities = list(load_entities(TESTBLOCKS, 'BLOCKS'))
    return BlocksSection(dxf12, entities)
@pytest.fixture
def bounded_blocks(dxf12):
    """BlocksSection whose entities are bound to the document database."""
    loaded = list(load_entities(TESTBLOCKS, 'BLOCKS'))
    for e in loaded:
        factory.bind(e, dxf12)
    return BlocksSection(dxf12, loaded)
@pytest.fixture
def doc():
    """New modern document that owns two standard arrow blocks."""
    document = ezdxf.new()
    for name in ('_ARCHTICK', '_OPEN30'):
        document.blocks.new(name)
    return document
def test_empty_section(dxf12):
    """Loading an empty BLOCKS section still creates the layout blocks."""
    section = BlocksSection(dxf12, list(load_entities(EMPTYSEC, 'BLOCKS')))
    # The new entity system creates *Model_Space and *Paper_Space automatically.
    assert '*Model_Space' in section
    assert '*Paper_Space' in section

    tagger = TagCollector(dxfversion=dxf12.dxfversion)
    section.export_dxf(tagger)
    tags = tagger.tags
    assert tags[0] == (0, 'SECTION')
    assert tags[1] == (2, 'BLOCKS')
    assert tags[2] == (0, 'BLOCK')
    # tags[3] is an arbitrary handle:
    assert tags[3][0] == 5
    assert tags[4] == (8, '0')  # default layer '0'
    # R12 export writes the modelspace block name with a leading '$':
    assert tags[5] == (2, '$Model_Space')
    assert tags[-1] == (0, 'ENDSEC')
def test_key(blocks):
    """Block keys are the lowercase block name; accepts names and blocks."""
    assert blocks.key('Test') == 'test'
    new_block = blocks.new('TEST')
    assert blocks.key(new_block) == 'test'
def test_is_layout_block(blocks):
    """Only the required layout blocks report modelspace/paperspace status."""
    user_block = blocks.new('TEST')
    assert user_block.is_any_layout is False
    # The required modelspace block exists already:
    assert blocks.get('*Model_Space').is_modelspace is True
    # The required paperspace block exists already and is the active one:
    paperspace = blocks.get('*Paper_Space')
    assert paperspace.is_any_paperspace is True
    assert paperspace.is_active_paperspace is True
def test_overwrite_existing_block(blocks):
    """Deleting a block frees its (case-insensitive) name for reuse."""
    block = blocks.new('TEST')
    assert block.dxf.name in blocks
    old_len = len(blocks)

    # Block names are case insensitive, so 'Test' clashes with 'TEST':
    with pytest.raises(ezdxf.DXFTableEntryError):
        blocks.new('Test')
    assert len(blocks) == old_len, 'should not create block "TEST"'

    blocks.delete_block('Test', safe=False)
    assert len(blocks) == old_len - 1, 'should remove existing block "TEST"'
    blocks.new('Test')
    assert len(blocks) == old_len, 'should create new block "Test"'
def test_not_in_blocks_section(blocks):
    """Unknown block names are reported as absent."""
    assert ('TEST' in blocks) is False
def test_getitem(blocks):
    """Subscript lookup is case insensitive but preserves the stored name."""
    blocks.new('TEST')
    for key in ('TEST', 'Test'):
        assert blocks[key].name == 'TEST'
def test_new_block_layout(doc):
    """New blocks expose explode/scale flags via convenience properties."""
    layout = doc.blocks.new('NewBlockLayout')
    layout.add_point((0, 0, 0))
    assert len(layout) == 1
    assert layout.can_explode is True
    assert layout.scale_uniformly is False

    # Property writes must be mirrored in the BLOCK_RECORD attributes:
    layout.can_explode = False
    layout.scale_uniformly = True
    assert layout.block_record.dxf.explode == 0
    assert layout.block_record.dxf.scale == 1
def test_case_insensitivity(blocks):
    """Membership tests ignore the case of the block name."""
    blocks.new('TEST')
    for name in ('TEST', 'Test'):
        assert name in blocks
def test_iter_blocks(blocks):
    """Iteration yields all loaded blocks incl. required layout blocks."""
    assert len(list(blocks)) == 4
def test_block_content_entity_drawing_attribute(blocks, dxf12):
    """POLYLINE counts as one entity; VERTEX & SEQEND do not count."""
    content = list(blocks['_ARCHTICK'])
    assert len(content) == 1
def test_delete_block(bounded_blocks, dxf12):
    """Unsafe deletion destroys the block and all contained entities."""
    archtick = bounded_blocks['_ARCHTICK']
    content = list(archtick)
    name = archtick.name

    bounded_blocks.delete_block(name, safe=False)
    assert name not in bounded_blocks
    assert archtick.is_alive is False
    for entity in content:
        assert entity.is_alive is False
def test_safe_delete_block(blocks, dxf12):
    """Safe deletion refuses to remove referenced blocks (case insensitive)."""
    with pytest.raises(ezdxf.DXFBlockInUseError):
        blocks.delete_block('_ArchTick', safe=True)
def test_do_not_delete_layouts_and_special_arrow_blocks(doc):
    """delete_all_blocks() keeps layout blocks and special arrow blocks."""
    doc.blocks.delete_all_blocks()
    assert len(doc.blocks) == 4
    survivors = {block.name for block in doc.blocks}
    assert survivors == {'*Model_Space', '*Paper_Space', '_ARCHTICK', '_OPEN30'}
def test_rename_block(blocks):
    """Renaming works case-insensitively but stores the given spelling."""
    block = blocks.new('RENAME_ME')
    assert block.dxf.name in blocks

    blocks.rename_block('RENAME_ME', 'NEW_NAME')
    assert 'NEW_NAME' in blocks

    # Lookup of the source name is case insensitive ...
    blocks.rename_block('New_Name', 'check_lower_case')
    assert 'Check_Lower_Case' in blocks
    # ... but the original spelling of the new name is preserved:
    assert blocks['Check_Lower_Case'].name == 'check_lower_case'
@pytest.fixture(scope='module')
def dxf2000():
    """Shared R2000 document for the DXF2000 test group."""
    doc_ = ezdxf.new('R2000')
    return doc_
@pytest.fixture
def dxf2000_blocks(dxf2000):
    """Ensure 'TestBlock' with three lines exists; shared across tests."""
    if 'TestBlock' not in dxf2000.blocks:
        block = dxf2000.blocks.new('TestBlock')
        for _ in range(3):
            block.add_line((0, 0), (10, 10))
    return dxf2000.blocks
def test_dxf2000_dxf_block_structure(dxf2000_blocks, dxf2000):
    """Each BLOCK is backed by a matching BLOCK_RECORD table entry."""
    assert 'TestBlock' in dxf2000_blocks
    block = dxf2000_blocks['TestBlock']
    # Does an associated block record entry exist?
    record = dxf2000.tables.block_records.get(block.name)
    assert record.dxf.handle == block.block_record_handle
    assert record.dxf.name == block.name
def test_dxf2000_delete_block(dxf2000_blocks, dxf2000):
    """Deleting a block removes it, its record, and all of its entities."""
    block = dxf2000_blocks['TestBlock']
    name = block.name
    content = list(block)
    count_before = len(dxf2000_blocks)

    dxf2000_blocks.delete_block(name)

    # removed from the blocks section?
    assert len(dxf2000_blocks) == count_before - 1
    assert name not in dxf2000_blocks
    # all block related management data deleted?
    assert block.is_alive is False
    # removed from the block records table?
    assert name not in dxf2000.tables.block_records
    # all entities destroyed?
    for entity in content:
        assert entity.is_alive is False
def test_dxf2000_delete_all_blocks(dxf2000_blocks):
    """Only the required layout blocks survive delete_all_blocks()."""
    dxf2000_blocks.delete_all_blocks()
    names = sorted(block.name for block in dxf2000_blocks)
    # Layout blocks must never be deleted:
    assert names == ['*Model_Space', '*Paper_Space']
def test_dxf2000_rename_block(dxf2000_blocks):
    """Blocks can be renamed in modern DXF versions, too."""
    block = dxf2000_blocks.new('RENAME_ME')
    assert block.dxf.name in dxf2000_blocks
    dxf2000_blocks.rename_block('RENAME_ME', 'NEW_NAME')
    assert 'NEW_NAME' in dxf2000_blocks
def test_update_block_flags(doc):
    """update_block_flags() sets the non-constant-ATTDEF flag."""
    layout = doc.blocks.new('UPDATE_BLOCK_FLAGS')
    layout.add_attdef('TEST', (0, 0))
    assert layout.block.get_flag_state(BLK_NON_CONSTANT_ATTRIBUTES) is False
    layout.update_block_flags()
    assert layout.block.get_flag_state(BLK_NON_CONSTANT_ATTRIBUTES) is True
# DXF text fixture: a BLOCKS section that contains no block definitions.
EMPTYSEC = """ 0
SECTION
2
BLOCKS
0
ENDSEC
"""
# DXF text fixture: a BLOCKS section with modelspace/paperspace blocks and
# two arrow blocks (_ARCHTICK with a POLYLINE, _OPEN30 with three LINEs).
# NOTE(review): DXF group codes are normally right-justified and empty value
# lines matter; confirm this literal survived whitespace handling intact.
TESTBLOCKS = """ 0
SECTION
2
BLOCKS
0
BLOCK
8
0
2
$MODEL_SPACE
70
0
10
0.0
20
0.0
30
0.0
3
$MODEL_SPACE
1
0
ENDBLK
5
21
8
0
0
BLOCK
67
1
8
0
2
$PAPER_SPACE
70
0
10
0.0
20
0.0
30
0.0
3
$PAPER_SPACE
1
0
ENDBLK
5
5B
67
1
8
0
0
BLOCK
8
0
2
_ARCHTICK
70
0
10
0.0
20
0.0
30
0.0
3
_ARCHTICK
1
0
POLYLINE
5
239
8
0
6
BYBLOCK
62
0
66
1
10
0.0
20
0.0
30
0.0
40
0.15
41
0.15
0
VERTEX
5
403
8
0
6
BYBLOCK
62
0
10
-0.5
20
-0.5
30
0.0
0
VERTEX
5
404
8
0
6
BYBLOCK
62
0
10
0.5
20
0.5
30
0.0
0
SEQEND
5
405
8
0
6
BYBLOCK
62
0
0
ENDBLK
5
23B
8
0
0
BLOCK
8
0
2
_OPEN30
70
0
10
0.0
20
0.0
30
0.0
3
_OPEN30
1
0
LINE
5
23D
8
0
6
BYBLOCK
62
0
10
-1.0
20
0.26794919
30
0.0
11
0.0
21
0.0
31
0.0
0
LINE
5
23E
8
0
6
BYBLOCK
62
0
10
0.0
20
0.0
30
0.0
11
-1.0
21
-0.26794919
31
0.0
0
LINE
5
23F
8
0
6
BYBLOCK
62
0
10
0.0
20
0.0
30
0.0
11
-1.0
21
0.0
31
0.0
0
ENDBLK
5
241
8
0
0
ENDSEC
"""
| 17.109562
| 76
| 0.681686
|
4a1609e549990afe65668d7ed63c9ad6a61c2da3
| 6,104
|
py
|
Python
|
sdks/python/http_client/v1/polyaxon_sdk/models/v1_list_projects_response.py
|
erexer/polyaxon
|
be14dae1ed56d568983388736bcdaf27a7baa4a4
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_list_projects_response.py
|
erexer/polyaxon
|
be14dae1ed56d568983388736bcdaf27a7baa4a4
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_list_projects_response.py
|
erexer/polyaxon
|
be14dae1ed56d568983388736bcdaf27a7baa4a4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1ListProjectsResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    openapi_types = {
        "count": "int",
        "results": "list[V1Project]",
        "previous": "str",
        "next": "str",
    }

    attribute_map = {
        "count": "count",
        "results": "results",
        "previous": "previous",
        "next": "next",
    }

    def __init__(self, count=None, results=None, previous=None, next=None,
                 local_vars_configuration=None):  # noqa: E501
        """V1ListProjectsResponse - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._count = None
        self._results = None
        self._previous = None
        self._next = None
        self.discriminator = None

        # Route every non-None constructor argument through its property
        # setter, exactly like the generated per-attribute assignments.
        for name, value in (("count", count), ("results", results),
                            ("previous", previous), ("next", next)):
            if value is not None:
                setattr(self, name, value)

    @property
    def count(self):
        """Gets the count of this V1ListProjectsResponse.  # noqa: E501

        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this V1ListProjectsResponse."""
        self._count = count

    @property
    def results(self):
        """Gets the results of this V1ListProjectsResponse.  # noqa: E501

        :rtype: list[V1Project]
        """
        return self._results

    @results.setter
    def results(self, results):
        """Sets the results of this V1ListProjectsResponse."""
        self._results = results

    @property
    def previous(self):
        """Gets the previous of this V1ListProjectsResponse.  # noqa: E501

        :rtype: str
        """
        return self._previous

    @previous.setter
    def previous(self, previous):
        """Sets the previous of this V1ListProjectsResponse."""
        self._previous = previous

    @property
    def next(self):
        """Gets the next of this V1ListProjectsResponse.  # noqa: E501

        :rtype: str
        """
        return self._next

    @next.setter
    def next(self, next):
        """Sets the next of this V1ListProjectsResponse."""
        self._next = next

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in six.iterkeys(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, V1ListProjectsResponse):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, V1ListProjectsResponse):
            return self.to_dict() != other.to_dict()
        return True
| 27.128889
| 85
| 0.586173
|
4a160a6521ed6df9d9de6090aa5ed89b5079a4c2
| 2,179
|
py
|
Python
|
lambda_sample/index.py
|
pahud/lambda-layer-eksctl
|
203966a6afc58c3e3dd8f42d680aff156a1160c8
|
[
"MIT-0"
] | 1
|
2019-11-15T05:48:27.000Z
|
2019-11-15T05:48:27.000Z
|
lambda_sample/index.py
|
pahud/lambda-layer-eksctl
|
203966a6afc58c3e3dd8f42d680aff156a1160c8
|
[
"MIT-0"
] | 4
|
2019-11-15T02:24:51.000Z
|
2020-06-12T04:30:26.000Z
|
lambda_sample/index.py
|
pahud/lambda-layer-eksctl
|
203966a6afc58c3e3dd8f42d680aff156a1160c8
|
[
"MIT-0"
] | 1
|
2020-04-21T22:28:18.000Z
|
2020-04-21T22:28:18.000Z
|
import subprocess
import os
import json
import logging
import botocore
# Root logger used by all handlers in this module.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Make the eksctl binary shipped in the Lambda layer resolvable on PATH.
os.environ['PATH'] = '/opt/eksctl:' + os.environ['PATH']
# Writable scratch directory; overridable via TEST_OUTDIR for local testing.
outdir = os.environ.get('TEST_OUTDIR', '/tmp')
def eksctl(*args, **kwargs):
    """Run the eksctl CLI with *args and return its cleaned-up output.

    Note: **kwargs is accepted for call-site compatibility but unused.
    """
    cmd = ['eksctl'] + list(args)
    raw = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    # str(bytes) renders as "b'...'"; drop the first two space-separated
    # pieces to strip the bytes-literal prefix from the text.
    return " ".join(str(raw.strip()).split(' ')[2:])
def handler(event, context):
    """Lambda entry point: report the eksctl version bundled in the layer."""
    try:
        logger.info(json.dumps(event))
        version = subprocess.check_output(['eksctl', 'version'],
                                          stderr=subprocess.STDOUT)
        # str(bytes) renders as "b'...'"; drop the first two space-separated
        # pieces to strip the bytes-literal prefix from the text.
        version = " ".join(str(version.strip()).split(' ')[2:])
    except subprocess.CalledProcessError as exc:
        raise Exception(exc.output)
    else:
        logger.info(version)
        return {
            'statusCode': '200',
            'headers': {
                'Content-Type': 'application/json'
            },
            'body': version
        }
def on_event(event, context):
    """Route a CloudFormation custom-resource event to its handler."""
    print(event)
    request_type = event['RequestType']
    if request_type not in ('Create', 'Update', 'Delete'):
        raise Exception("Invalid request type: %s" % request_type)
    if request_type == 'Create':
        return on_create(event)
    return on_update(event) if request_type == 'Update' else on_delete(event)
def on_create(event):
    """Create-handler for the custom resource: report the eksctl version."""
    props = event["ResourceProperties"]
    print("create new resource with props %s" % props)
    # add your create code here...
    data = {'phase': 'on_create', 'result': eksctl('version')}
    return {'PhysicalResourceId': 'eksctlOutput', 'Data': data}
def on_update(event):
    """Update-handler: keep the physical id, refresh the eksctl version."""
    physical_id = event["PhysicalResourceId"]
    props = event["ResourceProperties"]
    print("update resource %s with props %s" % (physical_id, props))
    # ...
    data = {'phase': 'on_update', 'result': eksctl('version')}
    return {'PhysicalResourceId': physical_id, 'Data': data}
def on_delete(event):
    """Delete-handler: nothing to clean up; log the event and return None."""
    physical_id = event["PhysicalResourceId"]
    print("delete resource %s" % physical_id)
    # ...
| 26.901235
| 85
| 0.636072
|
4a160b2f40eb772937cae0707b12ad9efd556abb
| 39,196
|
py
|
Python
|
src/genie/libs/parser/iosxr/show_routing.py
|
devbollinger/genieparser
|
ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxr/show_routing.py
|
devbollinger/genieparser
|
ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxr/show_routing.py
|
devbollinger/genieparser
|
ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c
|
[
"Apache-2.0"
] | null | null | null |
'''
show_route.py
'''
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional
# ====================================================
# schema for show route ipv4
# ====================================================
class ShowRouteIpv4Schema(MetaParser):
    """Schema for show route ipv4"""

    # Declarative schema consumed by genie.metaparser: keys wrapped in
    # Optional() may be absent; Any() matches arbitrary keys (vrf names,
    # address families, prefixes, next-hop indexes, ...).
    schema = {
        'vrf': {
            Any(): {
                'address_family': {
                    Any(): {
                        Optional('routes'): {
                            Any(): {
                                'route': str,
                                'active': bool,
                                Optional('ip'): str,
                                Optional('mask'): str,
                                Optional('route_preference'): int,
                                Optional('metric'): int,
                                Optional('source_protocol'): str,
                                Optional('source_protocol_codes'): str,
                                Optional('known_via'): str,
                                Optional('distance'): int,
                                Optional('type'): str,
                                Optional('installed'): {
                                    'date': str,
                                    'for': str,
                                },
                                Optional('redist_advertisers'): {
                                    Any(): {
                                        'protoid': int,
                                        'clientid': int,
                                    },
                                },
                                'next_hop': {
                                    Optional('outgoing_interface'): {
                                        Any(): {
                                            'outgoing_interface': str,
                                            Optional('updated'): str,
                                            Optional('metric'): int,
                                        }
                                    },
                                    Optional('next_hop_list'): {
                                        Any(): {  # index
                                            'index': int,
                                            Optional('next_hop'): str,
                                            Optional('outgoing_interface'): str,
                                            Optional('updated'): str,
                                            Optional('metric'): int,
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
class ShowRouteIpv4(ShowRouteIpv4Schema):
cli_command = [
'show route ipv4',
'show route vrf {vrf} ipv4',
'show route ipv4 {protocol}',
'show route vrf {vrf} ipv4 {protocol}',
'show route ipv4 {route}',
'show route vrf {vrf} ipv4 {route}'
]
"""
Codes: C - connected, S - static, R - RIP, B - BGP, (>) - Diversion path
D - EIGRP, EX - EIGRP external, O - OSPF, IA - OSPF inter area
N1 - OSPF NSSA external type 1, N2 - OSPF NSSA external type 2
E1 - OSPF external type 1, E2 - OSPF external type 2, E - EGP
i - ISIS, L1 - IS-IS level-1, L2 - IS-IS level-2
ia - IS-IS inter area, su - IS-IS summary null, * - candidate default
U - per-user static route, o - ODR, L - local, G - DAGR, l - LISP
A - access/subscriber, a - Application route
M - mobile route, r - RPL, t - Traffic Engineering, (!) - FRR Backup path
"""
source_protocol_dict = {
'ospf': ['O', 'IA', 'N1', 'N2', 'E1', 'E2'],
'odr': ['o'],
'isis': ['i', 'su', 'L1', 'L2', 'ia'],
'eigrp': ['D', 'EX'],
'static': ['S'],
'egp': ['E'],
'dagr': ['G'],
'rpl': ['r'],
'mobile router': ['M'],
'lisp': ['I', 'l'],
'nhrp': ['H'],
'local': ['L'],
'connected': ['C'],
'bgp': ['B'],
'rip': ['R'],
'per-user static route': ['U'],
'access/subscriber': ['A'],
'traffic engineering': ['t'],
}
protocol_set = {'ospf', 'odr', 'isis', 'eigrp', 'static', 'mobile',
'rip', 'lisp', 'nhrp', 'local', 'connected', 'bgp'}
def cli(self, vrf=None, route=None, protocol=None, output=None):
# Check if argument from device.parse is protocol or route
if protocol and protocol not in self.protocol_set:
route = protocol
protocol = None
if output is None:
if vrf and route:
cmd = self.cli_command[5].format(
vrf=vrf,
route=route
)
elif vrf and protocol:
cmd = self.cli_command[3].format(
vrf=vrf,
protocol=protocol
)
elif vrf:
cmd = self.cli_command[1].format(
vrf=vrf
)
elif protocol:
cmd = self.cli_command[2].format(
protocol=protocol
)
elif route:
cmd = self.cli_command[4].format(
route=route
)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# VRF: VRF501
p1 = re.compile(r'^\s*VRF: +(?P<vrf>[\w]+)$')
# R 1.0.0.0/8 [120/1] via 10.12.120.1, 1w0d, GigabitEthernet0/0/0/0.120
# B 10.21.33.33/32 [200/0] via 10.166.13.13, 00:52:31
# i L2 10.154.219.32/32 [115/100030] via 10.4.1.1, 1d06h, HundredGigE0/0/1/1 (!)
# S 10.36.3.3/32 [1/0] via 10.2.3.3, 01:51:13, GigabitEthernet0/0/0/1
# B 10.19.31.31/32 [200/0] via 10.229.11.11, 00:55:14
# i L1 10.76.23.23/32 [115/11] via 10.2.3.3, 00:52:41, GigabitEthernet0/0/0/1
# S* 192.168.4.4/10 [111/10] via 172.16.84.11, 1w0d
# R 10.145.110.10/4 [10/10] via 192.168.10.12, 12:03:42, GigabitEthernet0/0/1/1.1
# B 10.100.3.160/31 [200/0] via 172.23.6.198 (nexthop in vrf default), 5d13h
p2 = re.compile(r'^(?P<code1>[\w](\*)*)\s*(?P<code2>\S+)? +(?P<network>\S+) +'
r'\[(?P<route_preference>\d+)\/(?P<metric>\d+)\] +via +'
r'(?P<next_hop>\S+)( +\(nexthop +in +vrf +\w+\))?,'
r'( +(?P<date>[\w:]+),?)?( +(?P<interface>[\w\/\.\-]+))?'
r'( +(?P<code3>[\w\*\(\>\)\!]+))?$')
# [90/15360] via 10.23.90.3, 1w0d, GigabitEthernet0/0/0/1.90
# [110/2] via 10.1.2.1, 01:50:49, GigabitEthernet0/0/0/3
p3 = re.compile(r'^\[(?P<route_preference>\d+)\/(?P<metric>\d+)\] +via +'
r'(?P<next_hop>\S+),( +(?P<date>[\w:]+))?,? +'
r'(?P<interface>[\w\/\.\-]+)$')
# L 2.2.2.2/32 is directly connected, 3w5d, Loopback0
# is directly connected, 01:51:13, GigabitEthernet0/0/0/3
# S 10.4.1.1/32 is directly connected, 01:51:13, GigabitEthernet0/0/0/0
p4 = re.compile(r'^((?P<code1>[\w](\*)*)(\s*(?P<code2>\S+))? +'
r'(?P<network>\S+) +)?(is +directly +connected, +'
r'(?P<date>[\w:]+))?,? *(?P<interface>[\w\/\.\-]+)?$$')
# Routing entry for 10.151.0.0/24, 1 known subnets
# Routing entry for 0.0.0.0/0, supernet
# Routing entry for 192.168.154.0/24
p5 = re.compile(r'^Routing +entry +for +(?P<network>(?P<ip>[\w\:\.]+)'
r'\/(?P<mask>\d+))(?:, +(?P<net>[\w\s]+))?$')
# Known via "connected", distance 0, metric 0 (connected)
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "bgp 65161", distance 20, metric 0, candidate default path
p6 = re.compile(r'^Known +via +\"(?P<known_via>[\w ]+)\", +distance +'
r'(?P<distance>\d+), +metric +(?P<metric>\d+)( \(connected\))?'
r'(, +type +(?P<type>\S+))?(, +candidate +default +path)?$')
# * directly connected, via GigabitEthernet1.120
p7 = re.compile(r'^(\* +)?directly +connected, via +(?P<interface>\S+)$')
# Route metric is 10880, traffic share count is 1
p8 = re.compile(r'^Route +metric +is +(?P<metric>\d+)(, +'
r'traffic +share +count +is +(?P<share_count>\d+))?$')
# eigrp/100 (protoid=5, clientid=22)
p9 = re.compile(r'^(?P<redist_advertiser>\S+) +\(protoid=(?P<protoid>\d+)'
r', +clientid=(?P<clientid>\d+)\)$')
# Installed Oct 23 22:09:38.380 for 5d21h
p10 = re.compile(r'^Installed +(?P<date>[\S\s]+) +for +(?P<for>\S+)$')
# 10.12.90.1, from 10.12.90.1, via GigabitEthernet0/0/0/0.90
p11 = re.compile(r'^(?P<nexthop>\S+), from +(?P<from>\S+), '
r'+via +(?P<interface>\S+)$')
# R2_xrv#show route ipv4
# Routing Descriptor Blocks
# No advertising protos.
p12 = re.compile(r'^((\S+#)?(show +route))|(Routing +Descriptor +'
r'Blocks)|(No +advertising +protos\.)|(Redist +Advertisers:)')
# initial variables
ret_dict = {}
index = 0
address_family = 'ipv4'
if not vrf:
vrf = 'default'
for line in out.splitlines():
line = line.strip()
# R2_xrv#show route ipv4
# Routing Descriptor Blocks
# No advertising protos.
m = p12.match(line)
if m or not line:
continue
# VRF: VRF501
m = p1.match(line)
if m:
vrf = m.groupdict()['vrf']
continue
# R 1.0.0.0/8 [120/1] via 10.12.120.1, 1w0d, GigabitEthernet0/0/0/0.120
m = p2.match(line)
if m:
group = m.groupdict()
code1 = group['code1']
source_protocol_code = re.split('\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group['code2']
if code2:
code1 = '{} {}'.format(code1, code2)
code3 = group['code3']
if code3:
code1 = '{} {}'.format(code1, code3)
network = group['network']
route_preference = int(group['route_preference'])
metric = int(group['metric'])
next_hop = group['next_hop']
updated = group['date']
interface = group['interface']
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'active': True})
route_dict.update({'route_preference': route_preference})
route_dict.update({'metric': metric})
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
index = 1
next_hop_list_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
next_hop_list_dict.update({'index': index})
next_hop_list_dict.update({'next_hop': next_hop})
if interface:
next_hop_list_dict.update({'outgoing_interface': interface})
if updated:
next_hop_list_dict.update({'updated': updated})
continue
# [90/15360] via 10.23.90.3, 1w0d, GigabitEthernet0/0/0/1.90
m = p3.match(line)
if m:
group = m.groupdict()
route_preference = int(group['route_preference'])
metric = int(group['metric'])
next_hop = group['next_hop']
updated = group['date']
interface = group['interface']
route_dict.update({'route_preference': route_preference})
route_dict.update({'metric': metric})
index += 1
next_hop_list_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
next_hop_list_dict.update({'index': index})
next_hop_list_dict.update({'next_hop': next_hop})
if interface:
next_hop_list_dict.update({'outgoing_interface': interface})
if updated:
next_hop_list_dict.update({'updated': updated})
continue
# L 2.2.2.2/32 is directly connected, 3w5d, Loopback0
# is directly connected, 01:51:13, GigabitEthernet0/0/0/3
m = p4.match(line)
if m:
try:
group = m.groupdict()
code1 = group.get('code1', None)
source_protocol = None
network = group.get('network', None)
updated = group.get('date', None)
interface = group.get('interface', None)
if network:
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'active': True})
if code1:
source_protocol_code = re.split('\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group.get('code2', None)
if code2:
code1 = '{} {}'.format(code1, code2)
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('outgoing_interface', {}). \
setdefault(interface, {})
if interface:
outgoing_interface_dict.update({'outgoing_interface': interface})
if updated:
outgoing_interface_dict.update({'updated': updated})
except Exception:
print('--->'+line)
continue
# Routing entry for 10.151.0.0/24, 1 known subnets
# Routing entry for 0.0.0.0/0, supernet
# Routing entry for 192.168.154.0/24
m = p5.match(line)
if m:
group = m.groupdict()
network = group['network']
ip = group['ip']
mask = group['mask']
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'ip': ip})
route_dict.update({'mask': mask})
route_dict.update({'active': True})
continue
# Known via "static", distance 1, metric 0, candidate default path
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "rip", distance 120, metric 2
# Known via "connected", distance 0, metric 0 (connected)
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "bgp 65161", distance 20, metric 0, candidate default path
m = p6.match(line)
if m:
group = m.groupdict()
known_via = group['known_via']
metric = int(group['metric'])
distance = int(group['distance'])
_type = group['type']
route_dict.update({'known_via': known_via})
route_dict.update({'metric': metric})
route_dict.update({'distance': distance})
if _type:
route_dict.update({'type': _type})
continue
# * directly connected, via GigabitEthernet1.120
m = p7.match(line)
if m:
group = m.groupdict()
code1 = group.get('code1', None)
source_protocol = None
network = group.get('network', None)
updated = group.get('date', None)
interface = group.get('interface', None)
if network:
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'active': True})
if code1:
source_protocol_code = re.split('\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group.get('code2', None)
if code2:
code1 = '{} {}'.format(code1, code2)
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
if interface:
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('outgoing_interface', {}). \
setdefault(interface, {})
outgoing_interface_dict.update({'outgoing_interface': interface})
if updated:
outgoing_interface_dict.update({'updated': updated})
# Route metric is 10880, traffic share count is 1
m = p8.match(line)
if m:
group = m.groupdict()
metric = int(group['metric'])
outgoing_interface_dict.update({'metric': metric})
if group.get('share_count', None):
share_count = int(group['share_count'])
outgoing_interface_dict.update({'share_count': share_count})
# outgoing_interface_dict.update({k:v for k,v in group.items() if v})
continue
# eigrp/100 (protoid=5, clientid=22)
m = p9.match(line)
if m:
group = m.groupdict()
redist_advertiser = group['redist_advertiser']
protoid = int(group['protoid'])
clientid = int(group['clientid'])
redist_advertiser_dict = route_dict.setdefault('redist_advertisers', {}). \
setdefault(redist_advertiser, {})
redist_advertiser_dict.update({'protoid': protoid})
redist_advertiser_dict.update({'clientid': clientid})
continue
# Installed Oct 23 22:09:38.380 for 5d21h
m = p10.match(line)
if m:
group = m.groupdict()
installed_dict = route_dict.setdefault('installed', {})
installed_dict.update({k:v for k,v in group.items() if v})
continue
# 10.12.90.1, from 10.12.90.1, via GigabitEthernet0/0/0/0.90
m = p11.match(line)
if m:
group = m.groupdict()
nexthop = group['nexthop']
_from = group['from']
interface = group['interface']
index += 1
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
outgoing_interface_dict.update({'index': index})
outgoing_interface_dict.update({'outgoing_interface': interface})
outgoing_interface_dict.update({'from': _from})
outgoing_interface_dict.update({'next_hop': nexthop})
continue
return ret_dict
# ====================================================
# parser for show route ipv6
# ====================================================
class ShowRouteIpv6(ShowRouteIpv4Schema):
    """Parser for :
        show route ipv6
        show route vrf <vrf> ipv6"""

    # Command templates; cli() picks the right one based on which of
    # vrf / protocol / route were supplied.
    cli_command = [
        'show route ipv6',
        'show route vrf {vrf} ipv6',
        'show route ipv6 {protocol}',
        'show route vrf {vrf} ipv6 {protocol}',
        'show route ipv6 {route}',
        'show route vrf {vrf} ipv6 {route}'
    ]

    # Device legend, kept for reference when reading the code-letter
    # handling below (bare string, intentionally not a docstring).
    """
    Codes: C - connected, S - static, R - RIP, B - BGP, (>) - Diversion path
    D - EIGRP, EX - EIGRP external, O - OSPF, IA - OSPF inter area
    N1 - OSPF NSSA external type 1, N2 - OSPF NSSA external type 2
    E1 - OSPF external type 1, E2 - OSPF external type 2, E - EGP
    i - ISIS, L1 - IS-IS level-1, L2 - IS-IS level-2
    ia - IS-IS inter area, su - IS-IS summary null, * - candidate default
    U - per-user static route, o - ODR, L - local, G - DAGR, l - LISP
    A - access/subscriber, a - Application route
    M - mobile route, r - RPL, t - Traffic Engineering, (!) - FRR Backup path
    """

    # Maps a source-protocol name to the route code letters that identify
    # it in the device output (see legend above).
    source_protocol_dict = {
        'ospf': ['O', 'IA', 'N1', 'N2', 'E1', 'E2'],
        'odr': ['o'],
        'isis': ['i', 'su', 'L1', 'L2', 'ia'],
        'eigrp': ['D', 'EX'],
        'static': ['S'],
        'egp': ['E'],
        'dagr': ['G'],
        'rpl': ['r'],
        'mobile router': ['M'],
        'lisp': ['I', 'l'],
        'nhrp': ['H'],
        'local': ['L'],
        'connected': ['C'],
        'bgp': ['B'],
        'rip': ['R'],
        'per-user static route': ['U'],
        'access/subscriber': ['A'],
        'traffic engineering': ['t'],
    }

    # Keywords accepted as the {protocol} CLI argument; any other value
    # passed in that position is reinterpreted as a route (see cli()).
    protocol_set = {'ospf', 'odr', 'isis', 'eigrp', 'static', 'mobile',
                    'rip', 'lisp', 'nhrp', 'local', 'connected', 'bgp'}

    def cli(self, vrf=None, route=None, protocol=None, output=None):
        """Collect and parse 'show route ipv6' output.

        Args:
            vrf: VRF name; defaults to the 'default' VRF when omitted.
            route: specific route/prefix to look up.
            protocol: protocol filter; must be one of protocol_set,
                otherwise it is treated as a route.
            output: pre-collected device output (skips device.execute).

        Returns:
            dict structured per ShowRouteIpv4Schema.
        """
        # Check if argument from device.parse is protocol or route:
        # an unknown "protocol" keyword is really a route lookup.
        if protocol and protocol not in self.protocol_set:
            route = protocol
            protocol = None

        if output is None:
            # Build the command from the template matching the given args.
            if vrf and route:
                cmd = self.cli_command[5].format(
                    vrf=vrf,
                    route=route
                )
            elif vrf and protocol:
                cmd = self.cli_command[3].format(
                    vrf=vrf,
                    protocol=protocol
                )
            elif vrf:
                cmd = self.cli_command[1].format(
                    vrf=vrf
                )
            elif protocol:
                cmd = self.cli_command[2].format(
                    protocol=protocol
                )
            elif route:
                cmd = self.cli_command[4].format(
                    route=route
                )
            else:
                cmd = self.cli_command[0]
            out = self.device.execute(cmd)
        else:
            out = output

        # VRF: VRF501
        p1 = re.compile(r'^\s*VRF: +(?P<vrf>[\w]+)$')

        # S 2001:1:1:1::1/128
        p2 = re.compile(r'^(?P<code1>\w(\*)?) *(?P<code2>\w+)? +'
                        '(?P<network>[\w\:\/]+)$')

        # [1/0] via 2001:20:1:2::1, 01:52:23, GigabitEthernet0/0/0/0
        # [200/0] via 2001:13:13:13::13, 00:53:22
        # [0/0] via ::, 5w2d
        p3 = re.compile(r'^\[(?P<route_preference>\d+)\/(?P<metric>\d+)\] +'
                        'via +(?P<next_hop>\S+)( +\(nexthop +in +vrf +\w+\))?,'
                        '( +(?P<date>[\w:]+))?,?( +(?P<interface>[\w\/\.\-]+))?$')

        # L 2001:2:2:2::2/128 is directly connected,
        p4 = re.compile(r'^((?P<code1>[\w](\*)*)\s*(?P<code2>\S+)? +'
                        '(?P<network>\S+) +)?is +directly +connected,$')

        # 01:52:24, Loopback0
        p5 = re.compile(r'^(?P<date>[\w+:]+), +(?P<interface>\S+)$')

        # Routing entry for 2001:1:1:1::1/128, 1 known subnets
        # Routing entry for 2001:1:1:1::1/128, supernet
        # Routing entry for 2001:1:1:1::1/128
        p6 = re.compile(r'^Routing +entry +for +(?P<network>(?P<ip>[\w\:\.]+)'
                        r'\/(?P<mask>\d+))(?:, +(?P<net>[\w\s]+))?$')

        # Known via "connected", distance 0, metric 0 (connected)
        # Known via "eigrp 1", distance 130, metric 10880, type internal
        # Known via "bgp 65161", distance 20, metric 0, candidate default path
        p7 = re.compile(r'^Known +via +\"(?P<known_via>[\w ]+)\", +'
                        'distance +(?P<distance>\d+), +metric +(?P<metric>\d+)'
                        '( \(connected\))?(, +type +(?P<type>\S+))?(, +candidate +'
                        'default +path)?$')

        # * directly connected, via GigabitEthernet1.120
        p8 = re.compile(r'^(\* +)?directly +connected, via +(?P<interface>\S+)$')

        # Route metric is 10880, traffic share count is 1
        p9 = re.compile(r'^Route +metric +is +(?P<metric>\d+)(, +'
                        r'traffic +share +count +is +(?P<share_count>\d+))?$')

        # eigrp/100 (protoid=5, clientid=22)
        p10 = re.compile(r'^(?P<redist_advertiser>\S+) +\(protoid=(?P<protoid>\d+)'
                         r', +clientid=(?P<clientid>\d+)\)$')

        # Installed Oct 23 22:09:38.380 for 5d21h
        p11 = re.compile(r'^Installed +(?P<date>[\S\s]+) +for +(?P<for>\S+)$')

        # fe80::f816:3eff:fe76:b56d, from fe80::f816:3eff:fe76:b56d, via GigabitEthernet0/0/0/0.390
        p12 = re.compile(r'^(?P<nexthop>\S+), from +(?P<from>\S+), '
                         r'+via +(?P<interface>\S+)$')

        # Lines to skip entirely: echoed prompts and section headers.
        # R2_xrv#show route ipv6
        p13 = re.compile(r'^((\S+#)?(show +route))|(Routing +Descriptor +'
                         r'Blocks)|(No +advertising +protos\.)|(Redist +Advertisers:)')

        ret_dict = {}
        address_family = 'ipv6'
        index = 0
        if not vrf:
            vrf = 'default'

        for line in out.splitlines():
            line = line.strip()

            # R2_xrv#show route ipv6
            # Routing Descriptor Blocks
            # No advertising protos.
            m = p13.match(line)
            if m or not line:
                continue

            # VRF: VRF501
            m = p1.match(line)
            if m:
                vrf = m.groupdict()['vrf']
                continue

            # S 2001:1:1:1::1/128
            m = p2.match(line)
            if m:
                group = m.groupdict()
                code1 = group['code1']
                # Strip decorations (*, (!), (>)) to get the bare code letter,
                # then resolve it to a protocol name via source_protocol_dict.
                source_protocol_code = re.split('\*|\(\!\)|\(\>\)', code1)[0].strip()
                for key,val in self.source_protocol_dict.items():
                    if source_protocol_code in val:
                        source_protocol = key

                code2 = group['code2']
                if code2:
                    code1 = '{} {}'.format(code1, code2)

                network = group['network']
                route_dict = ret_dict.setdefault('vrf', {}). \
                    setdefault(vrf, {}). \
                    setdefault('address_family', {}). \
                    setdefault(address_family, {}). \
                    setdefault('routes', {}). \
                    setdefault(network, {})

                route_dict.update({'source_protocol': source_protocol})
                route_dict.update({'source_protocol_codes': code1})
                route_dict.update({'route': network})
                route_dict.update({'active': True})
                # Next-hop entries for this route start counting from 1 again.
                index = 0
                continue

            # [1/0] via 2001:20:1:2::1, 01:52:23, GigabitEthernet0/0/0/0
            m = p3.match(line)
            if m:
                group = m.groupdict()
                route_preference = int(group['route_preference'])
                metric = int(group['metric'])
                next_hop = group.get('next_hop', None)
                updated = group.get('date', None)
                interface = group.get('interface', None)

                route_dict.update({'route_preference': route_preference})
                route_dict.update({'metric': metric})

                index += 1
                next_hop_list_dict = route_dict.setdefault('next_hop', {}). \
                    setdefault('next_hop_list', {}). \
                    setdefault(index, {})

                next_hop_list_dict.update({'index': index})
                if next_hop:
                    next_hop_list_dict.update({'next_hop': next_hop})
                if interface:
                    next_hop_list_dict.update({'outgoing_interface': interface})
                if updated:
                    next_hop_list_dict.update({'updated': updated})
                continue

            # L 2001:2:2:2::2/128 is directly connected,
            m = p4.match(line)
            if m:
                group = m.groupdict()
                code1 = group.get('code1', None)
                source_protocol = None
                network = group.get('network', None)
                updated = group.get('date', None)
                interface = group.get('interface', None)
                if network:
                    route_dict = ret_dict.setdefault('vrf', {}). \
                        setdefault(vrf, {}). \
                        setdefault('address_family', {}). \
                        setdefault(address_family, {}). \
                        setdefault('routes', {}). \
                        setdefault(network, {})
                    route_dict.update({'route': network})
                    route_dict.update({'active': True})
                if code1:
                    source_protocol_code = re.split('\*|\(\!\)|\(\>\)', code1)[0].strip()
                    for key,val in self.source_protocol_dict.items():
                        if source_protocol_code in val:
                            source_protocol = key
                    code2 = group.get('code2', None)
                    if code2:
                        code1 = '{} {}'.format(code1, code2)
                    if source_protocol:
                        route_dict.update({'source_protocol': source_protocol})
                        route_dict.update({'source_protocol_codes': code1})
                continue

            # 01:52:24, Loopback0
            m = p5.match(line)
            if m:
                group = m.groupdict()
                updated = group['date']
                interface = group['interface']
                outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
                    setdefault('outgoing_interface', {}). \
                    setdefault(interface, {})
                outgoing_interface_dict.update({'outgoing_interface': interface})
                outgoing_interface_dict.update({'updated': updated})
                continue

            # Routing entry for 2001:1:1:1::1/128, 1 known subnets
            # Routing entry for 2001:1:1:1::1/128, supernet
            # Routing entry for 2001:1:1:1::1/128
            m = p6.match(line)
            if m:
                group = m.groupdict()
                network = group['network']
                ip = group['ip']
                mask = group['mask']
                route_dict = ret_dict.setdefault('vrf', {}). \
                    setdefault(vrf, {}). \
                    setdefault('address_family', {}). \
                    setdefault(address_family, {}). \
                    setdefault('routes', {}). \
                    setdefault(network, {})
                route_dict.update({'route': network})
                route_dict.update({'ip': ip})
                route_dict.update({'mask': mask})
                route_dict.update({'active': True})
                continue

            # Known via "static", distance 1, metric 0, candidate default path
            # Known via "eigrp 1", distance 130, metric 10880, type internal
            # Known via "rip", distance 120, metric 2
            # Known via "connected", distance 0, metric 0 (connected)
            # Known via "eigrp 1", distance 130, metric 10880, type internal
            # Known via "bgp 65161", distance 20, metric 0, candidate default path
            m = p7.match(line)
            if m:
                group = m.groupdict()
                known_via = group['known_via']
                metric = int(group['metric'])
                distance = int(group['distance'])
                _type = group['type']
                route_dict.update({'known_via': known_via})
                route_dict.update({'metric': metric})
                route_dict.update({'distance': distance})
                if _type:
                    route_dict.update({'type': _type})
                continue

            # * directly connected, via GigabitEthernet1.120
            m = p8.match(line)
            if m:
                group = m.groupdict()
                code1 = group.get('code1', None)
                source_protocol = None
                network = group.get('network', None)
                updated = group.get('date', None)
                interface = group.get('interface', None)
                if network:
                    route_dict = ret_dict.setdefault('vrf', {}). \
                        setdefault(vrf, {}). \
                        setdefault('address_family', {}). \
                        setdefault(address_family, {}). \
                        setdefault('routes', {}). \
                        setdefault(network, {})
                    route_dict.update({'route': network})
                    route_dict.update({'active': True})
                if code1:
                    source_protocol_code = re.split('\*|\(\!\)|\(\>\)', code1)[0].strip()
                    for key,val in self.source_protocol_dict.items():
                        if source_protocol_code in val:
                            source_protocol = key
                    code2 = group.get('code2', None)
                    if code2:
                        code1 = '{} {}'.format(code1, code2)
                    route_dict.update({'source_protocol': source_protocol})
                    route_dict.update({'source_protocol_codes': code1})
                # NOTE(review): if p8 yields no interface this creates a None
                # key under outgoing_interface — confirm device output always
                # includes the interface name here.
                outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
                    setdefault('outgoing_interface', {}). \
                    setdefault(interface, {})
                if interface:
                    outgoing_interface_dict.update({'outgoing_interface': interface})
                if updated:
                    outgoing_interface_dict.update({'updated': updated})
                # NOTE(review): no 'continue' here, so the remaining patterns
                # are also tried against this line; in practice they do not
                # match the 'directly connected' form.

            # Route metric is 10880, traffic share count is 1
            m = p9.match(line)
            if m:
                group = m.groupdict()
                metric = int(group['metric'])
                # Metric attaches to the most recent outgoing-interface entry
                # created by the p8 branch above.
                outgoing_interface_dict.update({'metric': metric})
                if group.get('share_count', None):
                    share_count = int(group['share_count'])
                    outgoing_interface_dict.update({'share_count': share_count})
                # outgoing_interface_dict.update({k:v for k,v in group.items() if v})
                continue

            # eigrp/100 (protoid=5, clientid=22)
            m = p10.match(line)
            if m:
                group = m.groupdict()
                redist_advertiser = group['redist_advertiser']
                protoid = int(group['protoid'])
                clientid = int(group['clientid'])
                redist_advertiser_dict = route_dict.setdefault('redist_advertisers', {}). \
                    setdefault(redist_advertiser, {})
                redist_advertiser_dict.update({'protoid': protoid})
                redist_advertiser_dict.update({'clientid': clientid})
                continue

            # Installed Oct 23 22:09:38.380 for 5d21h
            m = p11.match(line)
            if m:
                group = m.groupdict()
                installed_dict = route_dict.setdefault('installed', {})
                installed_dict.update({k:v for k,v in group.items() if v})
                continue

            # fe80::f816:3eff:fe76:b56d, from fe80::f816:3eff:fe76:b56d, via GigabitEthernet0/0/0/0.390
            m = p12.match(line)
            if m:
                group = m.groupdict()
                nexthop = group['nexthop']
                _from = group['from']
                interface = group['interface']
                index += 1
                outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
                    setdefault('next_hop_list', {}). \
                    setdefault(index, {})
                outgoing_interface_dict.update({'index': index})
                outgoing_interface_dict.update({'outgoing_interface': interface})
                outgoing_interface_dict.update({'from': _from})
                outgoing_interface_dict.update({'next_hop': nexthop})
                continue

        return ret_dict
| 42.837158
| 103
| 0.448158
|
4a160c22b83e59ed4cc03a215bd00cec8d9ce349
| 4,448
|
py
|
Python
|
StockAnalysisSystem/plugin/SubService/WebServiceProvider/sas_terminal.py
|
SleepySoft/StockAnalysisSystem
|
75f95738831614f7946f85d09118e447f7ac6dc7
|
[
"Apache-2.0"
] | 138
|
2018-01-03T03:32:49.000Z
|
2022-03-12T02:57:46.000Z
|
StockAnalysisSystem/plugin/SubService/WebServiceProvider/sas_terminal.py
|
SleepySoft/StockAnalysisSystem
|
75f95738831614f7946f85d09118e447f7ac6dc7
|
[
"Apache-2.0"
] | 9
|
2018-01-01T03:16:24.000Z
|
2021-05-27T09:57:24.000Z
|
StockAnalysisSystem/plugin/SubService/WebServiceProvider/sas_terminal.py
|
SleepySoft/StockAnalysisSystem
|
75f95738831614f7946f85d09118e447f7ac6dc7
|
[
"Apache-2.0"
] | 50
|
2019-08-05T01:02:30.000Z
|
2022-03-07T00:52:14.000Z
|
import StockAnalysisSystem.core.api as sasApi
from StockAnalysisSystem.interface.interface import SasInterface
from StockAnalysisSystem.core.Utility.digit_utility import to_int
class TerminalContext:
    """Per-interaction context handed to SasTerminal.

    Attributes:
        result_handler: object used by the caller to deliver results.
        context: free-form keyword context supplied at construction.
    """

    def __init__(self, result_handler, **kwargs):
        """Store the result handler and any extra keyword context."""
        self.result_handler = result_handler
        self.context = kwargs
TEXT_SPLITTER = '\n---------------------------------\n'
class SasTerminal:
    """Chat-style terminal front end for the Stock Analysis System.

    Takes free-form user text, guesses whether it names a security, and
    replies with either help text or a risk summary for that security.
    """

    # Inputs shorter than this are answered with the help text.
    MIN_INPUT = 3

    def __init__(self, sas_if: SasInterface, sas_api: sasApi):
        self.__sas_if = sas_if
        self.__sas_api = sas_api
        # Result-detail URL template; '%s' is filled with the stock identity.
        self.__result_url = sas_api.config().get('analysis_result_url', 'http://sleepysoft.xyz/analysis?security=%s')

    def interact(self, ctx: TerminalContext, input_text: str) -> str:
        """Handle one round of user input and return the reply text."""
        command, parameter = self.analysis_input_text(input_text)
        result = self.dispatch_command(ctx, command, parameter)
        return result

    # ----------------------------------------------------------------------------------------

    def analysis_input_text(self, input_text: str) -> (str, list or str):
        """Classify raw input as a (command, parameter) pair.

        Returns ('analysis', [securities...]) when the text looks like a
        security name/code, otherwise ('help', '').
        """
        if len(input_text) < SasTerminal.MIN_INPUT:
            return 'help', ''
        securities = self.__sas_api.data_utility().guess_securities(input_text)
        if len(securities) > 0:
            return 'analysis', securities
        return 'help', ''

    def dispatch_command(self, ctx: TerminalContext, command: str, parameter: list or str) -> str:
        """Route a classified command to its handler; unknown commands yield ''."""
        if command == 'help':
            result = self.command_help()
        elif command == 'analysis':
            result = self.command_analysis(parameter)
        else:
            result = ''
        return result

    # ----------------------------------------------------------------------------------------

    def command_help(self) -> str:
        """Return the (Chinese) usage hint."""
        return '''直接输入股票名或股票代码:查看股票分析'''

    def command_analysis(self, securities: list) -> str:
        """Build the risk-summary reply for exactly one matched security.

        Args:
            securities: candidate stock identities from guess_securities().

        Returns:
            Reply text: a disambiguation list when several match, an error
            when none match, otherwise the per-analyzer risk summary.
        """
        if len(securities) > 1:
            return '你输入的股票有多种可能\n' + '\n'.join(securities)
        elif len(securities) == 1:
            pass
        else:
            return '你输入的股票不存在'
        stock_identity = securities[0]

        df = self.__sas_api.data_center().query('Result.Analyzer', stock_identity)
        if df is None or df.empty:
            return '无数据'
        # Keep only the most recent period's result per analyzer.
        df = df.sort_values(by="period").drop_duplicates(subset=["analyzer"], keep="last")

        stock_name = self.__sas_api.data_utility().stock_identity_to_name(stock_identity)
        text = '%s [%s]' % (stock_name, stock_identity)
        if df.empty:
            return text + '无数据'

        strategy_name_dict = self.__sas_api.strategy_entry().strategy_name_dict()

        # Collect analyzers that flagged a risk (score <= 60; unparsable
        # scores default to 999 and are skipped).
        text_items = []
        for analyzer, period, brief, score in \
                zip(df['analyzer'], df['period'], df['brief'], df['score']):
            if score is not None and to_int(score, 999) <= 60:
                text_items.append('> %s: %s' % (strategy_name_dict.get(analyzer), brief))

        if len(text_items) == 0:
            text += '未发现风险项目'
        else:
            text += '风险项目'
            text += TEXT_SPLITTER
            text += '\n'.join(text_items)

        # url = 'http://211.149.229.160/analysis?security=%s' % stock_identity
        url = self.__result_url % stock_identity
        result_link = '详情: %s' % url

        text += TEXT_SPLITTER
        text += result_link
        return text
# # Warning: Advanced operation - Directly operate database collection
#
# from StockAnalysisSystem.core.DataHub.DataAgent import DataAgent
# from StockAnalysisSystem.core.UniversalDataDepot.DepotMongoDB import DepotMongoDB
#
# agent: DataAgent = self.__sas_api.data_center().get_data_agent('Result.Analyzer')
# if agent is None:
# return '数据不支持'
#
# prob = agent.prob()
# depot: DepotMongoDB = prob.get('depot', None)
# if not isinstance(depot, DepotMongoDB):
# return '数据不支持'
#
# collection = depot.raw()
# if collection is None:
# return '数据不支持'
#
# result = collection.aggregate([
# {'$match': {'stock_identity': securities[0]}},
# {'$sort': {'period': -1, 'analyzer': -1}},
# {'$group': {
# '_id': None,
# 'period': {'$last': '$period'},
# 'analyzer': {'$first': '$analyzer'}
# }}
# ])
# result_l = list(result)
| 35.584
| 117
| 0.558903
|
4a160c5eff50fe322558281fabf9dc4f7ec52a04
| 3,973
|
py
|
Python
|
examples/dfp/v201411/placement_service/create_placements.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | 1
|
2015-08-12T14:47:40.000Z
|
2015-08-12T14:47:40.000Z
|
examples/dfp/v201411/placement_service/create_placements.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | 1
|
2020-07-24T15:10:10.000Z
|
2020-07-24T15:10:10.000Z
|
examples/dfp/v201411/placement_service/create_placements.py
|
coxmediagroup/googleads-python-lib
|
f85d5d8ab771e93b03b616ef65e2d3082aeef484
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new placements for various ad unit sizes.
To determine which placements exist, run get_all_placements.py.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Create one placement per supported creative size.

  Pages through every ad unit in the network, groups ad unit IDs by the
  creative sizes they hold (300x250, 120x600, 468x60), then creates a
  placement for each size that matched at least one ad unit.

  Args:
    client: an initialized dfp.DfpClient instance.
  """
  # Initialize appropriate services.
  placement_service = client.GetService('PlacementService', version='v201411')
  inventory_service = client.GetService('InventoryService', version='v201411')

  # (width, height, placement skeleton), in creation order.
  size_targets = [
      ('300', '250', {
          'name': 'Medium rectangle AdUnit Placement #%s' % uuid.uuid4(),
          'description': 'Contains ad units that hold creatives of size 300x250',
          'targetedAdUnitIds': []
      }),
      ('120', '600', {
          'name': 'Skyscraper AdUnit Placement #%s' % uuid.uuid4(),
          'description': 'Contains ad units that hold creatives of size 120x600',
          'targetedAdUnitIds': []
      }),
      ('468', '60', {
          'name': 'Banner AdUnit Placement #%s' % uuid.uuid4(),
          'description': 'Contains ad units that hold creatives of size 468x60',
          'targetedAdUnitIds': []
      }),
  ]

  # Page through all ad units, sorting each into the matching placement.
  statement = dfp.FilterStatement()
  while True:
    response = inventory_service.getAdUnitsByStatement(
        statement.ToStatement())
    if 'results' not in response:
      break
    for ad_unit in response['results']:
      if 'adUnitSizes' not in ad_unit:
        continue
      for ad_unit_size in ad_unit['adUnitSizes']:
        size = ad_unit_size['size']
        for width, height, placement in size_targets:
          if size['width'] == width and size['height'] == height:
            placement['targetedAdUnitIds'].append(ad_unit['id'])
            break
    statement.offset += dfp.SUGGESTED_PAGE_LIMIT

  # Only create placements with one or more ad unit.
  placement_list = [placement for _, _, placement in size_targets
                    if placement['targetedAdUnitIds']]

  # Add placements.
  placements = placement_service.createPlacements(placement_list)

  # Display results.
  for placement in placements:
    ad_unit_ids = ''
    if 'targetedAdUnitIds' in placement:
      ad_unit_ids = ', '.join(placement['targetedAdUnitIds'])
    print ('A Placement with ID \'%s\', name \'%s\', and containing ad units '
           '{%s} was created.' % (placement['id'], placement['name'],
                                  ad_unit_ids))
if __name__ == '__main__':
  # Initialize client object.
  # Credentials/network settings are read from stored configuration
  # (presumably the googleads.yaml file — see the client library docs).
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| 35.792793
| 78
| 0.687893
|
4a160c94b4b50e4dbd9b875c032174ea97dab589
| 99
|
py
|
Python
|
src/dataset/__init__.py
|
ireina7/gzsl-seg
|
9aad220274b4a58b59f5da430f873b5dfc21e458
|
[
"MIT"
] | 1
|
2022-03-15T04:46:00.000Z
|
2022-03-15T04:46:00.000Z
|
src/dataset/__init__.py
|
ireina7/gzsl-seg
|
9aad220274b4a58b59f5da430f873b5dfc21e458
|
[
"MIT"
] | null | null | null |
src/dataset/__init__.py
|
ireina7/gzsl-seg
|
9aad220274b4a58b59f5da430f873b5dfc21e458
|
[
"MIT"
] | null | null | null |
all = [
'common',
'transform_pixel',
'semantic',
'voc',
]
from dataset.common import *
| 8.25
| 28
| 0.585859
|
4a160db1433f15df935abb230d2f115373f5a5c5
| 7,407
|
py
|
Python
|
monai/networks/nets/unet.py
|
vsivan97/MONAI
|
33cb186b4664bb032fe9837b305c0a06cdf6d289
|
[
"Apache-2.0"
] | null | null | null |
monai/networks/nets/unet.py
|
vsivan97/MONAI
|
33cb186b4664bb032fe9837b305c0a06cdf6d289
|
[
"Apache-2.0"
] | null | null | null |
monai/networks/nets/unet.py
|
vsivan97/MONAI
|
33cb186b4664bb032fe9837b305c0a06cdf6d289
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Union, Optional
import torch
import torch.nn as nn
from monai.networks.blocks.convolutions import Convolution, ResidualUnit
from monai.networks.layers.factories import Act, Norm
from monai.networks.layers.simplelayers import SkipConnection
from monai.networks.blocks.evonorm import EvoNormLayer
from monai.utils import alias, export
__all__ = ["UNet", "Unet", "unet"]
@export("monai.networks.nets")
@alias("Unet")
class UNet(nn.Module):
    def __init__(
        self,
        dimensions: int,
        in_channels: int,
        out_channels: int,
        channels: Sequence[int],
        strides: Sequence[int],
        kernel_size: Union[Sequence[int], int] = 3,
        up_kernel_size: Union[Sequence[int], int] = 3,
        num_res_units: int = 0,
        evonorm: Optional[EvoNormLayer] = None,
        act=Act.PRELU,
        norm=Norm.INSTANCE,
        dropout=0.0,
    ) -> None:
        """
        Enhanced version of UNet which has residual units implemented with the ResidualUnit class.
        The residual part uses a convolution to change the input dimensions to match the output dimensions
        if this is necessary but will use nn.Identity if not.
        Refer to: https://link.springer.com/chapter/10.1007/978-3-030-12029-0_40.

        Args:
            dimensions: number of spatial dimensions.
            in_channels: number of input channels.
            out_channels: number of output channels.
            channels: sequence of channels. Top block first.
            strides: convolution stride.
            kernel_size: convolution kernel size. Defaults to 3.
            up_kernel_size: upsampling convolution kernel size. Defaults to 3.
            num_res_units: number of residual units. Defaults to 0.
            evonorm: optional EvoNorm layer forwarded to every Convolution /
                ResidualUnit built by this network. Defaults to None.
            act: activation type and arguments. Defaults to PReLU.
            norm: feature normalization type and arguments. Defaults to instance norm.
            dropout: dropout ratio. Defaults to no dropout.
        """
        super().__init__()

        # Store configuration for the layer-factory helpers below.
        self.dimensions = dimensions
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.channels = channels
        self.strides = strides
        self.kernel_size = kernel_size
        self.up_kernel_size = up_kernel_size
        self.num_res_units = num_res_units
        self.act = act
        self.norm = norm
        self.dropout = dropout
        self.evonorm = evonorm

        def _create_block(
            inc: int, outc: int, channels: Sequence[int], strides: Sequence[int], is_top: bool
        ) -> nn.Sequential:
            """
            Builds the UNet structure from the bottom up by recursing down to the bottom block, then creating sequential
            blocks containing the downsample path, a skip connection around the previous block, and the upsample path.

            Args:
                inc: number of input channels.
                outc: number of output channels.
                channels: sequence of channels. Top block first.
                strides: convolution stride.
                is_top: True if this is the top block.
            """
            c = channels[0]
            s = strides[0]

            subblock: nn.Module

            if len(channels) > 2:
                subblock = _create_block(c, c, channels[1:], strides[1:], False)  # continue recursion down
                upc = c * 2
            else:
                # the next layer is the bottom so stop recursion, create the bottom layer as the sublock for this layer
                subblock = self._get_bottom_layer(c, channels[1])
                upc = c + channels[1]

            down = self._get_down_layer(inc, c, s, is_top)  # create layer in downsampling path
            up = self._get_up_layer(upc, outc, s, is_top)  # create layer in upsampling path

            return nn.Sequential(down, SkipConnection(subblock), up)

        self.model = _create_block(in_channels, out_channels, self.channels, self.strides, True)

    def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
        """
        Build one layer of the downsampling (encoder) path: a ResidualUnit
        when num_res_units > 0, otherwise a plain Convolution.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        if self.num_res_units > 0:
            return ResidualUnit(
                self.dimensions,
                in_channels,
                out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                subunits=self.num_res_units,
                act=self.act,
                norm=self.norm,
                evonorm=self.evonorm,
                dropout=self.dropout,
            )
        return Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.kernel_size,
            act=self.act,
            norm=self.norm,
            evonorm=self.evonorm,
            dropout=self.dropout,
        )

    def _get_bottom_layer(self, in_channels: int, out_channels: int) -> nn.Module:
        """
        Build the bottleneck layer: a down layer with stride 1 (no resizing).

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
        """
        return self._get_down_layer(in_channels, out_channels, 1, False)

    def _get_up_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
        """
        Build one layer of the upsampling (decoder) path: a transposed
        Convolution, followed by a stride-1 ResidualUnit when
        num_res_units > 0.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        conv: Union[Convolution, nn.Sequential]

        conv = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.up_kernel_size,
            act=self.act,
            norm=self.norm,
            evonorm=self.evonorm,
            dropout=self.dropout,
            # The very top output layer skips act/norm when there is no
            # trailing residual unit to apply them.
            conv_only=is_top and self.num_res_units == 0,
            is_transposed=True,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                out_channels,
                out_channels,
                strides=1,
                kernel_size=self.kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                evonorm=self.evonorm,
                dropout=self.dropout,
                last_conv_only=is_top,
            )
            conv = nn.Sequential(conv, ru)

        return conv

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the full encoder/decoder stack on input tensor ``x``."""
        x = self.model(x)
        return x
Unet = unet = UNet
| 36.850746
| 120
| 0.599163
|
4a160e45981a120d40286f2ce15c44eff6ec9f4e
| 7,338
|
py
|
Python
|
rqt_dotgraph/rqt_dotgraph.py
|
xydesa/rqt_dotgraph
|
3a182142464f562d89e7aa48b12ac6a50b013b5a
|
[
"CC0-1.0"
] | null | null | null |
rqt_dotgraph/rqt_dotgraph.py
|
xydesa/rqt_dotgraph
|
3a182142464f562d89e7aa48b12ac6a50b013b5a
|
[
"CC0-1.0"
] | null | null | null |
rqt_dotgraph/rqt_dotgraph.py
|
xydesa/rqt_dotgraph
|
3a182142464f562d89e7aa48b12ac6a50b013b5a
|
[
"CC0-1.0"
] | null | null | null |
"""
rqt GUI plugin to visualize dot graphs.
This software was developed by employees of the Federal Government in the course of
their official duties. Pursuant to title 17 Section 105 of the United States Code, this
software is not subject to copyright protection and is in the public domain. The
Government assumes no responsibility whatsoever for its use by other parties, and the
software is provided "AS IS" without warranty or guarantee of any kind, express or
implied, including, but not limited to, the warranties of merchantability, fitness for a
particular purpose, and noninfringement. In no event shall the Government be liable for
any claim, damages or other liability, whether in an action of contract, tort or other
dealings in the software. The software is not designed for use in (i) the design,
construction, operation or maintenance of any nuclear facility; (ii) navigating or
operating aircraft or any manned vehicle; or (iii) any life-saving, life-support or
life-critical medical equipment. The Government has no obligation hereunder to provide
maintenance, support, updates, enhancements, or modifications. We would appreciate
acknowledgement if the software is used. This software can be redistributed and/or
modified freely provided that any derivative works bear some notice that they are
derived from it, and any modified versions bear some notice that they have been
modified.
"""
import contextlib
import io
import os
import sys
from ament_index_python import get_resource
# pylint doesn't support how python_qt_bindings modules are added:
# https://github.com/PyCQA/pylint/issues/3398
# pylint: disable=no-name-in-module,import-error
from python_qt_binding import loadUi
from python_qt_binding.QtGui import QImageWriter
from python_qt_binding.QtSvg import QSvgGenerator
from python_qt_binding.QtWidgets import QFileDialog, QWidget
# pylint: enable=no-name-in-module,import-error
from rqt_gui.main import Main
from rqt_gui_py.plugin import Plugin
from std_msgs.msg import String
from rqt_dotgraph.xdot_qt import DotWidget
class RqtDotGraphViewer(Plugin):
    """rqt GUI plugin to visualize dot graphs.

    Subscribes to a ``std_msgs/String`` topic whose payload is Graphviz dot
    source and renders it in an embedded xdot widget.  The UI also supports
    loading a .dot/.xdot file from disk and saving the rendered graph as a
    raster image or SVG.
    """

    def __init__(self, context):
        """Initialize the plugin.

        :param context: rqt plugin context; provides the ROS 2 node and the
            container the widget is added to.
        """
        super().__init__(context)
        self._context = context
        self.subscription = None  # active rclpy subscription, if any
        self.graph = None  # last dot source received or loaded, as str
        self.filename = None  # path of the last graph loaded from disk
        # only declare the parameter if running standalone or it's the first instance
        if self._context.serial_number() <= 1:
            self._context.node.declare_parameter("title", "Dot Graph Viewer")
        self.title = self._context.node.get_parameter("title").value
        # Build the save-dialog filter from every image format Qt can write,
        # plus SVG, which is handled separately via QSvgGenerator.
        supported_formats = QImageWriter.supportedImageFormats()
        self.image_filter = (
            ";;".join(["*.{}".format(fo.data().decode()) for fo in supported_formats])
            + ";;*.svg"
        )
        self._widget = QWidget()
        self.setObjectName(self.title)
        _, package_path = get_resource("packages", "rqt_dotgraph")
        ui_file = os.path.join(
            package_path, "share", "rqt_dotgraph", "resource", "rqt_dotgraph.ui"
        )
        loadUi(ui_file, self._widget, {"DotWidget": DotWidget})
        self._widget.setObjectName(self.title + "UI")
        self._widget.refreshButton.clicked[bool].connect(self.update_subscriber)
        self._widget.loadButton.clicked[bool].connect(self.load_graph)
        self._widget.saveButton.clicked[bool].connect(self.save_graph)
        # Number the window title when several plugin instances are open.
        title = self.title
        if self._context.serial_number() > 1:
            title += " (%d)" % self._context.serial_number()
        self._context.add_widget(self._widget)
        self._widget.setWindowTitle(title)
        # only set main window title if running standalone
        if self._context.serial_number() < 1:
            self._widget.window().setWindowTitle(self.title)
        self.setup_subscription("dot_graph")

    def update_subscriber(self):
        """Update ROS 2 subscription with topic from text box."""
        # Drop the previous subscription (and cached graph) so exactly one
        # topic feeds the view at any time.
        if self.subscription is not None:
            self._context.node.destroy_subscription(self.subscription)
            self.subscription = None
            self.graph = None
        topic = self._widget.topicText.text()
        self.setup_subscription(topic)

    def setup_subscription(self, topic):
        """Create the ROS 2 subscription.

        :param topic: name of the std_msgs/String topic carrying dot source.
        """
        self.subscription = self._context.node.create_subscription(
            String, topic, self.plan_graph_callback, 10
        )
        # Echo the fully resolved topic name back into the text box.
        self._widget.topicText.setText(self.subscription.topic_name)

    def plan_graph_callback(self, msg):
        """Receive the dot graph string."""
        # Zoom to fit only for the first graph so subsequent updates keep
        # the user's current viewport.
        zoom_to_fit = self.graph is None
        self.graph = msg.data
        self.refresh_graph(zoom_to_fit)

    def refresh_graph(self, zoom_to_fit):
        """Update the dot graph displayed by the plugin.

        :param zoom_to_fit: when True, rescale the view to show the whole graph.
        """
        if self.graph is None:
            return
        self._context.node.get_logger().debug(self.graph)
        # Capture stdout and stderr and output as an info level log
        # because ROS2 logging levels when launching are broken.
        new_out = io.StringIO()
        new_err = io.StringIO()
        with contextlib.redirect_stdout(new_out):
            with contextlib.redirect_stderr(new_err):
                self._widget.xdot_widget.set_dotcode(self.graph)
        self._context.node.get_logger().debug(new_out.getvalue())
        self._context.node.get_logger().debug(new_err.getvalue())
        if zoom_to_fit:
            self._widget.xdot_widget.zoom_to_fit()
        self._widget.xdot_widget.update()

    def load_graph(self):
        """Load a dot graph from a file."""
        ret = QFileDialog.getOpenFileName(
            self._widget, "Load graph", "untitled.dot", "Dot files (*.dot *.xdot)"
        )
        # ret is (filename, selected_filter); empty filename means cancelled.
        if ret[0]:
            with open(ret[0], "r") as dotfile:
                self.filename = ret[0]
                self.graph = dotfile.read()
                self.refresh_graph(True)
            # Stop listening to the topic so the loaded file is not overwritten.
            if self.subscription is not None:
                self.subscription.destroy()
                self.subscription = None

    def save_graph(self):
        """Save the current dot graph as an image."""
        if self.graph is None:
            return
        ret = QFileDialog.getSaveFileName(
            self._widget, "Save graph as", "untitled.png", self.image_filter, "*.png"
        )
        if ret[0]:
            _, extension = os.path.splitext(ret[0])
            if extension == ".svg":
                # Vector output: render the widget through a QSvgGenerator.
                gen = QSvgGenerator()
                gen.setFileName(ret[0])
                gen.setSize(self._widget.xdot_widget.size())
                gen.setViewBox(self._widget.xdot_widget.rect())
                # NOTE(review): this raster grab().save() looks redundant —
                # render(gen) below writes the SVG to the same path; confirm
                # before removing.
                self._widget.xdot_widget.grab().save(ret[0])
                self._widget.xdot_widget.render(gen)
            else:
                # Raster output: save a pixmap grab of the widget.
                self._widget.xdot_widget.grab().save(ret[0])

    # Qt methods
    def shutdown_plugin(self):
        """Shutdown plugin."""

    def save_settings(self, plugin_settings, instance_settings):
        """Save settings."""

    def restore_settings(self, plugin_settings, instance_settings):
        """Restore settings."""
def main():
    """Entry point: launch the plugin standalone through the rqt framework."""
    exit_code = Main().main(sys.argv, standalone="rqt_dotgraph.rqt_dotgraph")
    sys.exit(exit_code)
# Allow running the plugin directly as a script.
if __name__ == "__main__":
    main()
| 38.21875
| 88
| 0.668711
|
4a160e50b3c647c22ad382fa3d6f07aefff0ef5d
| 5,021
|
py
|
Python
|
archive/old_plots/plot_metadata.py
|
garudlab/mother_infant
|
98a27c83bf5ece9497d5a030c6c9396a8c514781
|
[
"BSD-2-Clause"
] | 2
|
2020-08-09T06:19:11.000Z
|
2021-08-18T17:12:23.000Z
|
archive/old_plots/plot_metadata.py
|
garudlab/mother_infant
|
98a27c83bf5ece9497d5a030c6c9396a8c514781
|
[
"BSD-2-Clause"
] | null | null | null |
archive/old_plots/plot_metadata.py
|
garudlab/mother_infant
|
98a27c83bf5ece9497d5a030c6c9396a8c514781
|
[
"BSD-2-Clause"
] | 8
|
2019-02-20T22:21:55.000Z
|
2021-02-13T00:55:40.000Z
|
import matplotlib
matplotlib.use('Agg')
import parse_midas_data
import pylab
import sys
import numpy
import diversity_utils
import gene_diversity_utils
import stats_utils
import os
# Load time metadata: subject -> visit number -> list of (sample, day) pairs.
subject_sample_time_map = parse_midas_data.parse_subject_sample_time_map()

# Accumulators for the metadata summaries plotted below.
num_visnos = {1: 0, 2: 0, 3: 0}  # subjects binned by their number of visits
num_samples_per_visno_aggregate = {1: 0}  # visits binned by replicate-sample count
num_samples_per_visno = {1: {}, 2: {}, 3: {}}  # same, split per visit number
days = []  # collection day of every visit
days_by_visno = {1: [], 2: [], 3: []}  # collection days split by visit number
distance_between_days = {'1-2': [], '1-3': [], '2-3': []}  # days between visit pairs

# Iterate through the metadata and fill the accumulators above.
for subject in subject_sample_time_map.keys():
    visnos = subject_sample_time_map[subject].keys()
    num_visnos[len(visnos)] += 1
    for vis in visnos:
        # Count replicate samples this subject contributed at this visit.
        num_samples_per_vis = len(subject_sample_time_map[subject][vis])
        if num_samples_per_vis not in num_samples_per_visno_aggregate:
            num_samples_per_visno_aggregate[num_samples_per_vis] = 1
        else:
            num_samples_per_visno_aggregate[num_samples_per_vis] += 1
        # Same count, broken down by visit number.
        if num_samples_per_vis not in num_samples_per_visno[vis]:
            num_samples_per_visno[vis][num_samples_per_vis] = 1
        else:
            num_samples_per_visno[vis][num_samples_per_vis] += 1
        # Record the collection day of the first sample at this visit.
        day = subject_sample_time_map[subject][vis][0][1]
        days.append(day)
        days_by_visno[vis].append(day)
    # Time elapsed between each pair of visits, when both happened.
    for first, second in ((1, 2), (1, 3), (2, 3)):
        if first in visnos and second in visnos:
            distance_between_days['%d-%d' % (first, second)].append(
                subject_sample_time_map[subject][second][0][1]
                - subject_sample_time_map[subject][first][0][1])

# Plot the metadata.

# Distribution of collection days across all samples.
pylab.figure()
pylab.xlabel('days')
pylab.ylabel('number of samples')
pylab.title('Distribution of days')
pylab.hist(days)
pylab.savefig('%s/metadata_days.png' % (parse_midas_data.analysis_directory), bbox_inches='tight', dpi=300)

# Distribution of days elapsed between visit pairs 1-2, 1-3, 2-3.
fig = pylab.figure()
for subplot_idx, pair in ((311, '1-2'), (312, '1-3'), (313, '2-3')):
    ax = fig.add_subplot(subplot_idx)
    pylab.ylim(0, 20)
    pylab.xlim(0, 500)
    if subplot_idx == 312:
        # Label the shared y-axis once, on the middle panel.
        pylab.ylabel('number of samples')
    if subplot_idx == 313:
        # Label the shared x-axis once, on the bottom panel.
        pylab.xlabel('days')
    pylab.title('Days between visnos %s' % pair)
    ax.hist(distance_between_days[pair])
fig.savefig('%s/metadata_days_by_visno.png' % (parse_midas_data.analysis_directory), bbox_inches='tight', dpi=300)

# Number of visits per subject.
pylab.figure()
pylab.xlabel('Number of visits/subject')
pylab.ylabel('Number of subjects')
visit_counts = [num_visnos.get(v, 0) for v in (1, 2, 3)]
pylab.bar([1, 2, 3], visit_counts)
pylab.plot([1, 2, 3], visit_counts, color='y')
pylab.xticks([1, 2, 3], ['1', '2', '3'])
pylab.savefig('%s/metadata_num_visnos_per_subject.png' % (parse_midas_data.analysis_directory), bbox_inches='tight', dpi=300)

# Number of replicate samples per visit, aggregated over visit numbers.
# Use .get(k, 0) so sparse data cannot raise a KeyError here.
pylab.figure()
pylab.xlabel('Number of samples/visno/subject')
pylab.ylabel('Number of visnos')
sample_counts = [num_samples_per_visno_aggregate.get(k, 0) for k in (1, 2, 3, 4)]
pylab.bar([1, 2, 3, 4], sample_counts)
pylab.plot([1, 2, 3, 4], sample_counts, color='y')
pylab.xticks([1, 2, 3, 4], ['1', '2', '3', '4'])
pylab.savefig('%s/metadata_num_samples_per_visno.png' % (parse_midas_data.analysis_directory), bbox_inches='tight', dpi=300)

# Distribution of replicate samples per visit, one bar group per visit number.
pylab.figure()
pylab.xlabel('Number of replicate samples/visit/subject')
pylab.ylabel('Number of subjects')
pylab.title('Number of replicate samples/visit/subject')
width = 0.1
for offset, (vis, color) in enumerate(((1, None), (2, 'r'), (3, 'g'))):
    heights = [num_samples_per_visno[vis].get(k, 0) for k in (1, 2, 3, 4)]
    positions = [x + offset * width for x in (1, 2, 3, 4)]
    if color is None:
        pylab.bar(positions, heights, width)
    else:
        pylab.bar(positions, heights, width, color=color)
pylab.xticks([1, 2, 3, 4], ['1', '2', '3', '4'])
# BUG FIX: `loc` must be a keyword argument; passing 'upper right'
# positionally was only tolerated by very old matplotlib and raises in
# current versions.
pylab.legend(['Visit1', 'Visit2', 'Visit3'], loc='upper right', prop={'size': 6})
pylab.savefig('%s/metadata_distribution_of_num_samples_visno_all.png' % (parse_midas_data.analysis_directory), bbox_inches='tight', dpi=300)
| 38.328244
| 174
| 0.730532
|
4a160e5ae28a10966bbea903fc8c56c280016b1a
| 280
|
py
|
Python
|
src/dsrlib/filemgr/windows.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 8
|
2020-09-06T02:15:10.000Z
|
2022-01-12T22:49:20.000Z
|
src/dsrlib/filemgr/windows.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 5
|
2021-03-29T20:37:46.000Z
|
2021-09-19T13:20:24.000Z
|
src/dsrlib/filemgr/windows.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 2
|
2020-09-16T01:45:49.000Z
|
2021-06-12T12:38:15.000Z
|
#!/usr/bin/env python3
import os
import subprocess

from .base import FileManager
@FileManager.register
class FileManagerWindows:
    """Windows implementation of the "reveal file in file manager" hook."""

    @staticmethod
    def showFile(filename):
        """Open Windows Explorer with *filename* pre-selected.

        :param filename: path of the file to reveal.
        :return: True when the command exited with status 0.
        """
        # Use an argument list instead of interpolating the filename into an
        # os.system shell string: paths containing quotes or shell
        # metacharacters can no longer break (or inject into) the command.
        # explorer.exe expects "/select," fused with the path.
        code = subprocess.call(['explorer.exe', '/select,%s' % filename])
        # The old `code is None` check was dead: the return value is always
        # an int exit status.
        return code == 0
| 20
| 65
| 0.653571
|
4a160f8526d199779ca8ddf1d219b7b3f74c1078
| 280
|
py
|
Python
|
maintenance_repair_services/maintenance_and_repair_services/doctype/item_category/item_category.py
|
nismaHamdouna/mrs
|
6e45de16a4ddf3f7ecbee38f433ba430b4ff7081
|
[
"MIT"
] | 1
|
2019-05-28T13:43:14.000Z
|
2019-05-28T13:43:14.000Z
|
maintenance_repair_services/maintenance_and_repair_services/doctype/item_category/item_category.py
|
nismaHamdouna/mrs
|
6e45de16a4ddf3f7ecbee38f433ba430b4ff7081
|
[
"MIT"
] | null | null | null |
maintenance_repair_services/maintenance_and_repair_services/doctype/item_category/item_category.py
|
nismaHamdouna/mrs
|
6e45de16a4ddf3f7ecbee38f433ba430b4ff7081
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Maintenance and Repair Services and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ItemCategory(Document):
    """Frappe doctype controller for Item Category.

    No custom server-side behavior; everything is inherited from Document.
    """
    pass
| 25.454545
| 70
| 0.789286
|
4a16101c7a3ab68e507e736cee5fd57a8c349126
| 54,357
|
py
|
Python
|
Lib/configparser.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 1
|
2019-09-04T02:06:21.000Z
|
2019-09-04T02:06:21.000Z
|
Lib/configparser.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 4
|
2020-04-02T14:59:42.000Z
|
2021-02-10T14:30:18.000Z
|
Lib/configparser.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 2
|
2018-05-03T01:08:13.000Z
|
2019-12-02T03:03:43.000Z
|
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True, default_section='DEFAULT',
interpolation=<unset>, converters=<unset>):
Create the parser. When `defaults' is given, it is initialized into the
dictionary or intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
When `default_section' is given, the name of the special section is
named accordingly. By default it is called ``"DEFAULT"`` but this can
be customized to point to any other valid section name. Its current
value can be retrieved using the ``parser_instance.default_section``
attribute and may be modified at runtime.
When `interpolation` is given, it should be an Interpolation subclass
instance. It will be used as the handler for option value
        pre-processing when using getters. RawConfigParser objects don't do
        any sort of interpolation, whereas ConfigParser uses an instance of
        BasicInterpolation. The library also provides a ``zc.buildout``
        inspired ExtendedInterpolation implementation.
When `converters` is given, it should be a dictionary where each key
represents the name of a type converter and each value is a callable
implementing the conversion from string to the desired datatype. Every
converter gets its corresponding get*() method on the parser object and
section proxies.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import ChainMap as _ChainMap
import functools
import io
import itertools
import os
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationMissingOptionError", "InterpolationSyntaxError",
"ParsingError", "MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"Interpolation", "BasicInterpolation", "ExtendedInterpolation",
"LegacyInterpolation", "SectionProxy", "ConverterMapping",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
_default_dict = dict
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
    """Root of the ConfigParser exception hierarchy.

    Keeps the message available on a ``message`` attribute and renders it
    verbatim for both ``repr()`` and ``str()``.
    """

    def __init__(self, msg=''):
        # Store the text both as an attribute and as the Exception args.
        self.message = msg
        super().__init__(msg)

    def __repr__(self):
        return self.message

    __str__ = __repr__
class NoSectionError(Error):
    """Raised when a requested section is absent from the parser."""

    def __init__(self, section):
        super().__init__('No section: %r' % (section,))
        self.section = section
        self.args = (section, )
class DuplicateSectionError(Error):
    """Raised when a section is repeated in an input source.

    Triggered either through the API (creating a section that already
    exists) or, for strict parsers, when a single input file, string or
    dictionary contains the same section more than once.
    """

    def __init__(self, section, source=None, lineno=None):
        parts = [repr(section), " already exists"]
        if source is not None:
            # Reading from a concrete source: mention it (and the line).
            prefix = ["While reading from ", repr(source)]
            if lineno is not None:
                prefix.append(" [line {0:2d}]".format(lineno))
            prefix.append(": section ")
            parts = prefix + parts
        else:
            parts = ["Section "] + parts
        super().__init__("".join(parts))
        self.section = section
        self.source = source
        self.lineno = lineno
        self.args = (section, source, lineno)
class DuplicateOptionError(Error):
    """Raised by strict parsers when an option is repeated in an input source.

    Only triggered when one file, string or dictionary contains the same
    option more than once within a single section.
    """

    def __init__(self, section, option, source=None, lineno=None):
        parts = [repr(option), " in section ", repr(section),
                 " already exists"]
        if source is not None:
            # Reading from a concrete source: mention it (and the line).
            prefix = ["While reading from ", repr(source)]
            if lineno is not None:
                prefix.append(" [line {0:2d}]".format(lineno))
            prefix.append(": option ")
            parts = prefix + parts
        else:
            parts = ["Option "] + parts
        super().__init__("".join(parts))
        self.section = section
        self.option = option
        self.source = source
        self.lineno = lineno
        self.args = (section, option, source, lineno)
class NoOptionError(Error):
    """A requested option was not found."""

    def __init__(self, option, section):
        super().__init__("No option %r in section: %r" %
                         (option, section))
        self.option = option
        self.section = section
        self.args = (option, section)
class InterpolationError(Error):
    """Base class for interpolation-related exceptions.

    Carries the offending option and section alongside the message.
    """

    def __init__(self, option, section, msg):
        super().__init__(msg)
        self.option = option
        self.section = section
        self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
    """A string substitution required a setting which was not available."""

    def __init__(self, option, section, rawval, reference):
        text = ("Bad value substitution: option {!r} in section {!r} contains "
                "an interpolation key {!r} which is not a valid option name. "
                "Raw value: {!r}").format(option, section, reference, rawval)
        super().__init__(option, section, text)
        self.reference = reference
        self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
    """Raised when the source text contains invalid syntax.

    Current implementation raises this exception when the source text into
    which substitutions are made does not conform to the required syntax
    (e.g. a reference escape character not followed by a valid escape or
    variable reference).
    """
class InterpolationDepthError(InterpolationError):
    """Raised when substitutions are nested too deeply."""

    def __init__(self, option, section, rawval):
        text = ("Recursion limit exceeded in value substitution: option {!r} "
                "in section {!r} contains an interpolation key which "
                "cannot be substituted in {} steps. Raw value: {!r}"
                "").format(option, section, MAX_INTERPOLATION_DEPTH, rawval)
        super().__init__(option, section, text)
        self.args = (option, section, rawval)
class ParsingError(Error):
    """Raised when a configuration file does not follow legal syntax."""

    def __init__(self, source=None, filename=None):
        # Exactly one of `source'/`filename' arguments has to be given.
        # `filename' kept for compatibility.
        if filename and source:
            raise ValueError("Cannot specify both `filename' and `source'. "
                             "Use `source'.")
        elif not filename and not source:
            raise ValueError("Required argument `source' not given.")
        elif filename:
            source = filename
        Error.__init__(self, 'Source contains parsing errors: %r' % source)
        self.source = source
        # (lineno, line) pairs accumulated via append() during parsing.
        self.errors = []
        self.args = (source, )

    @property
    def filename(self):
        """Deprecated, use `source'."""
        warnings.warn(
            "The 'filename' attribute will be removed in future versions. "
            "Use 'source' instead.",
            DeprecationWarning, stacklevel=2
        )
        return self.source

    @filename.setter
    def filename(self, value):
        """Deprecated, use `source'."""
        warnings.warn(
            "The 'filename' attribute will be removed in future versions. "
            "Use 'source' instead.",
            DeprecationWarning, stacklevel=2
        )
        self.source = value

    def append(self, lineno, line):
        # Record one offending line and fold it into the visible message.
        self.errors.append((lineno, line))
        self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
    """Raised when a key-value pair is found before any section header."""

    def __init__(self, filename, lineno, line):
        details = ('File contains no section headers.\nfile: %r, line: %d\n%r' %
                   (filename, lineno, line))
        # Deliberately bypass ParsingError.__init__: its source/filename
        # juggling does not apply here, so go straight to Error.
        Error.__init__(self, details)
        self.source = filename
        self.lineno = lineno
        self.line = line
        self.args = (filename, lineno, line)
# Used in parser getters to indicate that the default behaviour, when a
# specific option is not found, is to raise an exception. Created to enable
# `None' as a valid fallback value.
_UNSET = object()
class Interpolation:
    """Dummy interpolation that passes the value through with no changes.

    Also serves as the base interface: subclasses override the four hooks
    below to transform values at the corresponding parser stages.
    """

    def before_get(self, parser, section, option, value, defaults):
        # Hook applied when a value is read back through a getter.
        return value

    def before_set(self, parser, section, option, value):
        # Hook applied when a value is stored via set().
        return value

    def before_read(self, parser, section, option, value):
        # Hook applied while parsing input.
        return value

    def before_write(self, parser, section, option, value):
        # Hook applied while serializing with write().
        return value
class BasicInterpolation(Interpolation):
    """Interpolation as implemented in the classic ConfigParser.

    The option values can contain format strings which refer to other values in
    the same section, or values in the special default section.

    For example:

        something: %(dir)s/whatever

    would resolve the "%(dir)s" to the value of dir.  All reference
    expansions are done late, on demand. If a user needs to use a bare % in
    a configuration file, she can escape it by writing %%. Other % usage
    is considered a user error and raises `InterpolationSyntaxError'."""

    # Matches a %(name)s reference.
    _KEYCRE = re.compile(r"%\(([^)]+)\)s")

    def before_get(self, parser, section, option, value, defaults):
        L = []
        self._interpolate_some(parser, option, L, value, section, defaults, 1)
        return ''.join(L)

    def before_set(self, parser, section, option, value):
        tmp_value = value.replace('%%', '')  # escaped percent signs
        tmp_value = self._KEYCRE.sub('', tmp_value)  # valid syntax
        # Any remaining lone '%' is neither an escape nor a reference.
        if '%' in tmp_value:
            raise ValueError("invalid interpolation syntax in %r at "
                             "position %d" % (value, tmp_value.find('%')))
        return value

    def _interpolate_some(self, parser, option, accum, rest, section, map,
                          depth):
        # Recursively expand %(name)s references in `rest`, appending the
        # expanded fragments to `accum`.
        rawval = parser.get(section, option, raw=True, fallback=rest)
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rawval)
        while rest:
            p = rest.find("%")
            if p < 0:
                # No more references: flush the remainder.
                accum.append(rest)
                return
            if p > 0:
                # Flush literal text up to the next '%'.
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "%":
                # '%%' is an escaped percent sign.
                accum.append("%")
                rest = rest[2:]
            elif c == "(":
                m = self._KEYCRE.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                var = parser.optionxform(m.group(1))
                rest = rest[m.end():]
                try:
                    v = map[var]
                except KeyError:
                    raise InterpolationMissingOptionError(
                        option, section, rawval, var) from None
                if "%" in v:
                    # The referenced value itself contains references.
                    self._interpolate_some(parser, option, accum, v,
                                           section, map, depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'%%' must be followed by '%%' or '(', "
                    "found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
    """Advanced variant of interpolation, supports the syntax used by
    `zc.buildout'. Enables interpolation between sections."""

    # Matches a ${name} or ${section:name} reference.
    _KEYCRE = re.compile(r"\$\{([^}]+)\}")

    def before_get(self, parser, section, option, value, defaults):
        L = []
        self._interpolate_some(parser, option, L, value, section, defaults, 1)
        return ''.join(L)

    def before_set(self, parser, section, option, value):
        tmp_value = value.replace('$$', '')  # escaped dollar signs
        tmp_value = self._KEYCRE.sub('', tmp_value)  # valid syntax
        # Any remaining lone '$' is neither an escape nor a reference.
        if '$' in tmp_value:
            raise ValueError("invalid interpolation syntax in %r at "
                             "position %d" % (value, tmp_value.find('$')))
        return value

    def _interpolate_some(self, parser, option, accum, rest, section, map,
                          depth):
        # Recursively expand ${...} references in `rest`, appending the
        # expanded fragments to `accum`.
        rawval = parser.get(section, option, raw=True, fallback=rest)
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rawval)
        while rest:
            p = rest.find("$")
            if p < 0:
                # No more references: flush the remainder.
                accum.append(rest)
                return
            if p > 0:
                # Flush literal text up to the next '$'.
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "$":
                # '$$' is an escaped dollar sign.
                accum.append("$")
                rest = rest[2:]
            elif c == "{":
                m = self._KEYCRE.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                # The reference may be "option" or "section:option".
                path = m.group(1).split(':')
                rest = rest[m.end():]
                sect = section
                opt = option
                try:
                    if len(path) == 1:
                        opt = parser.optionxform(path[0])
                        v = map[opt]
                    elif len(path) == 2:
                        sect = path[0]
                        opt = parser.optionxform(path[1])
                        v = parser.get(sect, opt, raw=True)
                    else:
                        raise InterpolationSyntaxError(
                            option, section,
                            "More than one ':' found: %r" % (rest,))
                except (KeyError, NoSectionError, NoOptionError):
                    raise InterpolationMissingOptionError(
                        option, section, rawval, ":".join(path)) from None
                if "$" in v:
                    # The referenced value itself contains references;
                    # resolve them in the context of their own section.
                    self._interpolate_some(parser, opt, accum, v, sect,
                                           dict(parser.items(sect, raw=True)),
                                           depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'$' must be followed by '$' or '{', "
                    "found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
    """Deprecated interpolation used in old versions of ConfigParser.
    Use BasicInterpolation or ExtendedInterpolation instead."""

    # Matches either a %(name)s reference or any single other character.
    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")

    def before_get(self, parser, section, option, value, vars):
        rawval = value
        depth = MAX_INTERPOLATION_DEPTH
        while depth:                    # Loop through this until it's done
            depth -= 1
            if value and "%(" in value:
                # Normalize reference key case via optionxform before the
                # actual % expansion against `vars`.
                replace = functools.partial(self._interpolation_replace,
                                            parser=parser)
                value = self._KEYCRE.sub(replace, value)
                try:
                    value = value % vars
                except KeyError as e:
                    raise InterpolationMissingOptionError(
                        option, section, rawval, e.args[0]) from None
            else:
                break
        if value and "%(" in value:
            # References survived MAX_INTERPOLATION_DEPTH expansion passes.
            raise InterpolationDepthError(option, section, rawval)
        return value

    def before_set(self, parser, section, option, value):
        return value

    @staticmethod
    def _interpolation_replace(match, parser):
        s = match.group(1)
        if s is None:
            # Plain character: leave untouched.
            return match.group()
        else:
            # Rewrite the reference with a case-normalized option name.
            return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
    def __init__(self, defaults=None, dict_type=_default_dict,
                 allow_no_value=False, *, delimiters=('=', ':'),
                 comment_prefixes=('#', ';'), inline_comment_prefixes=None,
                 strict=True, empty_lines_in_values=True,
                 default_section=DEFAULTSECT,
                 interpolation=_UNSET, converters=_UNSET):
        """Initialize parser state; see the module docstring for the
        semantics of each keyword argument."""
        self._dict = dict_type
        self._sections = self._dict()
        self._defaults = self._dict()
        self._converters = ConverterMapping(self)
        self._proxies = self._dict()
        self._proxies[default_section] = SectionProxy(self, default_section)
        self._delimiters = tuple(delimiters)
        if delimiters == ('=', ':'):
            # Default delimiters: reuse the precompiled class-level patterns.
            self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
        else:
            # Custom delimiters: build the option regex from the template.
            d = "|".join(re.escape(d) for d in delimiters)
            if allow_no_value:
                self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
                                          re.VERBOSE)
            else:
                self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
                                          re.VERBOSE)
        self._comment_prefixes = tuple(comment_prefixes or ())
        self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
        self._strict = strict
        self._allow_no_value = allow_no_value
        self._empty_lines_in_values = empty_lines_in_values
        self.default_section=default_section
        self._interpolation = interpolation
        if self._interpolation is _UNSET:
            self._interpolation = self._DEFAULT_INTERPOLATION
        if self._interpolation is None:
            # Explicit None means "no interpolation at all".
            self._interpolation = Interpolation()
        if converters is not _UNSET:
            self._converters.update(converters)
        if defaults:
            self._read_defaults(defaults)
    def defaults(self):
        """Return the internal mapping that holds the DEFAULT section."""
        return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section) from None
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, (str, bytes, os.PathLike)):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except OSError:
continue
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
read_ok.append(filename)
return read_ok
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
    def read_dict(self, dictionary, source='<dict>'):
        """Read configuration from a dictionary.

        Keys are section names, values are dictionaries with keys and values
        that should be present in the section. If the used dictionary type
        preserves order, sections and their keys will be added in order.

        All types held in the dictionary are converted to strings during
        reading, including section names, option names and keys.

        Optional second argument is the `source' specifying the name of the
        dictionary being read.
        """
        # Track what *this call* added: in strict mode duplicates coming
        # from this dictionary raise, while entries that already existed
        # on the parser are silently overwritten.
        elements_added = set()
        for section, keys in dictionary.items():
            section = str(section)
            try:
                self.add_section(section)
            except (DuplicateSectionError, ValueError):
                # ValueError covers the DEFAULT section, which always exists.
                if self._strict and section in elements_added:
                    raise
            elements_added.add(section)
            for key, value in keys.items():
                key = self.optionxform(str(key))
                if value is not None:
                    value = str(value)
                if self._strict and (section, key) in elements_added:
                    raise DuplicateOptionError(section, key, source)
                elements_added.add((section, key))
                self.set(section, key, value)
    def readfp(self, fp, filename=None):
        """Deprecated, use read_file instead."""
        # Backward-compatibility shim: warn, then delegate to read_file().
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'parser.read_file()' instead.",
            DeprecationWarning, stacklevel=2
        )
        self.read_file(fp, source=filename)
    def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
        """Get an option value for a given section.

        If `vars' is provided, it must be a dictionary. The option is looked up
        in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
        If the key is not found and `fallback' is provided, it is used as
        a fallback value. `None' can be provided as a `fallback' value.

        If interpolation is enabled and the optional argument `raw' is False,
        all interpolations are expanded in the return values.

        Arguments `raw', `vars', and `fallback' are keyword only.

        The section DEFAULT is special.
        """
        try:
            # d chains vars -> section -> defaults (see _unify_values).
            d = self._unify_values(section, vars)
        except NoSectionError:
            if fallback is _UNSET:
                raise
            else:
                return fallback
        option = self.optionxform(option)
        try:
            value = d[option]
        except KeyError:
            if fallback is _UNSET:
                raise NoOptionError(option, section)
            else:
                return fallback
        # None marks a valueless option; those are never interpolated.
        if raw or value is None:
            return value
        else:
            return self._interpolation.before_get(self, section, option, value,
                                                  d)
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def _get_conv(self, section, option, conv, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
try:
return self._get(section, conv, option, raw=raw, vars=vars,
**kwargs)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
return fallback
# getint, getfloat and getboolean provided directly for backwards compat
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
return self._get_conv(section, option, int, raw=raw, vars=vars,
fallback=fallback, **kwargs)
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
return self._get_conv(section, option, float, raw=raw, vars=vars,
fallback=fallback, **kwargs)
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
return self._get_conv(section, option, self._convert_to_boolean,
raw=raw, vars=vars, fallback=fallback, **kwargs)
    def items(self, section=_UNSET, raw=False, vars=None):
        """Return a list of (name, value) tuples for each option in a section.

        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.

        The section DEFAULT is special.
        """
        if section is _UNSET:
            # Mapping-protocol flavor: yields (section_name, proxy) pairs.
            return super().items()
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != self.default_section:
                raise NoSectionError(section)
        # Snapshot before vars are merged: keys that exist only in `vars`
        # participate in interpolation but are excluded from the result.
        orig_keys = list(d.keys())
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        value_getter = lambda option: self._interpolation.before_get(self,
            section, option, d[option], d)
        if raw:
            value_getter = lambda option: d[option]
        return [(option, value_getter(option)) for option in orig_keys]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
    def set(self, section, option, value=None):
        """Set an option.

        The value passes through the interpolation engine's before_set hook
        first; note this only happens for truthy values (None and '' skip
        the hook).  Raises NoSectionError when the target section does not
        exist; falsy section names alias the DEFAULT section.
        """
        if value:
            value = self._interpolation.before_set(self, section, option,
                                                   value)
        if not section or section == self.default_section:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section) from None
        sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
    def _write_section(self, fp, section_name, section_items, delimiter):
        """Write a single section to the specified `fp'."""
        fp.write("[{}]\n".format(section_name))
        for key, value in section_items:
            value = self._interpolation.before_write(self, section_name, key,
                                                     value)
            if value is not None or not self._allow_no_value:
                # Tab-indent embedded newlines so multiline values parse
                # back as continuation lines on re-read.
                value = delimiter + str(value).replace('\n', '\n\t')
            else:
                # Valueless option: emit the bare key only.
                value = ""
            fp.write("{}{}\n".format(key, value))
        fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
    def __setitem__(self, key, value):
        # To conform with the mapping protocol, overwrites existing values in
        # the section.
        if key in self and self[key] is value:
            # Assigning a section's own proxy back to itself is a no-op;
            # clearing first would destroy the very data being assigned.
            return
        # XXX this is not atomic if read_dict fails at any point. Then again,
        # no update method in configparser is atomic in this implementation.
        if key == self.default_section:
            self._defaults.clear()
        elif key in self._sections:
            self._sections[key].clear()
        self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
    def _read(self, fp, fpname):
        """Parse a sectioned configuration file.

        Each section in a configuration file contains a header, indicated by
        a name in square brackets (`[]'), plus key/value options, indicated by
        `name' and `value' delimited with a specific substring (`=' or `:' by
        default).

        Values can span multiple lines, as long as they are indented deeper
        than the first line of the value. Depending on the parser's mode, blank
        lines may be treated as parts of multiline values or ignored.

        Configuration files may include comments, prefixed by specific
        characters (`#' and `;' by default). Comments may appear on their own
        in an otherwise empty line or may be entered in lines holding values or
        section names.

        While parsing, option values are accumulated as *lists* of lines;
        _join_multiline_values() collapses them to strings afterwards.
        """
        elements_added = set()          # (section) and (section, option) seen
        cursect = None                  # None, or a dictionary
        sectname = None
        optname = None
        lineno = 0
        indent_level = 0
        e = None                        # None, or an exception
        for lineno, line in enumerate(fp, start=1):
            comment_start = sys.maxsize
            # strip inline comments: find the earliest prefix occurrence
            # that starts the line or follows whitespace
            inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
            while comment_start == sys.maxsize and inline_prefixes:
                next_prefixes = {}
                for prefix, index in inline_prefixes.items():
                    index = line.find(prefix, index+1)
                    if index == -1:
                        continue
                    next_prefixes[prefix] = index
                    if index == 0 or (index > 0 and line[index-1].isspace()):
                        comment_start = min(comment_start, index)
                inline_prefixes = next_prefixes
            # strip full line comments
            for prefix in self._comment_prefixes:
                if line.strip().startswith(prefix):
                    comment_start = 0
                    break
            if comment_start == sys.maxsize:
                comment_start = None
            value = line[:comment_start].strip()
            if not value:
                if self._empty_lines_in_values:
                    # add empty line to the value, but only if there was no
                    # comment on the line
                    if (comment_start is None and
                            cursect is not None and
                            optname and
                            cursect[optname] is not None):
                        cursect[optname].append('')  # newlines added at join
                else:
                    # empty line marks end of value
                    indent_level = sys.maxsize
                continue
            # continuation line?
            first_nonspace = self.NONSPACECRE.search(line)
            cur_indent_level = first_nonspace.start() if first_nonspace else 0
            if (cursect is not None and optname and
                    cur_indent_level > indent_level):
                cursect[optname].append(value)
            # a section header or option header?
            else:
                indent_level = cur_indent_level
                # is it a section header?
                mo = self.SECTCRE.match(value)
                if mo:
                    sectname = mo.group('header')
                    if sectname in self._sections:
                        if self._strict and sectname in elements_added:
                            raise DuplicateSectionError(sectname, fpname,
                                                        lineno)
                        cursect = self._sections[sectname]
                        elements_added.add(sectname)
                    elif sectname == self.default_section:
                        cursect = self._defaults
                    else:
                        cursect = self._dict()
                        self._sections[sectname] = cursect
                        self._proxies[sectname] = SectionProxy(self, sectname)
                        elements_added.add(sectname)
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, line)
                # an option line?
                else:
                    mo = self._optcre.match(value)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        if not optname:
                            e = self._handle_error(e, fpname, lineno, line)
                        optname = self.optionxform(optname.rstrip())
                        if (self._strict and
                                (sectname, optname) in elements_added):
                            raise DuplicateOptionError(sectname, optname,
                                                       fpname, lineno)
                        elements_added.add((sectname, optname))
                        # This check is fine because the OPTCRE cannot
                        # match if it would set optval to None
                        if optval is not None:
                            optval = optval.strip()
                            cursect[optname] = [optval]
                        else:
                            # valueless option handling
                            cursect[optname] = None
                    else:
                        # a non-fatal parsing error occurred. set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        e = self._handle_error(e, fpname, lineno, line)
        self._join_multiline_values()
        # if any parsing errors occurred, raise an exception
        if e:
            raise e
    def _join_multiline_values(self):
        # Post-parse pass: collapse each option's accumulated list of lines
        # into a single '\n'-joined string (trailing blanks stripped), then
        # run the interpolation engine's before_read hook on every value,
        # DEFAULT section included.
        defaults = self.default_section, self._defaults
        all_sections = itertools.chain((defaults,),
                                       self._sections.items())
        for section, options in all_sections:
            for name, val in options.items():
                if isinstance(val, list):
                    val = '\n'.join(val).rstrip()
                options[name] = self._interpolation.before_read(self,
                                                                section,
                                                                name, val)
def _read_defaults(self, defaults):
"""Read the defaults passed in the initializer.
Note: values can be non-string."""
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
    def _unify_values(self, section, vars):
        """Create a sequence of lookups with 'vars' taking priority over
        the 'section' which takes priority over the DEFAULTSECT.

        """
        sectiondict = {}
        try:
            sectiondict = self._sections[section]
        except KeyError:
            # Looking up the DEFAULT section by name is allowed; it simply
            # resolves through the empty sectiondict to self._defaults.
            if section != self.default_section:
                raise NoSectionError(section) from None
        # Update with the entry specific variables
        vardict = {}
        if vars:
            for key, value in vars.items():
                if value is not None:
                    value = str(value)
                vardict[self.optionxform(key)] = value
        # ChainMap searches vardict first, then the section, then defaults.
        return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
    def _validate_value_types(self, *, section="", option="", value=""):
        """Raises a TypeError for non-string values.

        The only legal non-string value if we allow valueless
        options is None, so we need to check if the value is a
        string if:
        - we do not allow valueless options, or
        - we allow valueless options but the value is not None

        For compatibility reasons this method is not used in classic set()
        for RawConfigParsers. It is invoked in every case for mapping protocol
        access and in ConfigParser.set().
        """
        # Empty-string defaults mean "not being validated in this call":
        # callers pass only the keyword(s) they want checked.
        if not isinstance(section, str):
            raise TypeError("section names must be strings")
        if not isinstance(option, str):
            raise TypeError("option keys must be strings")
        if not self._allow_no_value or value:
            if not isinstance(value, str):
                raise TypeError("option values must be strings")
    @property
    def converters(self):
        # Read-only accessor; mutate the returned ConverterMapping itself
        # to register additional get<name>() converters.
        return self._converters
class ConfigParser(RawConfigParser):
    """ConfigParser implementing interpolation."""

    # BasicInterpolation enables %(name)s expansion by default.
    _DEFAULT_INTERPOLATION = BasicInterpolation()

    def set(self, section, option, value=None):
        """Set an option. Extends RawConfigParser.set by validating type and
        interpolation syntax on the value."""
        self._validate_value_types(option=option, value=value)
        super().set(section, option, value)

    def add_section(self, section):
        """Create a new section in the configuration. Extends
        RawConfigParser.add_section by validating if the section name is
        a string."""
        self._validate_value_types(section=section)
        super().add_section(section)

    def _read_defaults(self, defaults):
        """Reads the defaults passed in the initializer, implicitly converting
        values to strings like the rest of the API.

        Does not perform interpolation for backwards compatibility.
        """
        # Temporarily swap in a no-op interpolation so read_dict() stores
        # the stringified defaults verbatim; restored even on error.
        try:
            hold_interpolation = self._interpolation
            self._interpolation = Interpolation()
            self.read_dict({self.default_section: defaults})
        finally:
            self._interpolation = hold_interpolation
class SafeConfigParser(ConfigParser):
    """ConfigParser alias for backwards compatibility purposes."""

    def __init__(self, *args, **kwargs):
        # Behaves exactly like ConfigParser apart from the warning below.
        super().__init__(*args, **kwargs)
        warnings.warn(
            "The SafeConfigParser class has been renamed to ConfigParser "
            "in Python 3.2. This alias will be removed in future versions."
            " Use ConfigParser directly instead.",
            DeprecationWarning, stacklevel=2
        )
class SectionProxy(MutableMapping):
    """A proxy for a single section from a parser."""

    def __init__(self, parser, name):
        """Creates a view on a section of the specified `name` in `parser`."""
        self._parser = parser
        self._name = name
        # Mirror every registered converter as a bound getter on this proxy
        # so proxy.getint('x') etc. work alongside plain item access.
        for conv in parser.converters:
            key = 'get' + conv
            getter = functools.partial(self.get, _impl=getattr(parser, key))
            setattr(self, key, getter)

    def __repr__(self):
        return '<Section: {}>'.format(self._name)

    def __getitem__(self, key):
        if not self._parser.has_option(self._name, key):
            raise KeyError(key)
        return self._parser.get(self._name, key)

    def __setitem__(self, key, value):
        # Validate eagerly so bad types fail here, not deep in the parser.
        self._parser._validate_value_types(option=key, value=value)
        return self._parser.set(self._name, key, value)

    def __delitem__(self, key):
        if not (self._parser.has_option(self._name, key) and
                self._parser.remove_option(self._name, key)):
            raise KeyError(key)

    def __contains__(self, key):
        return self._parser.has_option(self._name, key)

    def __len__(self):
        return len(self._options())

    def __iter__(self):
        return self._options().__iter__()

    def _options(self):
        # The DEFAULT section has no entry in parser.options(); fall back
        # to the defaults mapping itself.
        if self._name != self._parser.default_section:
            return self._parser.options(self._name)
        else:
            return self._parser.defaults()

    @property
    def parser(self):
        # The parser object of the proxy is read-only.
        return self._parser

    @property
    def name(self):
        # The name of the section on a proxy is read-only.
        return self._name

    def get(self, option, fallback=None, *, raw=False, vars=None,
            _impl=None, **kwargs):
        """Get an option value.

        Unless `fallback` is provided, `None` will be returned if the option
        is not found.
        """
        # If `_impl` is provided, it should be a getter method on the parser
        # object that provides the desired type conversion.
        if not _impl:
            _impl = self._parser.get
        return _impl(self._name, option, raw=raw, vars=vars,
                     fallback=fallback, **kwargs)
class ConverterMapping(MutableMapping):
    """Enables reuse of get*() methods between the parser and section proxies.

    If a parser class implements a getter directly, the value for the given
    key will be ``None``. The presence of the converter name here enables
    section proxies to find and use the implementation on the parser class.
    """

    # Extracts the converter name from a get<name>() method name.
    GETTERCRE = re.compile(r"^get(?P<name>.+)$")

    def __init__(self, parser):
        self._parser = parser
        self._data = {}
        # Discover getters already defined on the parser (getint, ...).
        for getter in dir(self._parser):
            m = self.GETTERCRE.match(getter)
            if not m or not callable(getattr(self._parser, getter)):
                continue
            self._data[m.group('name')] = None   # See class docstring.

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        try:
            # Non-string keys fail string concatenation with TypeError.
            k = 'get' + key
        except TypeError:
            raise ValueError('Incompatible key: {} (type: {})'
                             ''.format(key, type(key)))
        if k == 'get':
            raise ValueError('Incompatible key: cannot use "" as a name')
        self._data[key] = value
        func = functools.partial(self._parser._get_conv, conv=value)
        func.converter = value
        # Install the new getter on the parser and on every existing proxy.
        setattr(self._parser, k, func)
        for proxy in self._parser.values():
            getter = functools.partial(proxy.get, _impl=func)
            setattr(proxy, k, getter)

    def __delitem__(self, key):
        try:
            # `key or None` converts falsy keys into a TypeError, which is
            # reported to the caller as the mapping-conventional KeyError.
            k = 'get' + (key or None)
        except TypeError:
            raise KeyError(key)
        del self._data[key]
        for inst in itertools.chain((self._parser,), self._parser.values()):
            try:
                delattr(inst, k)
            except AttributeError:
                # don't raise since the entry was present in _data, silently
                # clean up
                continue

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)
| 39.851173
| 79
| 0.583752
|
4a1610799b3d8fb731b9481cd6b108f1866ff54b
| 3,238
|
py
|
Python
|
tests/ut/python/dataset/test_convertcolor.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/ut/python/dataset/test_convertcolor.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/ut/python/dataset/test_convertcolor.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing ConvertColor op in DE
"""
import cv2
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.utils as mode
from mindspore import log as logger
from util import visualize_image, diff_mse
# Shared test fixture locations (relative to the dataset test directory).
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
IMAGE_FILE = "../data/dataset/apple.jpg"
def convert_color(ms_convert, cv_convert, plot=False):
    """
    Compare MindSpore's ConvertColor against OpenCV's cvtColor.

    ms_convert: mindspore ConvertMode enum value under test.
    cv_convert: the matching cv2 color-conversion code.
    plot: when True, visualize original vs. converted images.
    """
    # Pipeline with Decode + ConvertColor applied.
    decode_op = c_vision.Decode()
    converted_ds = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
    converted_ds = converted_ds.map(operations=decode_op, input_columns=["image"])
    converted_ds = converted_ds.map(operations=c_vision.ConvertColor(ms_convert),
                                    input_columns=["image"])
    # Reference pipeline with only Decode applied.
    reference_ds = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    reference_ds = reference_ds.map(operations=decode_op, input_columns=["image"])
    sample_index = 0
    paired_rows = zip(converted_ds.create_dict_iterator(num_epochs=1, output_numpy=True),
                      reference_ds.create_dict_iterator(num_epochs=1, output_numpy=True))
    for converted_row, reference_row in paired_rows:
        # Only the first sample is compared, mirroring the original test.
        if sample_index > 0:
            break
        ms_image = converted_row["image"]
        original = reference_row["image"]
        cv_image = cv2.cvtColor(original, cv_convert)
        mse = diff_mse(ms_image, cv_image)
        logger.info("convertcolor_{}, mse: {}".format(sample_index + 1, mse))
        assert mse == 0
        sample_index += 1
        if plot:
            visualize_image(original, ms_image, mse, cv_image)
def test_convertcolor_pipeline(plot=False):
    """
    Test ConvertColor of c_transforms
    """
    logger.info("test_convertcolor_pipeline")
    # Each pair holds the MindSpore mode and the equivalent OpenCV code.
    conversions = (
        (mode.ConvertMode.COLOR_BGR2GRAY, cv2.COLOR_BGR2GRAY),
        (mode.ConvertMode.COLOR_BGR2RGB, cv2.COLOR_BGR2RGB),
        (mode.ConvertMode.COLOR_BGR2BGRA, cv2.COLOR_BGR2BGRA),
    )
    for ms_mode, cv_mode in conversions:
        convert_color(ms_mode, cv_mode, plot)
def test_convertcolor_eager():
    """
    Test ConvertColor with eager mode
    """
    logger.info("test_convertcolor")
    # Eager-mode op applied directly to a decoded numpy image.
    bgr_image = cv2.imread(IMAGE_FILE)
    ms_gray = c_vision.ConvertColor(mode.ConvertMode.COLOR_BGR2GRAY)(bgr_image)
    cv_gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    assert diff_mse(ms_gray, cv_gray) == 0
if __name__ == "__main__":
    # Allow running this file directly as a quick smoke test.
    test_convertcolor_pipeline(plot=False)
    test_convertcolor_eager()
| 36.795455
| 94
| 0.70908
|
4a1610f6c9cb9429adb8113e1112ea51ad93472f
| 4,030
|
py
|
Python
|
ctgan/synthesizer/tests/test_synthesizer.py
|
ljk423/ctgan-tf
|
916ae47e1932d5dd76152fc9e59e9de88142590d
|
[
"MIT"
] | 2
|
2020-11-26T18:59:04.000Z
|
2021-06-05T04:39:51.000Z
|
ctgan/synthesizer/tests/test_synthesizer.py
|
ljk423/ctgan-tf
|
916ae47e1932d5dd76152fc9e59e9de88142590d
|
[
"MIT"
] | null | null | null |
ctgan/synthesizer/tests/test_synthesizer.py
|
ljk423/ctgan-tf
|
916ae47e1932d5dd76152fc9e59e9de88142590d
|
[
"MIT"
] | 1
|
2021-01-17T15:20:39.000Z
|
2021-01-17T15:20:39.000Z
|
import tensorflow as tf
import numpy as np
import pandas as pd
import joblib
import os
from unittest import TestCase
from ctgan.utils import generate_data, get_test_variables
from ctgan.synthesizer import CTGANSynthesizer
class TestSynthesizer(TestCase):
    # Regression tests for CTGANSynthesizer: training weights, sampling and
    # (de)serialization are compared against precomputed fixture values.

    def setUp(self):
        # Shared test parameters plus the expected-values fixture produced
        # by a previous reference run.
        self._vars = get_test_variables()
        self._n_samples = 1
        self._current_dir = os.path.dirname(os.path.abspath(__file__))
        self._expected_values = joblib.load(os.path.join(
            self._current_dir, 'expected_values.joblib'))

    def tearDown(self):
        # Remove the model file a test may have dumped, then drop fixtures.
        model_path = os.path.join(self._current_dir, 'model_test.joblib')
        if os.path.exists(model_path):
            os.remove(model_path)
        del self._vars
        del self._n_samples
        del self._current_dir
        del self._expected_values

    def _assert_train_equal(self, data, discrete):
        # Train for one epoch and compare transformer tensors and network
        # weights against the stored reference values.
        model = CTGANSynthesizer(
            batch_size=self._vars['batch_size'], pac=self._vars['pac'])
        self.assertIsNotNone(model)
        model.train(data, discrete, epochs=1)
        outputs = {
            'output_tensor': [x.numpy()
                              for x in model._transformer.output_tensor],
            'cond_tensor': [x.numpy() for x in model._transformer.cond_tensor],
            'gen_weights': model._generator.get_weights(),
            'crt_weights': model._critic.get_weights(),
        }
        # Fixture index 0 = no discrete columns, 1 = with discrete columns.
        idx = int(len(discrete) > 0)
        for o in outputs:
            for i in range(len(outputs[o])):
                np.testing.assert_almost_equal(
                    outputs[o][i], self._expected_values[idx][o][i],
                    decimal=self._vars['decimal'])

    def test_train(self):
        # Fixed seeds make the single-epoch training deterministic.
        np.random.seed(0)
        tf.random.set_seed(0)
        data, discrete = generate_data(self._vars['batch_size'])
        self._assert_train_equal(data, [])
        self._assert_train_equal(data, discrete)

    def test_sample(self):
        # After seeded training, sampling must reproduce the fixed row.
        np.random.seed(0)
        tf.random.set_seed(0)
        data, discrete = generate_data(self._vars['batch_size'])
        model = CTGANSynthesizer(
            batch_size=self._vars['batch_size'], pac=self._vars['pac'])
        self.assertIsNotNone(model)
        model.train(data, discrete, epochs=1)
        output = model.sample(self._n_samples).values
        expected_output = np.array([[0.4139329, 3.0]])
        np.testing.assert_almost_equal(
            output, expected_output, decimal=self._vars['decimal'])

    def test_model_to_disk(self):
        # Round-trip: dump a trained model, reload it, and compare every
        # persisted attribute, transformer field and generator weight.
        np.random.seed(0)
        tf.random.set_seed(0)
        data, discrete = generate_data(self._vars['batch_size'])
        model = CTGANSynthesizer(
            batch_size=self._vars['batch_size'], pac=self._vars['pac'])
        self.assertIsNotNone(model)
        model.train(data, discrete, epochs=1)
        model_path = os.path.join(self._current_dir, 'model_test.joblib')
        model.dump(model_path, overwrite=True)
        loaded_model = CTGANSynthesizer(file_path=model_path)
        self.assertIsNotNone(loaded_model)
        for attr, value in loaded_model.__dict__.items():
            self.assertTrue(attr in model.__dict__)
            # Only plain scalar/tuple attributes are compared directly.
            if type(value) in [int, float, tuple]:
                self.assertEqual(value, model.__dict__[attr])
        np.testing.assert_equal(
            loaded_model._cond_generator.__dict__,
            model._cond_generator.__dict__)
        for attr, value in loaded_model._transformer.__dict__.items():
            # Dispatch on attribute type: pandas, tensors and plain values
            # each need a different equality helper.
            if isinstance(value, pd.Series):
                pd.testing.assert_series_equal(
                    value, model._transformer.__dict__[attr])
            elif isinstance(value, list) and isinstance(value[0], tf.Tensor):
                tf.assert_equal(value, model._transformer.__dict__[attr])
            else:
                np.testing.assert_equal(
                    value, model._transformer.__dict__[attr])
        np.testing.assert_equal(
            loaded_model._generator.get_weights(),
            model._generator.get_weights())
| 37.314815
| 79
| 0.628784
|
4a16115a4c772657d5e3d0429a04651aa027f693
| 6,542
|
py
|
Python
|
Computer science/Programming languages/Python/Python libraries/Networking/BeautifulSoup; working with HTML/documentation.py
|
chanchanchong/PYTHON-TRACK-IN-HYPERSKILL
|
462fe08ff4a2b183fd45a0235ab1ec7a788bd54c
|
[
"MIT"
] | null | null | null |
Computer science/Programming languages/Python/Python libraries/Networking/BeautifulSoup; working with HTML/documentation.py
|
chanchanchong/PYTHON-TRACK-IN-HYPERSKILL
|
462fe08ff4a2b183fd45a0235ab1ec7a788bd54c
|
[
"MIT"
] | null | null | null |
Computer science/Programming languages/Python/Python libraries/Networking/BeautifulSoup; working with HTML/documentation.py
|
chanchanchong/PYTHON-TRACK-IN-HYPERSKILL
|
462fe08ff4a2b183fd45a0235ab1ec7a788bd54c
|
[
"MIT"
] | null | null | null |
# Beautiful Soup Documentation
# Beautiful Soup is a Python library for pulling data out of
# HTML and XML files. It works with your favorite parser to
# provide idiomatic ways of navigating, searching, and
# modifying the parse tree. It commonly saves programmers hours
# or days of work.
# These instructions illustrate all major features of Beautiful
# Soup 4, with examples. I'll show you what the library is good for,
# how it works, how to use it,
# how to make it do what you want, and what to do when it
# violates your expectations.
# This document covers Beautiful Soup version 4.9.3. The
# examples in this documentation should work the same way in
# Python 2.7 and Python 3.8.
# You might be looking for the documentation for Beautiful Soup
# 3. If so, you should know that Beautiful Soup 3 is no longer
# being developed and that support for it will be dropped on or
# after December 31, 2020. If you want to learn about the
# differences between Beautiful Soup 3 and Beautiful Soup 4,
# see Porting code to BS4.
# Quick Start
# Here's an HTML document I'll be using as an example
# throughout this document. It's part of a story from Alice in
# Wonderland.
# Sample "three sisters" document (a snippet from Alice in Wonderland)
# used by all the examples in this walkthrough.
html_doc = """<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
# Running the "three sisters" document through Beautiful Soup
# gives us a BeautifulSoup object, which represents the document
# as a nested data structure:
from bs4 import BeautifulSoup
# 'html.parser' selects Python's built-in parser backend (no extra install).
soup = BeautifulSoup(html_doc, 'html.parser')
# print(soup.prettify())
# Here are some simple ways to navigate that data structure:
# print(soup.title)
# <title>The Dormouse's story</title>
# print(soup.title.name)
# title
# print(soup.title.string)
# The Dormouse's story
# print(soup.title.parent.name)
# head
# print(soup.p)
# <p class="title"><b>The Dormouse's story</b></p>
# print(soup.p['class'])
# ['title']
# print(soup.a)
# <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>
# print(soup.find_all('a'))
# # [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
# # <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
# # <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
# print(soup.find(id="link3"))
# <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>
# One common task is extracting all the URLs found within a
# page's <a> tags:
# for link in soup.find_all('a'):
# print(link.get('href'))
# http://example.com/elsie
# http://example.com/lacie
# http://example.com/tillie
# Another common task is extracting all the text from a page:
# print(soup.get_text())
# The Dormouse's story
#
# The Dormouse's story
#
# Once upon a time there were three little sisters; and their names were
# Elsie,
# Lacie and
# Tillie;
# and they lived at the bottom of a well.
#
# ...
# Kinds of objects
# Beautiful Soup transforms a complex HTML document into a
# complex tree of Python objects. But you'll only ever have to
# deal with about four kinds of objects: Tag, NavigableString,
# BeautifulSoup, and Comment.
# Tag
# A Tag object corresponds to an XML or HTML tag in the original
# document:
# soup = BeautifulSoup('<b class="boldest">Extremely bold</b>', 'html.parser')
# tag = soup.b
# print(type(tag))
# Tags have a lot of attributes and methods, and I'll cover most of
# them in Navigating the tree and Search the tree. For now,
# the most important features of a tag are its name and
# attributes.
# Name
# Every tag has a name, accessible as .name:
# print(tag.name)
# b
# If you change a tag's name, the change will be reflected in any
# HTML markup generated by Beautiful Soup:
# tag.name = "blockquote"
# print(tag)
# <blockquote class="boldest">Extremely bold</blockquote>
# Attributes
# A tag may have any number of attributes. The tag <b
# id="boldest"> has an attribute "id" whose value is "boldest". You
# can access a tag's attributes by treating the tag like a
# dictionary:
# print(tag['id'])
# boldest
# You can access that directly as .attrs:
# print(tag.attrs)
# {'id': 'boldest'}
# You can add, remove and modify a tag's attributes. Again, this
# is done by treating the tag as a dictionary:
# tag['id'] = 'verybold'
# tag['another-attribute'] = 1
# print(tag)
# <b another-attribute="1" id="verybold"></b>
# del tag['id']
# del tag['another-attribute']
# print(tag)
# print(tag['id'])
# KeyError: 'id'
# print(tag.get('id'))
# None
# Multi-valued attributes
# HTML 4 defines a few attributes that can have multiple values.
# HTML 5 removes a couple of them, but defines a few more.
# The most common multi-valued attribute is class (that is, a tag
# can have more than one CSS class). Others include rel, rev,
# accept-charset, headers, and accesskey. Beautiful Soup presents
# the value(s) of a multi-valued attribute as a list:
# css_soup = BeautifulSoup('<p class="body"></p>', 'html.parser')
# print(css_soup.p['class'])
# ['body']
# css_soup = BeautifulSoup('<p class="body strikeout"></p>', 'html.parser')
# print(css_soup.p['class'])
# ['body', 'strikeout']
# If an attribute looks like it has more than one value, but it's not
# a multi-valued attribute as defined by any version of the HTML
# standard, Beautiful Soup will leave the attribute alone:
# id_soup = BeautifulSoup('<p id="my id"></p>', 'html.parser')
# print(id_soup.p['id'])
# 'my id'
# When you turn a tag back into a string, multiple attribute values
# are consolidated:
# rel_soup = BeautifulSoup('<p>Back to the <a rel="index">homepage</a></p>', 'html.parser')
# print(rel_soup.a['rel'])
# Real-world example: an anchor tag (captured from a translation site)
# whose class attribute is multi-valued and which carries data-* attributes.
anchor = """<a href="/translation/french-english/bonjour" class="translation ltr dict adv" data-pos="[adv]" data-pos-index="0" data-posgroup="1" data-freq="17821" lang="fr" title="<div class='nobold'>See examples translated by <em class='translation'>bonjour</em><br>Adverb<br>(+10k examples with alignment)</div>">
<div class="pos-mark">
<span class="adv" title="Adverb"></span>
</div>
bonjour</a>"""
soup = BeautifulSoup(anchor, 'html.parser')
# Because class is multi-valued, matching on the single class name 'adv'
# still finds the tag even though it has four classes.
print(soup.find_all('a', {'class': 'adv'}))
| 33.721649
| 315
| 0.701162
|
4a16117447d9f51eae4bd1073bfc807974032d2f
| 3,026
|
py
|
Python
|
indico/modules/events/registration/settings.py
|
bkmgit/indico
|
d77ee121e35880a416b9b05e6098ea912d870b5c
|
[
"MIT"
] | 1
|
2021-06-11T20:02:10.000Z
|
2021-06-11T20:02:10.000Z
|
indico/modules/events/registration/settings.py
|
bkmgit/indico
|
d77ee121e35880a416b9b05e6098ea912d870b5c
|
[
"MIT"
] | null | null | null |
indico/modules/events/registration/settings.py
|
bkmgit/indico
|
d77ee121e35880a416b9b05e6098ea912d870b5c
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.settings.converters import EnumConverter
from indico.modules.designer import PageOrientation, PageSize
from indico.modules.events.registration.models.items import PersonalDataType
from indico.modules.events.settings import EventSettingsProxy
# Default layout settings for printing event badges (margins, page size
# and orientation, border style). Units of the margin values are not
# stated here -- presumably centimeters; confirm against the designer module.
DEFAULT_BADGE_SETTINGS = {
    'top_margin': 1.6,
    'bottom_margin': 1.1,
    'left_margin': 1.6,
    'right_margin': 1.4,
    'margin_columns': 1.0,
    'margin_rows': 0.0,
    'page_size': PageSize.A4,
    'page_orientation': PageOrientation.portrait,
    'dashed_border': True,
    'page_layout': None
}
# Converters that (de)serialize the enum-valued settings when stored.
BADGE_SETTING_CONVERTERS = {
    'page_orientation': EnumConverter(PageOrientation),
    'page_size': EnumConverter(PageSize)
}
class RegistrationSettingsProxy(EventSettingsProxy):
    """Store per-event registration settings."""

    def get_participant_list_columns(self, event, form=None):
        """Return the participant list columns for `event`.

        With ``form=None`` the columns of the merged participant list are
        returned; otherwise the columns stored for that specific form,
        falling back to the merged-list columns when nothing is stored.
        """
        if form is None:
            # Columns used when all forms are merged into a single list.
            return self.get(event, 'participant_list_columns')
        try:
            # JSON storage turns the int ids into strings; convert them back.
            stored_columns = self.get(event, 'participant_list_form_columns')[str(form.id)]
            return [int(column) for column in stored_columns]
        except (ValueError, KeyError):
            # Nothing stored for this form; derive ids from the merged-list column names.
            column_names = self.get_participant_list_columns(event)
            return [form.get_personal_data_field_id(PersonalDataType[name]) for name in column_names]

    def set_participant_list_columns(self, event, columns, form=None):
        """Store the participant list columns; a falsy `columns` clears them."""
        if form is not None:
            form_columns = self.get(event, 'participant_list_form_columns')
            if columns:
                # Keys must be strings: JSON object keys are always strings,
                # so store them that way explicitly to keep reads symmetric.
                form_columns[str(form.id)] = columns
            else:
                form_columns.pop(str(form.id), None)
            self.set(event, 'participant_list_form_columns', form_columns)
            return
        if columns:
            self.set(event, 'participant_list_columns', columns)
        else:
            self.delete(event, 'participant_list_columns')

    def get_participant_list_form_ids(self, event):
        """Return the ids of the forms included in the participant list."""
        # Int ids come back as strings from JSON storage; convert them back.
        return [int(form_id) for form_id in self.get(event, 'participant_list_forms')]

    def set_participant_list_form_ids(self, event, form_ids):
        """Store which forms are included in the participant list."""
        self.set(event, 'participant_list_forms', form_ids)
# Per-event proxy for badge settings, wired to the defaults and enum converters above.
event_badge_settings = EventSettingsProxy('badge', DEFAULT_BADGE_SETTINGS, converters=BADGE_SETTING_CONVERTERS)
| 39.815789
| 111
| 0.678784
|
4a1611c46c0282f7887721219bbe00dd1f5a8ae5
| 1,935
|
py
|
Python
|
easyp2p/platforms/peerberry.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 4
|
2019-07-18T10:58:28.000Z
|
2021-11-18T16:57:45.000Z
|
easyp2p/platforms/peerberry.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 1
|
2019-07-05T09:21:47.000Z
|
2019-07-05T09:21:47.000Z
|
easyp2p/platforms/peerberry.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 2
|
2019-07-05T08:56:34.000Z
|
2020-06-09T10:03:42.000Z
|
# Copyright (c) 2018-2020 Niko Sandschneider
"""
Download and parse PeerBerry statement.
"""
import json
from easyp2p.p2p_parser import P2PParser
from easyp2p.p2p_session import P2PSession
from easyp2p.platforms.base_platform import BasePlatform
class PeerBerry(BasePlatform):
    """
    Contains methods for downloading/parsing PeerBerry account statements.
    """

    NAME = 'PeerBerry'
    SUFFIX = 'xlsx'

    # Downloader settings
    DOWNLOAD_METHOD = 'session'
    LOGIN_URL = 'https://api.peerberry.com/v1/investor/login'
    LOGOUT_URL = 'https://api.peerberry.com/v1/investor/logout'

    # Parser settings
    DATE_FORMAT = '%Y-%m-%d'
    RENAME_COLUMNS = {
        'Currency': P2PParser.CURRENCY,
        'Date': P2PParser.DATE,
    }
    CASH_FLOW_TYPES = {
        'BUYBACK_INTEREST': P2PParser.BUYBACK_INTEREST_PAYMENT,
        'BUYBACK_PRINCIPAL': P2PParser.BUYBACK_PAYMENT,
        'INVESTMENT': P2PParser.INVESTMENT_PAYMENT,
        'REPAYMENT_INTEREST': P2PParser.INTEREST_PAYMENT,
        'REPAYMENT_PRINCIPAL': P2PParser.REDEMPTION_PAYMENT,
    }
    ORIG_CF_COLUMN = 'Type'
    VALUE_COLUMN = 'Amount'

    def _session_download(self, sess: P2PSession) -> None:
        """
        Generate and download the PeerBerry account statement for the
        configured date range.

        Args:
            sess: P2PSession instance.
        """
        # Authenticate, then attach the bearer token to every later request.
        login_response = sess.log_into_page(self.LOGIN_URL, 'email', 'password')
        token = json.loads(login_response.text)['access_token']
        sess.sess.headers.update({'Authorization': f'Bearer {token}'})

        start_date = self.date_range[0].strftime('%Y-%m-%d')
        end_date = self.date_range[1].strftime('%Y-%m-%d')
        statement_url = (
            f'https://api.peerberry.com/v1/investor/transactions/import?'
            f'startDate={start_date}&'
            f'endDate={end_date}&'
            f'transactionType=0&lang=en')
        sess.download_statement(statement_url, self.statement, 'get')
| 30.234375
| 76
| 0.658915
|
4a1612723bb7d4de6bff793a73dc6ca566e54f4b
| 6,218
|
py
|
Python
|
radiomicsfeatureextractionpipeline/backend/test/mock_ups/dal/series_repository.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
radiomicsfeatureextractionpipeline/backend/test/mock_ups/dal/series_repository.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | 6
|
2021-06-09T19:39:27.000Z
|
2021-09-30T16:41:40.000Z
|
radiomicsfeatureextractionpipeline/backend/test/mock_ups/dal/series_repository.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
from typing import List, Any, Dict, Optional
from dal.database_connector import DatabaseConnector
from dal.series_repository import SeriesRepository
from logic.entities.patient import Patient
from logic.entities.series import Series
from logic.entities.study import Study
from test.mock_ups.dal.repository import RepositoryMockUp
class SeriesRepositoryMockUp(SeriesRepository, RepositoryMockUp):
    """Test double for ``SeriesRepository``.

    Every query method records the arguments it was called with in a
    ``*_called_with_parameters`` list and returns a canned value that tests
    configure beforehand through the matching ``set_*_return_value`` method.
    """

    def __init__(self, database_connector: DatabaseConnector, query_directory: str) -> None:
        """Initialize with empty call records and ``None`` canned values."""
        super().__init__(database_connector, query_directory)
        # One (recorded calls, canned return value) pair per mocked method.
        self.get_all_series_called_with_parameters: List[Dict[Optional[str], Any]] = []
        self.get_all_series_return_value: Any = None
        self.get_all_series_of_modality_type_called_with_parameters: List[Dict[Optional[str], Any]] = []
        self.get_all_series_of_modality_type_return_value: Any = None
        self.get_all_series_from_study_called_with_parameters: List[Dict[Optional[str], Any]] = []
        self.get_all_series_from_study_return_value: Any = None
        self.get_all_series_from_study_of_modality_type_called_with_parameters: List[Dict[Optional[str], Any]] = []
        self.get_all_series_from_study_of_modality_type_return_value: Any = None
        self.get_all_series_from_patient_called_with_parameters: List[Dict[Optional[str], Any]] = []
        self.get_all_series_from_patient_return_type: Any = None
        self.get_all_series_from_patient_of_modality_type_called_with_parameters: List[Dict[Optional[str], Any]] = []
        self.get_all_series_from_patient_of_modality_type_return_type: Any = None
        self.get_series_from_id_called_with_parameters: List[Dict[Optional[str], Any]] = []
        self.get_series_from_id_return_value: Any = None

    def get_all_series(self) -> List[Series]:
        """Record the (argument-less) call and return the canned value."""
        self.get_all_series_called_with_parameters.append(
            {
                None: None
            }
        )
        return self.get_all_series_return_value

    def get_all_series_of_modality_type(self, modality: str) -> List[Series]:
        """Record the call and return the canned value."""
        self.get_all_series_of_modality_type_called_with_parameters.append(
            {
                'modality': modality
            }
        )
        return self.get_all_series_of_modality_type_return_value

    def get_all_series_from_study(self, study: Study) -> List[Series]:
        """Record the call and return the canned value."""
        self.get_all_series_from_study_called_with_parameters.append(
            {
                'study': study
            }
        )
        return self.get_all_series_from_study_return_value

    def get_all_series_from_study_of_modality_type(self, study: Study, modality: str) -> List[Series]:
        """Record the call and return the canned value."""
        self.get_all_series_from_study_of_modality_type_called_with_parameters.append(
            {
                'study': study,
                'modality': modality
            }
        )
        return self.get_all_series_from_study_of_modality_type_return_value

    def get_all_series_from_patient(self, patient: Patient) -> List[Series]:
        """Record the call and return the canned value."""
        self.get_all_series_from_patient_called_with_parameters.append(
            {
                'patient': patient
            }
        )
        return self.get_all_series_from_patient_return_type

    def get_all_series_from_patient_of_modality_type(self, patient: Patient, modality: str) -> List[Series]:
        """Record the call and return the canned value."""
        self.get_all_series_from_patient_of_modality_type_called_with_parameters.append(
            {
                'patient': patient,
                'modality': modality
            }
        )
        return self.get_all_series_from_patient_of_modality_type_return_type

    def get_series_from_id(self, series_id: str) -> Optional[Series]:
        """Record the call and return the canned value."""
        self.get_series_from_id_called_with_parameters.append(
            {
                'series_id': series_id
            }
        )
        return self.get_series_from_id_return_value

    def get_get_all_series_called_with_parameters(self) -> List[Dict[Optional[str], Any]]:
        """Return the recorded calls to ``get_all_series``."""
        return self.get_all_series_called_with_parameters

    def set_get_all_series_return_value(self, return_value: Any) -> None:
        """Configure the value returned by ``get_all_series``."""
        self.get_all_series_return_value = return_value

    def get_get_all_series_of_modality_type_called_with_parameters(self) -> List[Dict[Optional[str], Any]]:
        """Return the recorded calls to ``get_all_series_of_modality_type``."""
        return self.get_all_series_of_modality_type_called_with_parameters

    def set_get_all_series_of_modality_type_return_type(self, return_value: Any) -> None:
        """Configure the value returned by ``get_all_series_of_modality_type``."""
        self.get_all_series_of_modality_type_return_value = return_value

    def get_get_all_series_from_study_called_with_parameters(self) -> List[Dict[Optional[str], Any]]:
        """Return the recorded calls to ``get_all_series_from_study``."""
        return self.get_all_series_from_study_called_with_parameters

    def set_get_all_series_from_study_return_value(self, return_value: Any) -> None:
        """Configure the value returned by ``get_all_series_from_study``."""
        self.get_all_series_from_study_return_value = return_value

    def get_get_all_series_from_study_of_modality_type_called_with_parameters(self) -> List[Dict[Optional[str], Any]]:
        """Return the recorded calls to ``get_all_series_from_study_of_modality_type``."""
        return self.get_all_series_from_study_of_modality_type_called_with_parameters

    # Fixed: added the missing ``-> None`` for consistency with the sibling setters.
    def set_get_all_series_from_study_of_modality_type_return_value(self, return_value: Any) -> None:
        """Configure the value returned by ``get_all_series_from_study_of_modality_type``."""
        self.get_all_series_from_study_of_modality_type_return_value = return_value

    def get_get_all_series_from_patient_called_with_parameters(self) -> List[Dict[Optional[str], Any]]:
        """Return the recorded calls to ``get_all_series_from_patient``."""
        return self.get_all_series_from_patient_called_with_parameters

    # Fixed: the parameter was declared ``return_value = Any`` -- a *default
    # value* of the ``typing.Any`` object -- instead of the intended type
    # annotation ``return_value: Any`` used by every sibling setter.
    def set_get_all_series_from_patient_return_value(self, return_value: Any) -> None:
        """Configure the value returned by ``get_all_series_from_patient``."""
        self.get_all_series_from_patient_return_type = return_value

    def get_get_all_series_from_patient_of_modality_type_called_with_parameters(self) -> List[Dict[Optional[str], Any]]:
        """Return the recorded calls to ``get_all_series_from_patient_of_modality_type``."""
        return self.get_all_series_from_patient_of_modality_type_called_with_parameters

    def set_get_all_series_from_patient_of_modality_type_return_value(self, return_value: Any) -> None:
        """Configure the value returned by ``get_all_series_from_patient_of_modality_type``."""
        self.get_all_series_from_patient_of_modality_type_return_type = return_value

    def get_get_series_from_id_called_with_parameters(self) -> List[Dict[Optional[str], Any]]:
        """Return the recorded calls to ``get_series_from_id``."""
        return self.get_series_from_id_called_with_parameters

    # Fixed: added the missing ``-> None`` for consistency with the sibling setters.
    def set_get_series_from_id_return_value(self, return_value: Optional[Series]) -> None:
        """Configure the value returned by ``get_series_from_id``."""
        self.get_series_from_id_return_value = return_value
| 45.720588
| 120
| 0.745417
|
4a161286967e230220bc2976dd4eca63d5659bd7
| 289
|
py
|
Python
|
rethinkdb/helpers.py
|
MichalMazurek/rethinkdb-python
|
28a21960ad5a303e4690c6b3fb3da5b8d6c547ca
|
[
"Apache-2.0"
] | 1
|
2020-08-01T23:15:00.000Z
|
2020-08-01T23:15:00.000Z
|
rethinkdb/helpers.py
|
MichalMazurek/rethinkdb-python
|
28a21960ad5a303e4690c6b3fb3da5b8d6c547ca
|
[
"Apache-2.0"
] | null | null | null |
rethinkdb/helpers.py
|
MichalMazurek/rethinkdb-python
|
28a21960ad5a303e4690c6b3fb3da5b8d6c547ca
|
[
"Apache-2.0"
] | null | null | null |
import six
def decode_utf8(string, encoding='utf-8'):
    """Decode *string* with *encoding* when it is a bytes-like object.

    Objects without a ``decode`` method (already-decoded text) are
    returned unchanged.
    """
    decode = getattr(string, 'decode', None)
    if decode is None:
        return string
    return decode(encoding)
def chain_to_bytes(*strings):
    """Join text and byte strings into a single bytes object.

    Text arguments are converted with ``six.b`` before joining; byte
    arguments are passed through untouched.
    """
    pieces = []
    for item in strings:
        if isinstance(item, six.string_types):
            pieces.append(six.b(item))
        else:
            pieces.append(item)
    return b''.join(pieces)
| 26.272727
| 110
| 0.712803
|
4a1612a00115805e1da874f66292cb43007da142
| 7,929
|
py
|
Python
|
i18n/generate.py
|
eduNEXT/i18n-tools
|
99b20c17d1a0ca07a8839f33e0e9068248a581e5
|
[
"Apache-2.0"
] | 1
|
2021-04-01T17:26:41.000Z
|
2021-04-01T17:26:41.000Z
|
i18n/generate.py
|
eduNEXT/i18n-tools
|
99b20c17d1a0ca07a8839f33e0e9068248a581e5
|
[
"Apache-2.0"
] | null | null | null |
i18n/generate.py
|
eduNEXT/i18n-tools
|
99b20c17d1a0ca07a8839f33e0e9068248a581e5
|
[
"Apache-2.0"
] | 1
|
2019-02-03T03:18:21.000Z
|
2019-02-03T03:18:21.000Z
|
#!/usr/bin/env python
"""
See https://edx-wiki.atlassian.net/wiki/display/ENG/PO+File+workflow
This task merges and compiles the human-readable .po files on the
local filesystem into machine-readable .mo files. This is typically
necessary as part of the build process since these .mo files are
needed by Django when serving the web app.
The configuration file (in edx-platform/conf/locale/config.yaml) specifies which
languages to generate.
"""
import codecs
import logging
import os
import re
import sys
from path import Path as path
from polib import pofile
from i18n import Runner
from i18n.execute import execute
LOG = logging.getLogger(__name__)
# Sink used to silence subprocess stderr when not running verbosely.
DEVNULL = open(os.devnull, "wb")
# Matches the conflict markers msgcat inserts between duplicate translations.
DUPLICATE_ENTRY_PATTERN = re.compile('#-#-#-#-#.*#-#-#-#-#')
def merge(configuration, locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True):
    """
    Merge the `sources` files of the given `locale` into the `target` file.

    The target file may itself be one of the sources.  When
    `fail_if_missing` is true, a missing source raises; otherwise the
    merge is silently skipped.
    """
    LOG.info('Merging %s locale %s', target, locale)
    locale_directory = configuration.get_messages_dir(locale)
    try:
        validate_files(locale_directory, sources)
    except Exception:  # pylint: disable=broad-except
        if fail_if_missing:
            raise
        return

    # msgcat writes the combined catalog into merged.po.
    execute('msgcat -o merged.po ' + ' '.join(sources), working_directory=locale_directory)

    # Clean up redundancies in the merged metadata.
    merged_filename = locale_directory.joinpath('merged.po')
    duplicate_entries = clean_pofile(merged_filename)

    # Rename merged.po to the requested target (django.po by default).
    target_filename = locale_directory.joinpath(target)
    os.rename(merged_filename, target_filename)

    # Record any duplicate messages in a .dup file next to the target.
    if duplicate_entries:
        dup_file = target_filename.replace(".po", ".dup")
        with codecs.open(dup_file, "w", encoding="utf8") as dfile:
            for entry, translations in duplicate_entries:
                dfile.write(u"{}\n".format(entry))
                dfile.write(u"Translations found were:\n\t{}\n\n".format(translations))
        LOG.warning(" %s duplicates in %s, details in .dup file", len(duplicate_entries), target_filename)
def merge_files(configuration, locale, fail_if_missing=True):
    """Run every merge configured in config.yaml for the given `locale`."""
    for target_name, source_files in configuration.generate_merge.items():
        merge(configuration, locale, target_name, source_files, fail_if_missing)
def clean_pofile(pofile_path):
    """
    Clean various aspects of a .po file.

    Fixes:
    - Removes the fuzzy flag msgcat puts on the metadata.
    - Strips occurrence line numbers so committed files stay diff-friendly.
    - Resolves merge-conflict (duplicate) translations by keeping the first.

    Returns a list of (warning message, conflicting translations) pairs,
    one per duplicate entry found.
    """
    # Round-tripping the file through polib fixes redundancies by itself.
    pomsgs = pofile(pofile_path)
    # msgcat marks the metadata as fuzzy, but it is fine as it is.
    pomsgs.metadata_is_fuzzy = False
    duplicate_entries = []

    for entry in pomsgs:
        # Drop the line numbers, keeping only the file names.
        entry.occurrences = [(filename, None) for filename, _unused in entry.occurrences]

        # A fuzzy flag on an entry means msgcat found conflicting translations.
        if 'fuzzy' not in entry.flags:
            continue
        entry.flags = [flag for flag in entry.flags if flag != 'fuzzy']

        # Save a warning message before the conflict is resolved below.
        warning = 'Multiple translations found for single string.\n\tString "{0}"\n\tPresent in files {1}'.format(
            entry.msgid,
            [f for (f, __) in entry.occurrences]
        )
        duplicate_entries.append((warning, entry.msgstr))

        # Keep the first non-empty candidate translation (strip the extra
        # newlines the conflict markers leave behind).
        for candidate in DUPLICATE_ENTRY_PATTERN.split(entry.msgstr):
            if not candidate:
                continue
            entry.msgstr = candidate.strip()
            # Leading/trailing newlines in the id would break the catalog.
            if entry.msgid.startswith('\n') or entry.msgid.endswith('\n'):
                raise ValueError(
                    u'{} starts or ends with a new line character, which is not allowed. '
                    'Please fix before continuing. Source string is found in {}'.format(
                        entry.msgid, entry.occurrences
                    ).encode('utf-8')
                )
            break

    pomsgs.save()
    return duplicate_entries
def validate_files(directory, files_to_merge):
    """
    Assert that every file in `files_to_merge` exists inside `directory`.

    `files_to_merge` is a list of bare file names; `directory` is a
    path object from path.py.  Raises Exception on any missing file.
    Existing source files are also cleaned in place.
    """
    for name in files_to_merge:
        pathname = directory.joinpath(name)
        if not pathname.exists():
            raise Exception("I18N: Cannot generate because file not found: {0}".format(pathname))
        # Clean each source file before it is merged.
        clean_pofile(pathname)
class Generate(Runner):
    """Generate merged and compiled message files."""

    def add_args(self):
        self.parser.description = "Generate merged and compiled message files."
        self.parser.add_argument("--strict", action='store_true', help="Complain about missing files.")
        self.parser.add_argument("--ltr", action='store_true', help="Only generate for LTR languages.")
        self.parser.add_argument("--rtl", action='store_true', help="Only generate for RTL languages.")

    def run(self, args):
        """Main entry point for the script."""
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        configuration = self.configuration

        if args.ltr:
            languages = configuration.ltr_langs
        elif args.rtl:
            languages = configuration.rtl_langs
        else:
            languages = configuration.translated_locales

        for locale in languages:
            merge_files(configuration, locale, fail_if_missing=args.strict)
        # Dummy text is optional: missing files are not an error there.
        for locale in configuration.dummy_locales:
            merge_files(configuration, locale, fail_if_missing=False)
        # Merge the source locale too, so the canonical .po files exist.
        if configuration.source_locale not in languages:
            merge_files(configuration, configuration.source_locale, fail_if_missing=args.strict)

        stderr = None if args.verbose else DEVNULL
        execute(
            'django-admin.py compilemessages -v{}'.format(args.verbose),
            working_directory=configuration.root_dir,
            stderr=stderr,
        )

        # Copy directories around for any mapped languages.
        for source_locale, dest_locale in configuration.edx_lang_map.items():
            source_dirname = configuration.get_messages_dir(source_locale)
            dest_dirname = configuration.get_messages_dir(dest_locale)
            LOG.info("Copying mapped locale %s to %s", source_dirname, dest_dirname)
            path.rmtree_p(path(dest_dirname))
            path.copytree(path(source_dirname), path(dest_dirname))
# Module-level runner instance; it is invoked as a callable below, so
# presumably the Runner base class defines __call__ -- confirm in i18n.Runner.
main = Generate()  # pylint: disable=invalid-name
if __name__ == '__main__':
    main()
| 38.678049
| 118
| 0.656829
|
4a1612d2e148834369dfbed64f45d0799d7f7ce3
| 491
|
py
|
Python
|
gallery/migrations/0001_initial.py
|
gabyxbinnaeah/Photo-Gallery
|
6155df3a70d0955a01e6f2257789076c6a85abf4
|
[
"MIT"
] | null | null | null |
gallery/migrations/0001_initial.py
|
gabyxbinnaeah/Photo-Gallery
|
6155df3a70d0955a01e6f2257789076c6a85abf4
|
[
"MIT"
] | null | null | null |
gallery/migrations/0001_initial.py
|
gabyxbinnaeah/Photo-Gallery
|
6155df3a70d0955a01e6f2257789076c6a85abf4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-04 10:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Locations model."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Locations',
            fields=[
                # Auto-generated BigAutoField primary key plus the location name.
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
    ]
| 22.318182
| 117
| 0.578411
|
4a1613270b9760a5203acaee92592581a40d8f21
| 7,006
|
py
|
Python
|
aiida/backends/tests/cmdline/commands/test_database.py
|
borellim/aiida_core
|
eebef392c81e8b130834a92e1d7abf5e2e30b3ce
|
[
"BSD-2-Clause"
] | 1
|
2019-03-15T10:37:53.000Z
|
2019-03-15T10:37:53.000Z
|
aiida/backends/tests/cmdline/commands/test_database.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/tests/cmdline/commands/test_database.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,protected-access
"""Tests for `verdi database`."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import enum
from click.testing import CliRunner
from aiida.backends.testbase import AiidaTestCase
from aiida.cmdline.commands import cmd_database
from aiida.common.links import LinkType
from aiida.orm import Data, Node, CalculationNode, WorkflowNode
class TestVerdiDatabasaIntegrity(AiidaTestCase):
    """Tests for `verdi database integrity`."""

    def setUp(self):
        self.cli_runner = CliRunner()

    def tearDown(self):
        self.reset_database()

    def _assert_no_problems(self, command):
        """Invoke `command` and assert it reports no integrity problems."""
        result = self.cli_runner.invoke(command, [])
        self.assertEqual(result.exit_code, 0)
        self.assertClickResultNoException(result)

    def _assert_problems_detected(self, command):
        """Invoke `command` and assert it fails with an exception."""
        result = self.cli_runner.invoke(command, [])
        self.assertNotEqual(result.exit_code, 0)
        self.assertIsNotNone(result.exception)

    def test_detect_invalid_links_workflow_create(self):
        """Test `verdi database integrity detect-invalid-links` outgoing `create` from `workflow`."""
        self._assert_no_problems(cmd_database.detect_invalid_links)

        # Create an invalid link: outgoing `create` from a workflow
        data = Data().store().backend_entity
        workflow = WorkflowNode().store().backend_entity
        data.add_incoming(workflow, link_type=LinkType.CREATE, link_label='create')

        self._assert_problems_detected(cmd_database.detect_invalid_links)

    def test_detect_invalid_links_calculation_return(self):
        """Test `verdi database integrity detect-invalid-links` outgoing `return` from `calculation`."""
        self._assert_no_problems(cmd_database.detect_invalid_links)

        # Create an invalid link: outgoing `return` from a calculation
        data = Data().store().backend_entity
        calculation = CalculationNode().store().backend_entity
        data.add_incoming(calculation, link_type=LinkType.RETURN, link_label='return')

        self._assert_problems_detected(cmd_database.detect_invalid_links)

    def test_detect_invalid_links_calculation_call(self):
        """Test `verdi database integrity detect-invalid-links` outgoing `call` from `calculation`."""
        self._assert_no_problems(cmd_database.detect_invalid_links)

        # Create an invalid link: outgoing `call` from a calculation
        workflow = WorkflowNode().store().backend_entity
        calculation = CalculationNode().store().backend_entity
        workflow.add_incoming(calculation, link_type=LinkType.CALL_WORK, link_label='call')

        self._assert_problems_detected(cmd_database.detect_invalid_links)

    def test_detect_invalid_links_create_links(self):
        """Test `verdi database integrity detect-invalid-links` when there are multiple incoming `create` links."""
        self._assert_no_problems(cmd_database.detect_invalid_links)

        # Create an invalid link: two `create` links into the same data node
        data = Data().store().backend_entity
        calculation = CalculationNode().store().backend_entity
        data.add_incoming(calculation, link_type=LinkType.CREATE, link_label='create')
        data.add_incoming(calculation, link_type=LinkType.CREATE, link_label='create')

        self._assert_problems_detected(cmd_database.detect_invalid_links)

    def test_detect_invalid_links_call_links(self):
        """Test `verdi database integrity detect-invalid-links` when there are multiple incoming `call` links."""
        self._assert_no_problems(cmd_database.detect_invalid_links)

        # Create an invalid link: two `call` links into the same calculation
        workflow = WorkflowNode().store().backend_entity
        calculation = CalculationNode().store().backend_entity
        calculation.add_incoming(workflow, link_type=LinkType.CALL_CALC, link_label='call')
        calculation.add_incoming(workflow, link_type=LinkType.CALL_CALC, link_label='call')

        self._assert_problems_detected(cmd_database.detect_invalid_links)

    def test_detect_invalid_links_unknown_link_type(self):
        """Test `verdi database integrity detect-invalid-links` when link type is invalid."""
        self._assert_no_problems(cmd_database.detect_invalid_links)

        class WrongLinkType(enum.Enum):

            WRONG_CREATE = 'wrong_create'

        # Create an invalid link: a link type that no version of the schema defines
        data = Data().store().backend_entity
        calculation = CalculationNode().store().backend_entity
        data.add_incoming(calculation, link_type=WrongLinkType.WRONG_CREATE, link_label='create')

        self._assert_problems_detected(cmd_database.detect_invalid_links)

    def test_detect_invalid_nodes_unknown_node_type(self):
        """Test `verdi database integrity detect-invalid-nodes` when node type is invalid."""
        self._assert_no_problems(cmd_database.detect_invalid_nodes)

        # A base Node type string is considered invalid. Storing base Nodes is
        # normally guarded against, so temporarily disable the guard.
        Node._storable = True
        Node().store()
        Node._storable = False

        self._assert_problems_detected(cmd_database.detect_invalid_nodes)
| 46.092105
| 115
| 0.690979
|
4a161330fe015383fbc607159b9aeb5ee0be2b81
| 284
|
py
|
Python
|
boids/record_fixture.py
|
ioanadiana/badboids
|
3982c722a16dd59298e9374837690b3f2da708ca
|
[
"MIT"
] | null | null | null |
boids/record_fixture.py
|
ioanadiana/badboids
|
3982c722a16dd59298e9374837690b3f2da708ca
|
[
"MIT"
] | null | null | null |
boids/record_fixture.py
|
ioanadiana/badboids
|
3982c722a16dd59298e9374837690b3f2da708ca
|
[
"MIT"
] | null | null | null |
"""Generate a regression fixture: flock state before and after one update."""
import yaml
import boids
from copy import deepcopy

# Snapshot the flock, advance it one step, and record both states so the
# update can be replayed deterministically by the tests.
before = deepcopy(boids.boids)
boids.update_boids(boids.boids)
after = boids.boids
fixture = {"before": before, "after": after}

# Context manager guarantees the file is closed even if yaml.dump raises;
# the original open/write/close sequence leaked the handle on error.
with open("fixture_update_position.yml", 'w') as fixture_file:
    fixture_file.write(yaml.dump(fixture))
| 23.666667
| 52
| 0.809859
|
4a161355c96432da19609497891858fded684d17
| 253
|
py
|
Python
|
gabrieltool/statemachine/predicate_zoo.py
|
phtruongan/state-machine-editor
|
f0bbf4260c5821237d253870a9fb07304111f94b
|
[
"Apache-2.0"
] | null | null | null |
gabrieltool/statemachine/predicate_zoo.py
|
phtruongan/state-machine-editor
|
f0bbf4260c5821237d253870a9fb07304111f94b
|
[
"Apache-2.0"
] | null | null | null |
gabrieltool/statemachine/predicate_zoo.py
|
phtruongan/state-machine-editor
|
f0bbf4260c5821237d253870a9fb07304111f94b
|
[
"Apache-2.0"
] | 1
|
2019-07-02T12:15:56.000Z
|
2019-07-02T12:15:56.000Z
|
# -*- coding: utf-8 -*-
"""Processing Function on State Machine Inputs.
"""
def has_obj_cls(app_state, cls_name):
    """Return True when *cls_name* is a key present in *app_state*."""
    return cls_name in app_state
def always(app_state):
    """Predicate that succeeds unconditionally, ignoring *app_state*."""
    return True
def tpod_dnn(img, **kwargs):
    """Placeholder TPOD DNN hook: logs the call and returns None."""
    print('tpod_dnn called!')
| 15.8125
| 47
| 0.675889
|
4a1613998e7c861d69287648fc13969365539cef
| 1,237
|
py
|
Python
|
tests/test_regions.py
|
i4s-pserrano/python-nomad
|
0f8dd9dfa1d448465be490f0acf9f5df96cd893f
|
[
"MIT"
] | 109
|
2016-06-06T09:18:02.000Z
|
2022-03-17T17:41:20.000Z
|
tests/test_regions.py
|
i4s-pserrano/python-nomad
|
0f8dd9dfa1d448465be490f0acf9f5df96cd893f
|
[
"MIT"
] | 104
|
2016-06-04T23:06:06.000Z
|
2021-12-08T04:49:43.000Z
|
tests/test_regions.py
|
i4s-pserrano/python-nomad
|
0f8dd9dfa1d448465be490f0acf9f5df96cd893f
|
[
"MIT"
] | 80
|
2016-06-05T00:33:23.000Z
|
2021-11-20T15:17:38.000Z
|
import pytest
import sys
# integration tests requires nomad Vagrant VM or Binary running
def test_get_regions(nomad_setup):
    """The regions endpoint must return a list of region names."""
    # Direct truthiness of isinstance; comparing a bool with `== True`
    # is an anti-idiom (flake8 E712).
    assert isinstance(nomad_setup.regions.get_regions(), list)
def test_dunder_getitem_exist(nomad_setup):
    """Indexing an existing region yields a text string (str on py3, unicode on py2)."""
    region = nomad_setup.regions["global"]
    expected_type = str if int(sys.version[0]) == 3 else unicode
    assert isinstance(region, expected_type)
def test_dunder_getitem_not_exist(nomad_setup):
    """Indexing an unknown region raises KeyError."""
    with pytest.raises(KeyError):
        nomad_setup.regions["us-east-1"]
def test_dunder_contain_exists(nomad_setup):
    """Membership test succeeds for the default region."""
    regions = nomad_setup.regions
    assert "global" in regions
def test_dunder_contain_not_exist(nomad_setup):
    """Membership test fails for a region the cluster does not have."""
    regions = nomad_setup.regions
    assert "us-east-1" not in regions
def test_dunder_str(nomad_setup):
    """str() of the regions endpoint yields a string."""
    text = str(nomad_setup.regions)
    assert isinstance(text, str)
def test_dunder_repr(nomad_setup):
    """repr() of the regions endpoint yields a string."""
    representation = repr(nomad_setup.regions)
    assert isinstance(representation, str)
def test_dunder_getattr(nomad_setup):
    """Accessing an unknown attribute raises AttributeError."""
    with pytest.raises(AttributeError):
        nomad_setup.regions.does_not_exist
def test_dunder_iter(nomad_setup):
    """The regions endpoint advertises and supports iteration."""
    assert hasattr(nomad_setup.regions, '__iter__')
    for _region in nomad_setup.regions:
        pass
def test_dunder_len(nomad_setup):
    """len() of the regions endpoint is a non-negative count."""
    count = len(nomad_setup.regions)
    assert count >= 0
| 22.907407
| 70
| 0.743735
|
4a1613d7fd2c83d5e078983d841accb7c774d6bf
| 41,125
|
py
|
Python
|
backend/venv/lib/python3.9/site-packages/pip/_vendor/distlib/wheel.py
|
LucaCilibrasi/docker_viruclust
|
88149c17fd4b94a54397d0cb4a9daece00122c49
|
[
"Apache-2.0"
] | null | null | null |
backend/venv/lib/python3.9/site-packages/pip/_vendor/distlib/wheel.py
|
LucaCilibrasi/docker_viruclust
|
88149c17fd4b94a54397d0cb4a9daece00122c49
|
[
"Apache-2.0"
] | null | null | null |
backend/venv/lib/python3.9/site-packages/pip/_vendor/distlib/wheel.py
|
LucaCilibrasi/docker_viruclust
|
88149c17fd4b94a54397d0cb4a9daece00122c49
|
[
"Apache-2.0"
] | 1
|
2022-01-13T10:05:55.000Z
|
2022-01-13T10:05:55.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from email import message_from_file
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import (Metadata, WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME)
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)

cache = None    # created when needed

# Short implementation prefix used in wheel tags: pp (PyPy), jy (Jython),
# ip (IronPython), cp (CPython).
if hasattr(sys, 'pypy_version_info'):  # pragma: no cover
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):  # pragma: no cover
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':  # pragma: no cover
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'

# e.g. '39' for Python 3.9; fall back to formatting version_info when the
# sysconfig variable is unavailable.
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX:   # pragma: no cover
    VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX

# Platform tag, normalized for filename use (e.g. 'linux_x86_64').
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')

# ABI tag; derive one from build flags when SOABI is not provided.
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp')
else:
    def _derive_abi():
        # Reconstruct the CPython ABI tag from the debug/pymalloc/wide-unicode
        # build options (pre-SOABI interpreters).
        parts = ['cp', VER_SUFFIX]
        if sysconfig.get_config_var('Py_DEBUG'):
            parts.append('d')
        if sysconfig.get_config_var('WITH_PYMALLOC'):
            parts.append('m')
        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
            parts.append('u')
        return ''.join(parts)
    ABI = _derive_abi()
    del _derive_abi

# PEP 427 wheel filename: name-version(-build)?-pyver-abi-arch.whl
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)

# Bare name-version(-build) spec, without the tag triple.
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)

# Shebang handling for scripts copied out of wheels.
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'

# Convert OS paths to the forward-slash form used inside zip archives.
if os.sep == '/':
    to_posix = lambda o: o
else:
    to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
    """Import hook for C extensions contained in mounted wheels.

    An instance acts as both finder and loader on ``sys.meta_path``,
    mapping fully-qualified module names to the extension files
    registered for each mounted wheel.
    """

    def __init__(self):
        # wheel pathname -> (name, path) pairs registered for that wheel
        self.impure_wheels = {}
        # module name -> extension file path, across all mounted wheels
        self.libs = {}

    def add(self, pathname, extensions):
        """Register *extensions* ((name, path) pairs) for wheel *pathname*."""
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        """Forget every extension registered for wheel *pathname*."""
        extensions = self.impure_wheels.pop(pathname)
        for name, _path in extensions:
            self.libs.pop(name, None)

    def find_module(self, fullname, path=None):
        """Return self as the loader when *fullname* is a known extension."""
        return self if fullname in self.libs else None

    def load_module(self, fullname):
        """Load (or fetch from sys.modules) the extension *fullname*."""
        try:
            return sys.modules[fullname]
        except KeyError:
            pass
        if fullname not in self.libs:
            raise ImportError('unable to find extension for %s' % fullname)
        result = imp.load_dynamic(fullname, self.libs[fullname])
        result.__loader__ = self
        parts = fullname.rsplit('.', 1)
        if len(parts) > 1:
            result.__package__ = parts[0]
        return result
# Module-level singleton installed on sys.meta_path by Wheel.mount and
# removed again by Wheel.unmount.
_hook = Mounter()
class Wheel(object):
    """
    Class to build and install from Wheel files (PEP 427).
    """

    # Wheel spec version this implementation writes.
    wheel_version = (1, 1)
    # Hash algorithm used for RECORD entries.
    hash_kind = 'sha256'

    def __init__(self, filename=None, sign=False, verify=False):
        """
        Initialise an instance using a (valid) filename.

        *filename* may be a full wheel filename, a bare ``name-version``
        (optionally ``-build``) spec, or None for a dummy placeholder.
        """
        self.sign = sign
        self.should_verify = verify
        self.buildver = ''
        self.pyver = [PYVER]
        self.abi = ['none']
        self.arch = ['any']
        self.dirname = os.getcwd()
        if filename is None:
            self.name = 'dummy'
            self.version = '0.1'
            self._filename = self.filename
        else:
            m = NAME_VERSION_RE.match(filename)
            if m:
                info = m.groupdict('')
                self.name = info['nm']
                # Reinstate the local version separator
                self.version = info['vn'].replace('_', '-')
                self.buildver = info['bn']
                self._filename = self.filename
            else:
                dirname, filename = os.path.split(filename)
                m = FILENAME_RE.match(filename)
                if not m:
                    raise DistlibException('Invalid name or '
                                           'filename: %r' % filename)
                if dirname:
                    self.dirname = os.path.abspath(dirname)
                self._filename = filename
                info = m.groupdict('')
                self.name = info['nm']
                self.version = info['vn']
                self.buildver = info['bn']
                self.pyver = info['py'].split('.')
                self.abi = info['bi'].split('.')
                self.arch = info['ar'].split('.')

    @property
    def filename(self):
        """
        Build and return a filename from the various components.
        """
        if self.buildver:
            buildver = '-' + self.buildver
        else:
            buildver = ''
        pyver = '.'.join(self.pyver)
        abi = '.'.join(self.abi)
        arch = '.'.join(self.arch)
        # replace - with _ as a local version separator
        version = self.version.replace('-', '_')
        return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
                                         pyver, abi, arch)

    @property
    def exists(self):
        # True when the wheel file is present in self.dirname.
        path = os.path.join(self.dirname, self.filename)
        return os.path.isfile(path)

    @property
    def tags(self):
        # Yield every (pyver, abi, arch) combination this wheel claims.
        for pyver in self.pyver:
            for abi in self.abi:
                for arch in self.arch:
                    yield pyver, abi, arch

    @cached_property
    def metadata(self):
        """Read and return the distribution Metadata from the archive.

        Tries the candidate metadata filenames in order and raises
        ValueError when none is present.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            wheel_metadata = self.get_wheel_metadata(zf)
            wv = wheel_metadata['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # if file_version < (1, 1):
                # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,
                       # LEGACY_METADATA_FILENAME]
            # else:
                # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
            fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
            result = None
            for fn in fns:
                try:
                    metadata_filename = posixpath.join(info_dir, fn)
                    with zf.open(metadata_filename) as bf:
                        wf = wrapper(bf)
                        result = Metadata(fileobj=wf)
                        if result:
                            break
                except KeyError:
                    pass
            if not result:
                raise ValueError('Invalid wheel, because metadata is '
                                 'missing: looked in %s' % ', '.join(fns))
        return result

    def get_wheel_metadata(self, zf):
        """Return the dist-info WHEEL file of open ZipFile *zf* as a dict."""
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        metadata_filename = posixpath.join(info_dir, 'WHEEL')
        with zf.open(metadata_filename) as bf:
            wf = codecs.getreader('utf-8')(bf)
            message = message_from_file(wf)
        return dict(message)

    @cached_property
    def info(self):
        """WHEEL metadata of this archive, as a dict (cached)."""
        pathname = os.path.join(self.dirname, self.filename)
        with ZipFile(pathname, 'r') as zf:
            result = self.get_wheel_metadata(zf)
        return result

    def process_shebang(self, data):
        """Normalise the shebang of script bytes *data* to #!python(w).

        Preserves any interpreter arguments; inserts a shebang (with the
        file's own line-ending style) when none is present.
        """
        m = SHEBANG_RE.match(data)
        if m:
            end = m.end()
            shebang, data_after_shebang = data[:end], data[end:]
            # Preserve any arguments after the interpreter
            if b'pythonw' in shebang.lower():
                shebang_python = SHEBANG_PYTHONW
            else:
                shebang_python = SHEBANG_PYTHON
            m = SHEBANG_DETAIL_RE.match(shebang)
            if m:
                args = b' ' + m.groups()[-1]
            else:
                args = b''
            shebang = shebang_python + args
            data = shebang + data_after_shebang
        else:
            cr = data.find(b'\r')
            lf = data.find(b'\n')
            if cr < 0 or cr > lf:
                term = b'\n'
            else:
                if data[cr:cr + 2] == b'\r\n':
                    term = b'\r\n'
                else:
                    term = b'\r'
            data = SHEBANG_PYTHON + term + data
        return data

    def get_hash(self, data, hash_kind=None):
        """Return (hash_kind, urlsafe-base64 digest) of bytes *data*.

        Raises DistlibException for an algorithm hashlib doesn't provide.
        """
        if hash_kind is None:
            hash_kind = self.hash_kind
        try:
            hasher = getattr(hashlib, hash_kind)
        except AttributeError:
            raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
        result = hasher(data).digest()
        result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
        return hash_kind, result

    def write_record(self, records, record_path, base):
        """Write *records* plus RECORD's own (digest-less) row as CSV."""
        records = list(records) # make a copy, as mutated
        p = to_posix(os.path.relpath(record_path, base))
        records.append((p, '', ''))
        with CSVWriter(record_path) as writer:
            for row in records:
                writer.writerow(row)

    def write_records(self, info, libdir, archive_paths):
        """Hash each file in *archive_paths*, write RECORD and add it to the archive list."""
        records = []
        distinfo, info_dir = info
        hasher = getattr(hashlib, self.hash_kind)
        for ap, p in archive_paths:
            with open(p, 'rb') as f:
                data = f.read()
            digest = '%s=%s' % self.get_hash(data)
            size = os.path.getsize(p)
            records.append((ap, digest, size))

        p = os.path.join(distinfo, 'RECORD')
        self.write_record(records, p, libdir)
        ap = to_posix(os.path.join(info_dir, 'RECORD'))
        archive_paths.append((ap, p))

    def build_zip(self, pathname, archive_paths):
        """Create the wheel zip at *pathname* from (archive_name, path) pairs."""
        with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
            for ap, p in archive_paths:
                logger.debug('Wrote %s to %s in wheel', p, ap)
                zf.write(p, ap)

    def build(self, paths, tags=None, wheel_version=None):
        """
        Build a wheel from files in specified paths, and use any specified tags
        when determining the name of the wheel.
        """
        if tags is None:
            tags = {}

        libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
        if libkey == 'platlib':
            is_pure = 'false'
            default_pyver = [IMPVER]
            default_abi = [ABI]
            default_arch = [ARCH]
        else:
            is_pure = 'true'
            default_pyver = [PYVER]
            default_abi = ['none']
            default_arch = ['any']

        self.pyver = tags.get('pyver', default_pyver)
        self.abi = tags.get('abi', default_abi)
        self.arch = tags.get('arch', default_arch)

        libdir = paths[libkey]

        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        archive_paths = []

        # First, stuff which is not in site-packages
        for key in ('data', 'headers', 'scripts'):
            if key not in paths:
                continue
            path = paths[key]
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    for fn in files:
                        p = fsdecode(os.path.join(root, fn))
                        rp = os.path.relpath(p, path)
                        ap = to_posix(os.path.join(data_dir, key, rp))
                        archive_paths.append((ap, p))
                        if key == 'scripts' and not p.endswith('.exe'):
                            with open(p, 'rb') as f:
                                data = f.read()
                            data = self.process_shebang(data)
                            with open(p, 'wb') as f:
                                f.write(data)

        # Now, stuff which is in site-packages, other than the
        # distinfo stuff.
        path = libdir
        distinfo = None
        for root, dirs, files in os.walk(path):
            if root == path:
                # At the top level only, save distinfo for later
                # and skip it for now
                for i, dn in enumerate(dirs):
                    dn = fsdecode(dn)
                    if dn.endswith('.dist-info'):
                        distinfo = os.path.join(root, dn)
                        del dirs[i]
                        break
                assert distinfo, '.dist-info directory expected, not found'

            for fn in files:
                # comment out next suite to leave .pyc files in
                if fsdecode(fn).endswith(('.pyc', '.pyo')):
                    continue
                p = os.path.join(root, fn)
                rp = to_posix(os.path.relpath(p, path))
                archive_paths.append((rp, p))

        # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
        files = os.listdir(distinfo)
        for fn in files:
            if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
                p = fsdecode(os.path.join(distinfo, fn))
                ap = to_posix(os.path.join(info_dir, fn))
                archive_paths.append((ap, p))

        wheel_metadata = [
            'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
            'Generator: distlib %s' % __version__,
            'Root-Is-Purelib: %s' % is_pure,
        ]
        for pyver, abi, arch in self.tags:
            wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
        p = os.path.join(distinfo, 'WHEEL')
        with open(p, 'w') as f:
            f.write('\n'.join(wheel_metadata))
        ap = to_posix(os.path.join(info_dir, 'WHEEL'))
        archive_paths.append((ap, p))

        # sort the entries by archive path. Not needed by any spec, but it
        # keeps the archive listing and RECORD tidier than they would otherwise
        # be. Use the number of path segments to keep directory entries together,
        # and keep the dist-info stuff at the end.
        def sorter(t):
            ap = t[0]
            n = ap.count('/')
            if '.dist-info' in ap:
                n += 10000
            return (n, ap)
        archive_paths = sorted(archive_paths, key=sorter)

        # Now, at last, RECORD.
        # Paths in here are archive paths - nothing else makes sense.
        self.write_records((distinfo, info_dir), libdir, archive_paths)
        # Now, ready to build the zip file
        pathname = os.path.join(self.dirname, self.filename)
        self.build_zip(pathname, archive_paths)
        return pathname

    def skip_entry(self, arcname):
        """
        Determine whether an archive entry should be skipped when verifying
        or installing.
        """
        # The signature file won't be in RECORD,
        # and we don't currently don't do anything with it
        # We also skip directories, as they won't be in RECORD
        # either. See:
        #
        # https://github.com/pypa/wheel/issues/294
        # https://github.com/pypa/wheel/issues/287
        # https://github.com/pypa/wheel/pull/289
        #
        return arcname.endswith(('/', '/RECORD.jws'))

    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings to raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
        bytecode will try to use file-hash based invalidation (PEP-552) on
        supported interpreter versions (CPython 2.7+).

        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """
        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)
        bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # Warn the caller about a wheel-spec version mismatch, if asked.
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)

            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']

            # RECORD rows keyed by archive path, used for size/digest checks.
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')

            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed

            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!

            outfiles = []   # for RECORD writing

            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    if self.skip_entry(u_arcname):
                        continue
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)

                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))

                    if u_arcname.startswith(data_pfx):
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))
                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile,
                                                          hashed_invalidation=bc_hashed_invalidation)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        # Scripts are staged in workdir, then regenerated by
                        # maker (which handles shebangs/launchers).
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)

                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)

                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts

                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' [%s]' % ','.join(v.flags)
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('extensions')
                                if commands:
                                    commands = commands.get('python.commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)

                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)

                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)

                    # Write SHARED
                    paths = dict(paths)     # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)

                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                shutil.rmtree(workdir)

    def _get_dylib_cache(self):
        """Return (creating if needed) the shared cache for extracted extensions."""
        global cache
        if cache is None:
            # Use native string to avoid issues on 2.x: see Python #20140.
            base = os.path.join(get_cache_base(), str('dylib-cache'),
                                '%s.%s' % sys.version_info[:2])
            cache = Cache(base)
        return cache

    def _get_extensions(self):
        """Extract C extensions listed in EXTENSIONS into the dylib cache.

        Returns (name, cached_path) pairs; entries newer in the wheel than
        on disk are re-extracted.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        arcname = posixpath.join(info_dir, 'EXTENSIONS')
        wrapper = codecs.getreader('utf-8')
        result = []
        with ZipFile(pathname, 'r') as zf:
            try:
                with zf.open(arcname) as bf:
                    wf = wrapper(bf)
                    extensions = json.load(wf)
                    cache = self._get_dylib_cache()
                    prefix = cache.prefix_to_dir(pathname)
                    cache_base = os.path.join(cache.base, prefix)
                    if not os.path.isdir(cache_base):
                        os.makedirs(cache_base)
                    for name, relpath in extensions.items():
                        dest = os.path.join(cache_base, convert_path(relpath))
                        if not os.path.exists(dest):
                            extract = True
                        else:
                            file_time = os.stat(dest).st_mtime
                            file_time = datetime.datetime.fromtimestamp(file_time)
                            info = zf.getinfo(relpath)
                            wheel_time = datetime.datetime(*info.date_time)
                            extract = wheel_time > file_time
                        if extract:
                            zf.extract(relpath, cache_base)
                        result.append((name, dest))
            except KeyError:
                pass
        return result

    def is_compatible(self):
        """
        Determine if a wheel is compatible with the running system.
        """
        return is_compatible(self)

    def is_mountable(self):
        """
        Determine if a wheel is asserted as mountable by its metadata.
        """
        return True # for now - metadata details TBD

    def mount(self, append=False):
        """Add the wheel to sys.path and register its C extensions, if any."""
        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
        if not self.is_compatible():
            msg = 'Wheel %s not compatible with this Python.' % pathname
            raise DistlibException(msg)
        if not self.is_mountable():
            msg = 'Wheel %s is marked as not mountable.' % pathname
            raise DistlibException(msg)
        if pathname in sys.path:
            logger.debug('%s already in path', pathname)
        else:
            if append:
                sys.path.append(pathname)
            else:
                sys.path.insert(0, pathname)
            extensions = self._get_extensions()
            if extensions:
                if _hook not in sys.meta_path:
                    sys.meta_path.append(_hook)
                _hook.add(pathname, extensions)

    def unmount(self):
        """Undo mount(): remove from sys.path and deregister extensions."""
        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
        if pathname not in sys.path:
            logger.debug('%s not in path', pathname)
        else:
            sys.path.remove(pathname)
            if pathname in _hook.impure_wheels:
                _hook.remove(pathname)
            if not _hook.impure_wheels:
                if _hook in sys.meta_path:
                    sys.meta_path.remove(_hook)

    def verify(self):
        """Check every archive entry against its RECORD size and digest.

        Raises DistlibException on any mismatch or unsafe ('..') entry.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # TODO version verification

            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # See issue #115: some wheels have .. in their entries, but
                # in the filename ... e.g. __main__..py ! So the check is
                # updated to look for .. in the directory portions
                p = u_arcname.split('/')
                if '..' in p:
                    raise DistlibException('invalid entry in '
                                           'wheel: %r' % u_arcname)

                if self.skip_entry(u_arcname):
                    continue
                row = records[u_arcname]
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)

    def update(self, modifier, dest_dir=None, **kwargs):
        """
        Update the contents of a wheel in a generic way. The modifier should
        be a callable which expects a dictionary argument: its keys are
        archive-entry paths, and its values are absolute filesystem paths
        where the contents the corresponding archive entries can be found. The
        modifier is free to change the contents of the files pointed to, add
        new entries and remove entries, before returning. This method will
        extract the entire contents of the wheel to a temporary location, call
        the modifier, and then use the passed (and possibly updated)
        dictionary to write a new wheel. If ``dest_dir`` is specified, the new
        wheel is written there -- otherwise, the original wheel is overwritten.

        The modifier should return True if it updated the wheel, else False.
        This method returns the same value the modifier returns.
        """

        def get_version(path_map, info_dir):
            # Read the current version from the extracted metadata file.
            version = path = None
            key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
            if key not in path_map:
                key = '%s/PKG-INFO' % info_dir
            if key in path_map:
                path = path_map[key]
                version = Metadata(path=path).version
            return version, path

        def update_version(version, path):
            # Bump the local version segment (PEP 440 '+N') to mark changes.
            updated = None
            try:
                v = NormalizedVersion(version)
                i = version.find('-')
                if i < 0:
                    updated = '%s+1' % version
                else:
                    parts = [int(s) for s in version[i + 1:].split('.')]
                    parts[-1] += 1
                    updated = '%s+%s' % (version[:i],
                                         '.'.join(str(i) for i in parts))
            except UnsupportedVersionError:
                logger.debug('Cannot update non-compliant (PEP-440) '
                             'version %r', version)
            if updated:
                md = Metadata(path=path)
                md.version = updated
                legacy = path.endswith(LEGACY_METADATA_FILENAME)
                md.write(path=path, legacy=legacy)
                logger.debug('Version updated from %r to %r', version,
                             updated)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        record_name = posixpath.join(info_dir, 'RECORD')
        with tempdir() as workdir:
            with ZipFile(pathname, 'r') as zf:
                path_map = {}
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    if u_arcname == record_name:
                        continue
                    if '..' in u_arcname:
                        raise DistlibException('invalid entry in '
                                               'wheel: %r' % u_arcname)
                    zf.extract(zinfo, workdir)
                    path = os.path.join(workdir, convert_path(u_arcname))
                    path_map[u_arcname] = path

            # Remember the version.
            original_version, _ = get_version(path_map, info_dir)
            # Files extracted. Call the modifier.
            modified = modifier(path_map, **kwargs)
            if modified:
                # Something changed - need to build a new wheel.
                current_version, path = get_version(path_map, info_dir)
                if current_version and (current_version == original_version):
                    # Add or update local version to signify changes.
                    update_version(current_version, path)
                # Decide where the new wheel goes.
                if dest_dir is None:
                    fd, newpath = tempfile.mkstemp(suffix='.whl',
                                                   prefix='wheel-update-',
                                                   dir=workdir)
                    os.close(fd)
                else:
                    if not os.path.isdir(dest_dir):
                        raise DistlibException('Not a directory: %r' % dest_dir)
                    newpath = os.path.join(dest_dir, self.filename)
                archive_paths = list(path_map.items())
                distinfo = os.path.join(workdir, info_dir)
                info = distinfo, info_dir
                self.write_records(info, workdir, archive_paths)
                self.build_zip(newpath, archive_paths)
                if dest_dir is None:
                    shutil.copyfile(newpath, pathname)
        return modified
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.
    """
    # Version suffixes from the current minor version down to X.0.
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))

    abis = []
    # ABI tags the interpreter accepts for extension modules.
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []

    arches = [ARCH]
    if sys.platform == 'darwin':
        # On macOS, also accept the fat/universal variants of the current
        # arch, for every minor OS version down to X.0.
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            name, major, minor, arch = m.groups()
            minor = int(minor)
            matches = [arch]
            if arch in ('i386', 'ppc'):
                matches.append('fat')
            if arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if arch in ('i386', 'x86_64'):
                matches.append('intel')
            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            while minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, major, minor, match)
                    if s != ARCH:   # already there
                        arches.append(s)
                minor -= 1

    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))

    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))

    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))

    return set(result)
# Compute once at import time; the helper is then discarded since the tag
# set cannot change during the process lifetime.
COMPATIBLE_TAGS = compatible_tags()

del compatible_tags
def is_compatible(wheel, tags=None):
    """Return True if *wheel* matches any (pyver, abi, arch) triple in *tags*.

    *wheel* may be a :class:`Wheel` instance or a wheel filename; *tags*
    defaults to the tags compatible with the running interpreter.
    """
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)    # assume it's a filename
    if tags is None:
        tags = COMPATIBLE_TAGS
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in tags)
| 40.358194
| 101
| 0.50203
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.