###############################################################################
# 1) This is a test case to verify that the deposit case works fine.
# 2) It also checks whether duplicate requests are processed correctly.
###############################################################################
####################################
# Client Settings
# The client configuration is a dictionary where each key is a list of
# all the clients of a particular bank. Each entry in the list is a key:value
# pair of all the configurations of that client
####################################
client_conf = { 'CITI':
[
{'index':0, 'account_no': 9999,'client_time_out': 8, 'num_retransmits':3, 'resend_to_new_head':1, 'msg_loss_freq':0},
],}
#The clients will issue the following requests in that order to the servers
client_seq = [('getBalance', ('UID1', 8888)),
('deposit', ('UID1', 8888, 100)),
('deposit', ('UID2', 8888, 100)),
('deposit', ('UID3', 8888, 100)),
('deposit', ('UID4', 8888, 100)),
('deposit', ('UID5', 8888, 100)),
('withdraw', ('UID6', 8888, 100)),
('withdraw', ('UID7', 8888, 100)),
('withdraw', ('UID8', 8888, 100)),
('withdraw', ('UID9', 8888, 100)),
('withdraw', ('UID10', 8888, 100)),
('getBalance', ('UID1', 8888))
]
#random(seed, numReq, probGetBalance, probDeposit, probWithdraw, probTransfer)
#client_prob_conf = [
#{'index':0, 'seed':450, 'numReq':10, 'prob':[('getBalance',0.10), ('deposit',0.5), ('withdraw',0.4), ('transfer',0)]}
#]
####################################
# Server Settings
# The server configuration is a dictionary where each key is a list of
# all the servers of a particular bank. Each entry in the list is a key:value
# pair of all the configurations of that server
####################################
server_conf = { 'CITI':
[
{'index':0, 'startup_delay': 0, 'rcv_lifetime':1000, 'snd_lifetime':1000, 'ip_addr': '127.0.0.1', 'port': 1001, 'heartbeat_interval':1},
{'index':1, 'startup_delay': 13, 'rcv_lifetime':1000, 'snd_lifetime':1000, 'ip_addr': '127.0.0.1', 'port': 1002, 'heartbeat_interval':1},
{'index':2, 'startup_delay': 0, 'rcv_lifetime':1000, 'snd_lifetime':1000, 'ip_addr': '127.0.0.1', 'port': 1003, 'heartbeat_interval':1}
],}
master_conf = { 'master_interval':5}
| config/config1_3_add_server_1.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from setuptools.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from ._typing import TYPE_CHECKING
from .specifiers import Specifier, InvalidSpecifier
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
Operator = Callable[[str, str], bool]
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
# type: (Any) -> None
self.value = value
def __str__(self):
# type: () -> str
return str(self.value)
def __repr__(self):
# type: () -> str
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
# type: () -> str
raise NotImplementedError
class Variable(Node):
def serialize(self):
# type: () -> str
return str(self)
class Value(Node):
def serialize(self):
# type: () -> str
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
# type: () -> str
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name") # PEP-345
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
| L("python_implementation") # undocumented setuptools legacy
| L("extra") # PEP-508
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
# type: (Union[ParseResults, List[Any]]) -> List[Any]
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
# type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
assert isinstance(marker, (list, tuple, string_types))
    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
} # type: Dict[str, Operator]
def _eval_op(lhs, op, rhs):
# type: (str, Op, str) -> bool
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize()) # type: Optional[Operator]
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
class Undefined(object):
pass
_undefined = Undefined()
def _get_env(environment, name):
# type: (Dict[str, str], str) -> str
value = environment.get(name, _undefined) # type: Union[str, Undefined]
if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
# type: (List[Any], Dict[str, str]) -> bool
groups = [[]] # type: List[List[bool]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
# type: (sys._version_info) -> str
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment():
# type: () -> Dict[str, str]
if hasattr(sys, "implementation"):
# Ignoring the `sys.implementation` reference for type checking due to
# mypy not liking that the attribute doesn't exist in Python 2.7 when
# run with the `--py27` flag.
iver = format_full_version(sys.implementation.version) # type: ignore
implementation_name = sys.implementation.name # type: ignore
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
# type: (str) -> None
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc : e.loc + 8]
)
raise InvalidMarker(err_str)
def __str__(self):
# type: () -> str
return _format_marker(self._markers)
def __repr__(self):
# type: () -> str
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
# type: (Optional[Dict[str, str]]) -> bool
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
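
# Illustrative usage (not part of the vendored module): parse a PEP 508 marker
# and evaluate it against the running interpreter's environment.
#
#     Marker('python_version >= "2.7" and os_name == "posix"').evaluate()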
| env/env/lib/python3.6/site-packages/setuptools/_vendor/packaging/markers.py |
# --------------------- Telegram.py --------------------------------- #
# Allows integration with the Telegram Bot.
# ------------------------------------------------------------------- #
from numpy.core.fromnumeric import around, std
import requests
import Elo
from Models import Models
import Helper
import pandas as pd
import numpy as np
class TelegramBot():
"""
Allows integration with the Telegram Bot.
"""
def __init__(self):
self.url = 'https://api.telegram.org/'
with open('secrets/telegram_secrets') as f:
lines = f.readlines()
self.bot_token = lines[0].strip()
self.chat_id = lines[1].strip()
def send_message(self, d:dict):
df = pd.read_csv('past_data/2021_2022/split_stats_per_game.csv')
df = Helper.add_features_to_df(df)
n = 3
train_df = pd.read_csv('past_data/average_seasons/average_NSeasons_prod.csv')
# Standardize the DataFrame
std_df, scaler = Helper.standardize_DataFrame(train_df)
clf = Models.build_RF_classifier(std_df)
text = "🏀 Tonight's Games: Home vs. Away 🏀\n\n"
for home, away in d.items():
last_N_games_away = df.loc[df['Team_away'] == away].tail(n)
last_N_games_home = df.loc[df['Team_home'] == home].tail(n)
to_predict = pd.concat(
[
last_N_games_away[Models.away_features].mean(),
last_N_games_home[Models.home_features].mean()
],
axis=0)[Models.features]
prob_home_rf, prob_away_rf = clf.predict_proba(scaler.transform(to_predict.values.reshape(1,-1)))[0]
prob_away_elo, prob_home_elo = Elo.get_probas(away, home)
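            # Only quote a pick when both models agree on the same side (both probabilities
            # above 0.5); the reported probability is the average of the two models, the
            # decimal odds are its reciprocal, and the pick is only included when the
            # averaged probability reaches 0.6.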
if ((prob_home_rf > 0.5) and (prob_home_elo > 0.5)):
prob_home = str(around((prob_home_rf + prob_home_elo)/2, decimals=3))
odds_home = str(around(1/float(prob_home), decimals=2))
if float(prob_home) >= 0.6:
text = text + home + '(' + prob_home + ' --> ' + odds_home + ') vs. ' + away + '\n\
RF Prob.: ' + str(around(prob_home_rf, decimals=3)) + '\n\
Elo Prob.: ' + str(around(prob_home_elo, decimals=3)) + '\n\n'
if ((prob_away_rf > 0.5) and (prob_away_elo > 0.5)):
prob_away = str(around((prob_away_rf + prob_away_elo)/2, decimals=3))
odds_away = str(around(1/float(prob_away), decimals=2))
if float(prob_away) >= 0.6:
text = text + home + ' vs. ' + away + '(' + prob_away + ' --> ' + odds_away + ')' + '\n\
RF Prob.: ' + str(around(prob_away_rf, decimals=3)) + '\n\
Elo Prob.: ' + str(around(prob_away_elo, decimals=3)) + '\n\n'
query = self.url + self.bot_token + '/sendMessage?' + self.chat_id + '&text=' + text
        requests.request("POST", query)
| NBABet/Telegram.py |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 09:47:07 2019
@author: student
"""
import numpy as np
import os
np.set_printoptions(precision=3, linewidth=200, suppress=True)
LINE_WIDTH = 60
N_SIMULATION = 4000 # number of time steps simulated
dt = 0.002 # controller time step
q0 = np.array([ 0. , -1.0, 0.7, 0. , 0. , 0. ]) # initial configuration
# REFERENCE SINUSOIDAL TRAJECTORY
amp = np.array([0*0.02, 0.1, 0.10]) # amplitude
phi = np.array([0.0, 0.5*np.pi, 0.0]) # phase
two_pi_f = 1.4*2*np.pi*np.array([1.0, 0.5, 0.5]) # frequency (times 2*pi)
offset = np.array([0.0, 0.0, 0.0])
w_ee = 1.0 # weight of end-effector task
w_posture = 1e-3 # weight of joint posture task
w_torque_bounds = 1.0 # weight of the torque bounds
w_joint_bounds = 1.0
kp_ee = 5.0 # proportional gain of end-effector constraint
kp_posture = 1.0 # proportional gain of joint posture task
tau_max_scaling = 0.4 # scaling factor of torque bounds
v_max_scaling = 0.4
ee_frame_name = "ee_fixed_joint" # end-effector frame name
ee_task_mask = np.array([1., 1, 1, 0, 0, 0])
PRINT_N = 500 # print every PRINT_N time steps
DISPLAY_N = 20 # update robot configuration in viewer every DISPLAY_N time steps
CAMERA_TRANSFORM = [2.582354784011841, 1.620774507522583, 1.0674564838409424, 0.2770655155181885, 0.5401807427406311, 0.6969326734542847, 0.3817386031150818]
SPHERE_RADIUS = 0.03
REF_SPHERE_RADIUS = 0.03
EE_SPHERE_COLOR = (1, 0.5, 0, 0.5)
EE_REF_SPHERE_COLOR = (1, 0, 0, 0.5)
from example_robot_data.robots_loader import getModelPath
urdf = "/ur_description/urdf/ur5_robot.urdf"
path = getModelPath(urdf)
urdf = path+urdf
| exercizes/ur5_conf.py |
"""
Contains Catalyst DAO implementations.
"""
from django.conf import settings
from restclients.mock_http import MockHTTP
from restclients.dao_implementation import get_timeout
from restclients.dao_implementation.live import get_con_pool, get_live_url
from restclients.dao_implementation.mock import get_mockdata_url
import datetime
import hashlib
import pytz
class File(object):
"""
The File DAO implementation returns generally static content. Use this
DAO with this configuration:
RESTCLIENTS_CANVAS_DAO_CLASS =
'restclients.dao_implementation.catalyst.File'
"""
def getURL(self, url, headers):
return get_mockdata_url("catalyst", "file", url, headers)
class Live(object):
"""
This DAO provides real data. It requires further configuration, e.g.
For cert auth:
RESTCLIENTS_CATALYST_CERT_FILE='/path/to/an/authorized/cert.cert',
RESTCLIENTS_CATALYST_KEY_FILE='/path/to/the/certs_key.key',
SolAuth Authentication (personal only):
RESTCLIENTS_CATALYST_SOL_AUTH_PUBLIC_KEY='public_key'
RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY='12345'
SolAuth tokens are available at https://catalyst.uw.edu/rest_user
For an alternate host:
RESTCLIENTS_CATALYST_HOST = 'https://my-dev-server/'
"""
pool = None
def getURL(self, url, headers):
host = settings.RESTCLIENTS_CATALYST_HOST
if hasattr(settings, "RESTCLIENTS_CATALYST_CERT_FILE"):
Live.pool = get_con_pool(host,
settings.RESTCLIENTS_CATALYST_KEY_FILE,
settings.RESTCLIENTS_CATALYST_CERT_FILE,
socket_timeout=get_timeout("catalyst"))
else:
Live.pool = get_con_pool(host,
socket_timeout=get_timeout("catalyst"))
if hasattr(settings, "RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY"):
# Use js_rest instead of rest, to avoid certificate issues
url = url.replace("/rest/", "/js_rest/")
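            # SolAuth signing: the SHA-1 of "<private key>\nGET\n<url>\n<date>" is sent as
            # "SolAuth <public key>:<digest>" in the Authorization header, alongside the
            # same timestamp in the Date header.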
now_with_tz = datetime.datetime.now(pytz.utc).strftime(
"%a %b %d %H:%M:%S %Z %Y")
header_base = "%s\nGET\n%s\n%s\n" % (
settings.RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY,
url,
now_with_tz
)
public_key = settings.RESTCLIENTS_CATALYST_SOL_AUTH_PUBLIC_KEY
hashed = hashlib.sha1(header_base).hexdigest()
headers["Authorization"] = "SolAuth %s:%s" % (public_key, hashed)
headers["Date"] = now_with_tz
return get_live_url(Live.pool, "GET", host, url, headers=headers)
| restclients/dao_implementation/catalyst.py |
import hashlib
import math
from http import HTTPStatus
from quart import jsonify, url_for, request
from lnurl import LnurlPayResponse, LnurlPayActionResponse, LnurlErrorResponse # type: ignore
from lnbits.core.services import create_invoice
from lnbits.utils.exchange_rates import get_fiat_rate_satoshis
from . import lnurlp_ext
from .crud import increment_pay_link
@lnurlp_ext.route("/api/v1/lnurl/<link_id>", methods=["GET"])
async def api_lnurl_response(link_id):
link = await increment_pay_link(link_id, served_meta=1)
if not link:
return (
jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}),
HTTPStatus.OK,
)
rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1
resp = LnurlPayResponse(
callback=url_for("lnurlp.api_lnurl_callback", link_id=link.id, _external=True),
min_sendable=math.ceil(link.min * rate) * 1000,
max_sendable=round(link.max * rate) * 1000,
metadata=link.lnurlpay_metadata,
)
params = resp.dict()
if link.comment_chars > 0:
params["commentAllowed"] = link.comment_chars
return jsonify(params), HTTPStatus.OK
@lnurlp_ext.route("/api/v1/lnurl/cb/<link_id>", methods=["GET"])
async def api_lnurl_callback(link_id):
link = await increment_pay_link(link_id, served_pr=1)
if not link:
return (
jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}),
HTTPStatus.OK,
)
min, max = link.min, link.max
rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1
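    # The bounds below are in millisatoshis. For fiat-denominated links, rate is
    # satoshis per fiat unit, so rate*995 and rate*1010 leave roughly 0.5% / 1% of
    # headroom around the configured minimum and maximum.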
if link.currency:
# allow some fluctuation (as the fiat price may have changed between the calls)
min = rate * 995 * link.min
max = rate * 1010 * link.max
else:
min = link.min * 1000
max = link.max * 1000
amount_received = int(request.args.get("amount"))
if amount_received < min:
return (
jsonify(
LnurlErrorResponse(
reason=f"Amount {amount_received} is smaller than minimum {min}."
).dict()
),
HTTPStatus.OK,
)
elif amount_received > max:
return (
jsonify(
LnurlErrorResponse(
reason=f"Amount {amount_received} is greater than maximum {max}."
).dict()
),
HTTPStatus.OK,
)
comment = request.args.get("comment")
if len(comment or "") > link.comment_chars:
return (
jsonify(
LnurlErrorResponse(
reason=f"Got a comment with {len(comment)} characters, but can only accept {link.comment_chars}"
).dict()
),
HTTPStatus.OK,
)
payment_hash, payment_request = await create_invoice(
wallet_id=link.wallet,
amount=int(amount_received / 1000),
memo=link.description,
description_hash=hashlib.sha256(
link.lnurlpay_metadata.encode("utf-8")
).digest(),
extra={"tag": "lnurlp", "link": link.id, "comment": comment},
)
resp = LnurlPayActionResponse(
pr=payment_request,
success_action=link.success_action(payment_hash),
routes=[],
)
return jsonify(resp.dict()), HTTPStatus.OK
| lnbits/extensions/lnurlp/lnurl.py |
# AMZ-Driverless
# Copyright (c) 2019 Authors:
# - Huub Hendrikx <hhendrik@ethz.ch>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import os
import time
import yaml
from rbb_tools.common.shell import CommandGroup
from rbb_tools.extraction.extractor import AbstractExtractionPlugin
from rbb_tools.extraction.registry import Product
class RvizRecorderPlugin(AbstractExtractionPlugin):
def __init__(self, configuration, logger, resource_directory):
super(RvizRecorderPlugin, self).__init__(configuration, logger, resource_directory)
def check_topics(self, topics):
return True
def get_plugin_meta_data(self):
return {
'name': 'RViz Recorder',
'version': '0.0.1'
}
def get_default_configuration(self):
return {
'height': 1080,
'width': 1920,
'margin_left': -20,
'margin_right': -20,
'rewrite_rviz_file': True,
'headless': 'auto',
'color_depth': 24,
'title': 'RViz Recording'
}
def _rewrite_rviz_file(self, rviz_file, tmp_dir):
data = None
with open(rviz_file, 'r') as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
self._logger.warning("Cannot rewrite malformed rviz file: %s" % str(exc))
return rviz_file
data['Window Geometry']['Height'] = self.config('height')
data['Window Geometry']['Width'] = self.config('width') - \
self.config('margin_left') - self.config('margin_right')
data['Window Geometry']['Y'] = 0
data['Window Geometry']['X'] = self.config('margin_left')
data['Window Geometry']['Hide Left Dock'] = True
data['Window Geometry']['Hide Right Dock'] = True
data['Window Geometry']['Selection']['collapsed'] = True
data['Window Geometry']['Time']['collapsed'] = False
data['Window Geometry']['Tool Properties']['collapsed'] = True
data['Window Geometry']['Views']['collapsed'] = True
data['Window Geometry']['Displays']['collapsed'] = True
rewritten_rviz_file = tmp_dir + "/rviz.rviz"
with open(rewritten_rviz_file, 'w') as outfile:
yaml.safe_dump(data, outfile, default_flow_style=False)
return rewritten_rviz_file
def extract(self, bag_file, topics, tmp_dir, output_dir, product_factory):
video_file = output_dir + "/output.mp4"
xdisplay_id = 99
logo_file = self._resource_directory + "/" + self._configuration['logo']
font_file = self._resource_directory + "/" + self._configuration['font']
rviz_file = self._resource_directory + "/" + self._configuration['rviz_file']
if self.config("rewrite_rviz_file"):
rviz_file = self._rewrite_rviz_file(rviz_file, tmp_dir)
# TODO: Output a temporary rviz file that rewrites the configuration to collapse all panels and spawn window at 0,0
if os.path.exists(video_file):
return False
# TODO: Include fingerprints
name = os.path.basename(bag_file)
rviz_name = os.path.basename(self._configuration['rviz_file'])
text = name + " | " + rviz_name + " | " + str(datetime.datetime.today())
self._logger.info("Video id: " + text)
xephyr_size = "%dx%d" % (self.config('width'), self.config('height'))
xephyr_cmd = "Xephyr -ac -nocursor -screen %s -br -reset -terminate :%d" % (xephyr_size, xdisplay_id)
xvfb_size = "%dx%dx%d" % (self.config('width'), self.config('height'), self.config('color_depth'))
xvfb_cmd = "Xvfb :%d -screen 0 %s" % (xdisplay_id, xvfb_size)
roscore_cmd = "roscore"
rosbag_player_cmd = "rosbag play --clock --hz 1000 -q %s" % (bag_file)
rviz_splash_option = ""
if self.config('splash_screen'):
rviz_splash_option = " -s %s" % (self._resource_directory + "/" + self.config('splash_screen'))
if os.environ.get('DISPLAY') is not None:
            # We assume hardware acceleration is available and run through VGL.
            rviz_vgl_cmd = "vglrun -- rviz -d %s%s" % (rviz_file, rviz_splash_option)  # DISPLAY is set when the command is launched
        else:
            # We assume hardware acceleration is NOT available and run directly on Xephyr/Xvfb;
            # software acceleration (mesa with llvm) should be installed.
            rviz_vgl_cmd = "rviz -d %s%s" % (rviz_file, rviz_splash_option)  # DISPLAY is set when the command is launched
ffmpeg_grab_size = "%dx%d" % (self.config('width'), self.config('height'))
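        # Capture the virtual X display with x11grab at 25 fps, overlay the logo at the
        # configured position, burn the identifying text in with drawtext, and write an
        # mp4 with the faststart flag so it can be played while still downloading.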
ffmpeg_cmd = "ffmpeg -loglevel warning -video_size %s -framerate 25 -f x11grab -i :%d.0+0,0" \
" -i %s -filter_complex \"overlay=%d:%d,drawtext=text=\\'%s\\':x=%d:y=%d:fontfile=%s:fontsize=16:fontcolor=white:shadowcolor=black:shadowx=2:shadowy=2\" " \
"-movflags +faststart %s" % (ffmpeg_grab_size, xdisplay_id, logo_file, self._configuration['logo_x'],self._configuration['logo_y'], text, self._configuration['text_x'], self._configuration['text_y'], font_file, output_dir + "/output.mp4")
self._logger.info(ffmpeg_cmd)
##################
# Run the commands
##################
cmd_group = CommandGroup()
try:
roscore = cmd_group.Command(roscore_cmd)
if os.environ.get('RBB_HEADLESS') == "force":
# Force headless in server environments
headless = True
else:
headless = self.config('headless')
if headless == 'auto':
headless = os.environ.get('RBB_HEADLESS') == 1
if headless:
print ("Running in headless mode! (Xvfb)")
framebuffer = cmd_group.Command(xvfb_cmd)
else:
framebuffer = cmd_group.Command(xephyr_cmd)
rviz = cmd_group.Command(rviz_vgl_cmd, {'DISPLAY': ":%d.0" % (xdisplay_id)})
ffmpeg = cmd_group.Command(ffmpeg_cmd)
rosbag_player = cmd_group.Command(rosbag_player_cmd)
move_mouse = cmd_group.Command("xdotool mousemove %d %d" % (self.config('width'), self.config('height')), {'DISPLAY': ":%d.0" % (xdisplay_id)})
rosparam_sim_time = cmd_group.Command("rosparam set use_sim_time true")
roscore.run()
framebuffer.run()
# Make sure they don't directly crash
time.sleep(1)
if roscore.is_running() and framebuffer.is_running():
self._logger.info("Roscore&Xephyr up!")
rviz.run()
ffmpeg.run()
time.sleep(0.5)
if rviz.is_running() and ffmpeg.is_running():
move_mouse.run()
rosparam_sim_time.run()
self._logger.info("Rviz&ffmpeg up!")
rosbag_player.run()
while rosbag_player.is_running() and rviz.is_running():
time.sleep(1)
ffmpeg.send_sigint()
ffmpeg.join()
else:
                self._logger.failure("Couldn't start roscore or Xephyr")
finally:
cmd_group.ensure_terminated()
# Register the product
product = product_factory.new() # type: Product
product.set_type("video")
product.set_title(self.config('title'))
product.set_topics(topics)
product.add_file("video.mp4", "output.mp4", mime="video/mp4")
return [product]
plugin = RvizRecorderPlugin
| rbb_tools/src/rbb_tools/plugins/rviz_recorder.py |
"""
Min Stack
-----
A LIFO abstract data type that serves as a collection of elements.
Supports retrieving the min from the stack in constant time.
"""
class MinStack(object):
def __init__(self):
"""
Attributes:
data (arr): data stored in the stack
minimum (arr): minimum values of data stored
"""
self.data = []
self.minimum = []
def empty(self):
"""
Returns whether or not the stack is empty.
Time Complexity: O(1)
Returns:
bool: whether or not the stack is empty
"""
return len(self.data) == 0
def push(self, x):
"""
Pushes an element onto the stack.
Time Complexity: O(1)
Args:
x: item to be added
"""
self.data.append(x)
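        # Track the running minimum alongside the data: push onto the minimum stack only
        # when the new value is <= the current minimum (so duplicates of the minimum are
        # recorded), and pop() later removes it only when the popped value matches.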
if not self.minimum or x <= self.minimum[-1]:
self.minimum.append(x)
def pop(self):
"""
Pops an element off the stack.
Time Complexity: O(1)
Returns:
any: the last element on the stack
"""
x = self.data.pop()
if x == self.minimum[-1]:
self.minimum.pop()
return x
def peek(self):
"""
Returns the last item on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.data[-1]
def peek_min(self):
"""
Returns the min on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.minimum[-1]
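
# Minimal usage sketch (illustrative, not part of the original module): the running
# minimum stays available in O(1) while values are pushed and popped.
if __name__ == "__main__":
    s = MinStack()
    for v in (5, 2, 7, 2):
        s.push(v)
    print(s.peek_min())  # 2
    s.pop()              # removes the last 2; the earlier 2 is still the minimum
    print(s.peek_min())  # 2
    s.pop()              # removes 7
    s.pop()              # removes the first 2
    print(s.peek_min())  # 5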
| libalgs-py/data_structures/min_stack.py |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import KeyVaultManagementClientConfiguration
from .operations import VaultsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import PrivateLinkResourcesOperations
from .operations import ManagedHsmsOperations
from .operations import MHSMPrivateEndpointConnectionsOperations
from .operations import MHSMPrivateLinkResourcesOperations
from .operations import Operations
from . import models
class KeyVaultManagementClient(object):
"""The Azure management API provides a RESTful set of web services that interact with Azure Key Vault.
:ivar vaults: VaultsOperations operations
:vartype vaults: azure.mgmt.keyvault.v2021_04_01_preview.operations.VaultsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.keyvault.v2021_04_01_preview.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.keyvault.v2021_04_01_preview.operations.PrivateLinkResourcesOperations
:ivar managed_hsms: ManagedHsmsOperations operations
:vartype managed_hsms: azure.mgmt.keyvault.v2021_04_01_preview.operations.ManagedHsmsOperations
:ivar mhsm_private_endpoint_connections: MHSMPrivateEndpointConnectionsOperations operations
:vartype mhsm_private_endpoint_connections: azure.mgmt.keyvault.v2021_04_01_preview.operations.MHSMPrivateEndpointConnectionsOperations
:ivar mhsm_private_link_resources: MHSMPrivateLinkResourcesOperations operations
:vartype mhsm_private_link_resources: azure.mgmt.keyvault.v2021_04_01_preview.operations.MHSMPrivateLinkResourcesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.keyvault.v2021_04_01_preview.operations.Operations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = KeyVaultManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.vaults = VaultsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.managed_hsms = ManagedHsmsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.mhsm_private_endpoint_connections = MHSMPrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.mhsm_private_link_resources = MHSMPrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> KeyVaultManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
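
# Illustrative usage (an assumption, not part of the generated client): build the client
# with a credential from the azure-identity package and list the vaults in a subscription.
#
#     from azure.identity import DefaultAzureCredential
#     client = KeyVaultManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for vault in client.vaults.list_by_subscription():
#         print(vault.name)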
| sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_04_01_preview/_key_vault_management_client.py |
import os
import time
import requests
from bs4 import BeautifulSoup
import datetime
from twilio.rest import Client
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import math
class Data():
def __init__(self,link): # Automatically stores the data from parsed link as the object's attribute
self.data = self.update(link)
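        # self.prev holds the previous run's figures; check_update() fills it from the
        # last row of data.csv so that get_data() can report day-over-day changes.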
self.prev = None
def update(self,link): # Parses the site's HTML code
result = requests.get(link)
soup = BeautifulSoup(result.content,'lxml')
return soup
class Hawaii(Data):
def __init__(self,link): # Automatically stores the data from parsed link as the object's attribute (Same constructor as Class Data)
super().__init__(link)
def do_sms(self,numbers): # Creates SMS notification with the COVID data for each island
for number in numbers:
smsNotification.notify(number,self.get_data())
smsNotification.save_number(numbers)
def get_data(self): # Returns the data from today:
# Gathering all the data
today = self.get_dataframe()
order = ["Total cases","Hawai’i","Oahu","Kaua’i","Maui","Pending","Residents diagnosed outside of Hawai‘i","Required Hospitalization","Hawaii deaths","Lanai","Molokai"]
data = today.to_numpy()[0]
message = ""
for index in range(len(order)):
            diff = int(data[index+1]) - int(self.prev[index])
            # Positive day-over-day changes get a "+" prefix; negative values already carry a "-" sign.
            if diff >= 0:
                diff = "+" + str(diff)
            else:
                diff = str(diff)
line = order[index] + ": " + str(data[index+1]) + " (" + diff + ") \n"
message = message + line
return message
def get_dataframe(self): # Returns the data structure for today's data
date = self.get_date()
names = self.data.find_all('span',{'class': 'label'})
values = self.data.find_all('span',{'class': 'value'})
df = pd.DataFrame()
# Formats the names and values
for i in range(len(names)):
names[i] = names[i].text.replace(":","")
values[i] = int(values[i].text.replace("§","").replace("†","").replace("‡","").replace("*","").split(" ")[0])
# Orders the names and values in the order of the .csv
order = ["Total cases","Hawai’i","Oahu","Kaua’i","Maui","Pending","Residents diagnosed outside of Hawai‘i","Required Hospitalization","Hawaii deaths","Lanai","Molokai"]
namesOrdered = ["","","","","","","","","","",""]
valuesOrdered = ["","","","","","","","","","",""]
for i in range(len(order)):
for j in range(len(names)):
if order[i] == names[j]:
namesOrdered[i] = names[j]
valuesOrdered[i] = values[j]
dfNew = pd.DataFrame({
"Date": date,
namesOrdered[0]: valuesOrdered[0],
namesOrdered[1]: valuesOrdered[1],
namesOrdered[2]: valuesOrdered[2],
namesOrdered[3]: valuesOrdered[3],
namesOrdered[4]: valuesOrdered[4],
namesOrdered[5]: valuesOrdered[5],
namesOrdered[6]: valuesOrdered[6],
namesOrdered[7]: valuesOrdered[7],
namesOrdered[8]: valuesOrdered[8],
namesOrdered[9]: valuesOrdered[9],
namesOrdered[10]: valuesOrdered[10],
}, index = [0])
return dfNew
def get_date(self): # Returns the update date of the data in the datetime format
# Formatting
date = self.data.find_all('dd',{'class': 'small'})
date = date[0].text[33:]
date = datetime.datetime.strptime(date, '%B %d, %Y')
date = str(date.date())
return date
def do_update(self): # Does an update if the history.txt is not updated
# If the history.txt is not updated relevant to the available data, the update proceeds
if self.check_update() == False:
# Checks if the data on the website is updated; Loops the program until the data is updated
if self.get_date() != str(datetime.date.today()):
print("Data not updated. Sleeping for 1 minute.\n")
time.sleep(60)
print("Rechecking.\n")
self.do_update()
return
dfOld = pd.read_csv('data.csv', index_col = False)
dfOld = dfOld.append(self.get_dataframe())
dfOld.to_csv('data.csv', index=False)
file = "phoneNumbers.txt"
numbers = open(file,"r")
# Checks if there are any recently saved numbers
if(os.stat(file).st_size) == 0:
print("No recent phone numbers found. Please enter your phone numbers including area code and no dashes into the phoneNumbers.txt file, with each phone number tabbed.")
return
else:
paste=[]
for line in numbers:
paste.append(line.replace("\n",""))
self.do_sms(paste)
def check_update(self): # Checks when the data.csv was last updated; Returns True if already updated today; Returns False if not
file = "data.csv"
history = open(file,'r')
# Checks if the file is empty ahead of time to prevent crash and formats the document if it is empty
if(os.stat(file).st_size) == 0:
File.append_file(file, "Date,Total cases,Hawai’i,Oahu,Kaua’i,Maui,Pending,Residents diagnosed outside of Hawai‘i,Required Hospitalization,Hawaii deaths,Lanai,Molokai")
return False
# Finds the last line in the .txt
for line in history:
pass
lastLine = line
history.close()
# Checks if the last updated date was today
if self.get_date() in lastLine:
return True
# Formats the data from .csv to a Python list
lastLine = lastLine.split(",")
lastLine.pop(0)
self.prev = lastLine
return False
class smsNotification:
@staticmethod
def notify(toNumber,message): # Creates SMS notifications; (IMPORTANT) List your Twilio account sid, auth token, and phone number in the token.txt file by tabbing each token
f = open('token.txt','r')
accountSid, authToken, fromNumber = f.readlines()
accountSid = accountSid.replace("\n","")
authToken = authToken.replace("\n","")
fromNumber = fromNumber.replace("\n","")
client = Client(accountSid, authToken)
client.messages.create(to=toNumber,from_=fromNumber,body=message)
print("SMS sent")
@staticmethod
def save_number(paste): # Saves the recently used phone number on file
numbers = open("phoneNumbers.txt","w")
for number in paste:
numbers.write(str(number) + "\n")
class Graph:
@staticmethod
def display_graph(islands,scope=[],graphType='Cases'): # Displays data in a graph format where islands is a list containing the statistics that should be included, the scope is the time period, and the graph type differentiates between cases vs change in cases
if graphType == 'Cases': # For graphing cases
df = pd.read_csv('data.csv', index_col = False)
else: # For graphing the change in cases
df = App.get_df_change()
            if scope[0] == 0: # Adjust the scope to not include the first entry, since there is no change observed on that day
scope[0] = 1
plt.figure(figsize=(8,8))
min_ = -1
max_ = -1
for island in islands: # Plots data for each island on the same plot
plt.plot(df["Date"], df[island], label = island)
if graphType == 'Cases':
if scope != []:
if min_ == - 1 and max_ == -1:
min_ = df[island].get(scope[0])
max_ = df[island].get(scope[1])
else:
minNow = df[island].get(scope[0])
maxNow = df[island].get(scope[1])
if minNow < min_:
min_ = minNow
elif maxNow > max_:
max_ = maxNow
plt.ylim(min_,max_)
title = "COVID Cases vs Time"
if scope != []: # Scales the interval to the scope
intervals = (scope[1]-scope[0])/4
if intervals < 1:
intervals = 1
plt.gca().xaxis.set_major_locator(matplotlib.dates.DayLocator(interval=math.floor(intervals)))
plt.xlim(scope[0],scope[1])
title = title + " (" + df["Date"].get(scope[0]) + " to " + df["Date"].get(scope[1]) + ")" # Title formatting
else:
plt.gca().xaxis.set_major_locator(matplotlib.dates.DayLocator(interval=30)) # Automatically sets the scale if there is no scale
plt.xlabel("Date")
if graphType == 'Cases':
plt.ylabel("# of Cases")
else:
plt.ylabel("Change in Cases")
title = title.replace("COVID Cases","Change in COVID Cases")
plt.title(title)
plt.grid()
plt.legend()
plt.show()
class File:
@staticmethod # Appends the passed file with the passed text
def append_file(file,text):
history = open(file,'a')
history.write(text)
history.close()
class App:
@staticmethod
def get_df(): # Returns the dataframe
return pd.read_csv('data.csv', index_col = False)
    def format_date(date): # Takes a date string and returns its row index in the dataframe
df = pd.read_csv('data.csv', index_col = False)
for x in range(len(df["Date"])):
if df["Date"][x] == date:
return x
def get_last_index(): # Returns the index of the last element in the dataframe
df = pd.read_csv('data.csv', index_col = False)
for index in range(len(df["Date"])):
pass
return index
def get_df_change(): # Returns the change over time dataframe
df = pd.read_csv('data.csv', index_col = False)
dates = df['Date']
        dates = pd.DataFrame(dates) # Save the dates column as its own dataframe
df = df.drop(columns=['Date']) # Must drop the dates since the dataframe diff() function will produce an unideal dataframe otherwise
dfDiff = df.diff()
dfDiff = dates.join(dfDiff) # Rejoin dataframes
dfDiff = dfDiff.iloc[1:] # Get rid of bad data from first row
return dfDiff
if __name__ == "__main__":
data=Hawaii("https://health.hawaii.gov/coronavirusdisease2019/")
data.do_update()
lastIndex = App.get_last_index()
firstIndex = lastIndex - 6 # The scope is automatically set to the past 7 days
    Graph.display_graph(["Total cases"],[firstIndex,lastIndex],"Change") # Displays the change in total cases over the past seven days
| app.py |
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.beerchain import *
from test_framework.beerchainconfig import *
from test_framework.blocktools import *
import time
NUM_OUTPUTS = 1000
class BeerchainGasLimit(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-staking=1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.node = self.nodes[0]
self.node.generate(100+COINBASE_MATURITY)
tx = CTransaction()
"""
contract Test {
function() {
while(msg.gas > 0) {}
}
}
"""
contract_address = self.node.createcontract("60606040523415600e57600080fd5b5b605080601c6000396000f30060606040525b3415600f57600080fd5b60225b5b60005a1115601f576013565b5b565b0000a165627a7a72305820efcd4d663aac9e7a94b44502e712d9eb63cd640efe3aebf9e79210ab63ea6ff60029")['address']
self.node.generate(1)
        # Create a tx with NUM_OUTPUTS (1,000) outputs, each with a gas stipend of 5*10^8, calling the contract.
tx = CTransaction()
tx.vin = [make_vin(self.node, NUM_OUTPUTS*5*COIN)]
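        # Output script layout (Qtum-style OP_CALL): version byte, gas limit (the 5*10^8
        # stipend), gas price, call data, contract address, OP_CALL.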
tx.vout = [CTxOut(0, CScript([b"\x04", int(5*COIN), BEERCHAIN_MIN_GAS_PRICE, b"\x00", bytes.fromhex(contract_address), OP_CALL])) for i in range(NUM_OUTPUTS)]
tx.rehash()
signed_tx_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
# We may want to reject transactions which exceed the gas limit outright.
try:
self.node.sendrawtransaction(signed_tx_hex)
except:
pass
print("Tx size", len(signed_tx_hex))
t = time.time()
self.node.generate(1)
execution_time = time.time() - t
print('execution time:', execution_time, 'seconds')
assert(execution_time < 60)
if __name__ == '__main__':
BeerchainGasLimit().main()
| test/functional/beerchain_gas_limit.py |
import QueueLinkedList as queue
"""
n1
/\
n2 n3
/\ /\
n4 n5 n6 n7
"""
class BinaryTree:
def __init__(self, size) -> None:
self.customList = size * [None]
self.lastUsedIndex = 0
self.maxSize = size
    def insertNode(self, value):
if self.lastUsedIndex + 1 == self.maxSize:
return "Full"
self.customList[self.lastUsedIndex + 1] = value
self.lastUsedIndex += 1
return "Inserted"
def searchNode(self, value):
if value in self.customList:
return "Success"
return "Not found"
def preOrderTraversal(self, index):
# root -> left -> right
if index > self.lastUsedIndex:
return
print(self.customList[index])
self.preOrderTraversal(index * 2)
self.preOrderTraversal(index * 2 + 1)
def inOrderTraversal(self, index):
# left -> root -> right
if index > self.lastUsedIndex:
return
self.inOrderTraversal(index * 2)
print(self.customList[index])
self.inOrderTraversal(index * 2 + 1)
def postOrderTraversal(self, index):
# left -> right -> root
if index > self.lastUsedIndex:
return
self.postOrderTraversal(index * 2)
self.postOrderTraversal(index * 2 + 1)
print(self.customList[index])
def levelOrderTraversal(self, index):
for i in range(index, self.lastUsedIndex + 1):
print(self.customList[i])
def deleteNode(self, value):
if self.lastUsedIndex == 0:
return "Nothing to delete"
for i in range(1, self.lastUsedIndex + 1):
if self.customList[i] == value:
self.customList[i] = self.customList[self.lastUsedIndex]
self.customList[self.lastUsedIndex] = None
self.lastUsedIndex -= 1
return "Deleted"
def deleteTree(self):
self.customList = None
return "Deleted"
newBT = BinaryTree(8)
print(newBT.insertNode("N1"))
print(newBT.insertNode("N2"))
print(newBT.insertNode("N3"))
print(newBT.insertNode("N4"))
print(newBT.insertNode("N5"))
print(newBT.insertNode("N6"))
print(newBT.insertNode("N7"))
print(newBT.insertNode("N8"))
print(newBT.searchNode("N1"))
print(newBT.searchNode("N8"))
print("preOrderTraversal")
newBT.preOrderTraversal(1)
print("inOrderTraversal")
newBT.inOrderTraversal(1)
print("postOrderTraversal")
newBT.postOrderTraversal(1)
print("levelOrderTraversal")
newBT.levelOrderTraversal(1)
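# Worked example added for clarity: the eighth insert above returns "Full", so the
# tree holds N1-N7 (the node at index i has children at 2i and 2i+1). The traversal
# calls above therefore print, in order:
#   pre-order  : N1 N2 N4 N5 N3 N6 N7
#   in-order   : N4 N2 N5 N1 N6 N3 N7
#   post-order : N4 N5 N2 N6 N7 N3 N1
#   level-order: N1 N2 N3 N4 N5 N6 N7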
print(newBT.deleteNode("N4"))
newBT.levelOrderTraversal(1)
print(newBT.deleteTree())
| Trees/BinaryTreePL.py | 2,622 | root -> left -> right left -> root -> right left -> right -> root | 65 | en | 0.469026 |
"""hackernews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
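# Example sketch (hypothetical app and view names), following the steps in the
# docstring above:
# from news import views
# urlpatterns += [path('news/', views.latest, name='latest')]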
| hackernews/hackernews/urls.py | 773 | hackernews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 626 | en | 0.614812 |
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
"""Summary of filterGaussian
This will apply a noise reduction filter, we will use s 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size the less visible the blur)
To populate the Gaussian matrix we will use a kernel of normally distributed[stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboor pixels
The Gaussian distribution:
Gd = (1/2pi*stdv^2)exp(-((i-(k+1)^2) + (j - (k+1)^2))/(2*stdv^2))
i,j E [1,2k+1] for the kernel of size: (2k+1)x(2k+1)
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
"""
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
    - Intensity gradient - here it applies a Sobel filter along the x and y axes to detect whether edges are horizontal, vertical or diagonal
    - Non-maximum suppression - this shortens the frequency bandwidth of the signal to sharpen it
- Hysteresis thresholding
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
#cv.imshow('Gray Scale image',gray_scale)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
#cv.imshow('Gaussian filter',gaussian)
return cv.Canny(gaussian,min_val,max_val)
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
# Fill poly-function deals with multiple polygon
cv.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv.bitwise_and(img, mask)
return masked_image
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
"""
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
"""
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
def calculateLines(img,lines):
"""
Combines line segments into one or two lanes
    Note: By looking at the slope of a line we can see if it is on the left side (m<0) or right (m>0)
"""
def calculateCoordinates(img,line_params):
"""
Calculates the coordinates for a road lane
"""
#y = m*x +b, m= slope, b=intercept
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2)) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
    right_region_boundary = width * boundary # right lane line segment should be on right 2/3 of the screen
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
#Vertical line
continue
#Fit a polynomial to the points to get the slope and intercept
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0: #left side
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else: #right
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
left_lines_avg = np.average(left_lines,axis=0)
right_lines_avg = np.average(right_lines,axis=0)
if len(left_lines) > 0:
left_line = calculateCoordinates(img,left_lines_avg)
lane_lines.append(left_line)
if len(right_lines) > 0:
right_line = calculateCoordinates(img,right_lines_avg)
lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
# Note: the steering angle of:
# 0-89 degree: turn left
# 90 degree: going straight
# 91-180 degree: turn right
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else: #2 lines
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
camera_mid_offset_percent = 0.0 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
# find the steering angle, which is angle between navigation direction to end of center line
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
return steering_angle
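# Worked example of the arithmetic above (illustrative numbers): with a 480-px-high
# frame, y_offset = 240. If the lane midpoint sits 50 px right of centre
# (x_offset = 50), then atan(50 / 240) ~= 11.8 degrees, int() truncates to 11, and
# the returned steering angle is 11 + 90 = 101, i.e. a slight right turn.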
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
"""
Using last steering angle to stabilize the steering angle
This can be improved to use last N angles, etc
if new angle is too different from current angle, only turn by max_angle_deviation degrees
"""
if num_of_lane_lines == 1:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
else:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
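# Minimal end-to-end sketch of how the helpers above compose (illustrative only;
# 'road.jpg' is a placeholder path and is assumed to be roughly 1280x720, since the
# hard-coded ROI polygon in segmentRegionOfInterest expects that scale):
#
#     frame = cv.imread('road.jpg')
#     edges = filterCanny(frame)
#     roi = segmentRegionOfInterest(edges)
#     segments = houghFilter(roi)
#     lanes = calculateLines(frame, segments)
#     overlay = showLines(frame, lanes)
#     angle = calculateSteeringAngle(frame, lanes)
#     cv.imshow('lanes', cv.addWeighted(frame, 0.8, overlay, 1, 1))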
| CarlaDriving/server/lane_detection/utils.py | 8,242 | Calculates the coordinates for a road lane
Combines line segments into one or two lanes
Note: By looking at the slop of a line we can see if it is on the left side (m<0) or right (m>0)
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
- Intensity gradient - here it will apply a Sobel filter along the x and y axis to detect if edges are horizontal vertical or diagonal
- Non-maximum suppression - this shortens the frequency bandwith of the signal to sharpen it
- Hysteresis thresholding
Summary of filterGaussian
This will apply a noise reduction filter, we will use s 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size the less visible the blur)
To populate the Gaussian matrix we will use a kernel of normally distributed[stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboor pixels
The Gaussian distribution:
Gd = (1/2pi*stdv^2)exp(-((i-(k+1)^2) + (j - (k+1)^2))/(2*stdv^2))
i,j E [1,2k+1] for the kernel of size: (2k+1)x(2k+1)
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
Using last steering angle to stabilize the steering angle
This can be improved to use last N angles, etc
if new angle is too different from current angle, only turn by max_angle_deviation degrees
cv.imshow('Gray Scale image',gray_scale)cv.imshow('Gaussian filter',gaussian) Fill poly-function deals with multiple polygon Bitwise operation between canny image and mask image y = m*x +b, m= slope, b=intercept make points from middle of the frame down bound the coordinates within the frame left lane line segment should be on left 2/3 of the screen right lane line segment should be on left 2/3 of the screenVertical lineFit a polynomial to the points to get the slope and interceptleft sideright Note: the steering angle of: 0-89 degree: turn left 90 degree: going straight 91-180 degree: turn right 2 lines 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right find the steering angle, which is angle between navigation direction to end of center line angle (in radian) to center vertical line angle (in degrees) to center vertical line this is the steering angle needed by picar front wheel if only one lane detected, don't deviate too much if both lane lines detected, then we can deviate more | 2,940 | en | 0.856531 |
"""
A small utility aiming to create sound programmatically.
"""
from __future__ import annotations
from importlib import metadata
__version__ = metadata.version("sarada")
| sarada/__init__.py | 173 | A small utility aiming to create programatically sound. | 55 | en | 0.821822 |
import imp
from tkinter import *
from sys import exit
from teste.testeCores.corFunc import formatar
conta2x2 = 'x2y=5\n3x-5y=4'
root = Tk()
text = Text(root, width=20, height=10)
text.config(font='arial 20 bold')
text.insert(END, conta2x2)
text.pack()
def q_evento(event):
exit()
root.bind('q', q_evento)
cs = conta2x2.split('\n')
print('cs', cs)
posicao = cs[0].find('y')
print('posicao:', posicao)
p1 = p2 = '1.'
p1 += str(posicao)
p2 += str(posicao+1)
print('p1:', p1, 'p2:', p2)
conta = conta2x2.split('\n')
formatado = list()
text.config(background='black', foreground='white')
for i, c in enumerate(conta):
formatado.append(formatar(i, c))
fs = formatado[0][0]
print(fs)
print(fs['p1'])
for f1 in formatado:
for f in f1:
text.tag_add(f['nome'], f['p1'], f['p2'])
text.tag_config(f['nome'], foreground=f['fg'])
# text.tag_add("y1", p1, p2)
# text.tag_config("y1", background="black", foreground="green")
text.tag_config('1', foreground="green")
root.mainloop() | 06-sistemaLinear/sistemaLinear_v11/teste/testeCores/cores2.py | 1,001 | text.tag_add("y1", p1, p2) text.tag_config("y1", background="black", foreground="green") | 88 | en | 0.152978 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import random
import signal
import sys
import threading
from cotyledon import _utils
LOG = logging.getLogger(__name__)
class Service(object):
"""Base class for a service
This class will be executed in a new child process/worker
:py:class:`ServiceWorker` of a :py:class:`ServiceManager`. It registers
signals to manager the reloading and the ending of the process.
Methods :py:meth:`run`, :py:meth:`terminate` and :py:meth:`reload` are
optional.
"""
name = None
"""Service name used in the process title and the log messages in additionnal
of the worker_id."""
graceful_shutdown_timeout = None
"""Timeout after which a gracefully shutdown service will exit. zero means
endless wait. None means same as ServiceManager that launch the service"""
def __init__(self, worker_id):
"""Create a new Service
:param worker_id: the identifier of this service instance
:type worker_id: int
The identifier of the worker can be used for workload repartition
because it's consistent and always the same.
For example, if the number of workers for this service is 3,
        one will get 0, the second will get 1 and the last will get 2.
        If the worker with id 1 dies, the newly spawned process will get 1 again.
"""
super(Service, self).__init__()
self._initialize(worker_id)
def _initialize(self, worker_id):
if getattr(self, '_initialized', False):
return
self._initialized = True
if self.name is None:
self.name = self.__class__.__name__
self.worker_id = worker_id
self.pid = os.getpid()
self._signal_lock = threading.Lock()
# Only used by oslo_config_glue for now, so we don't need
# to have a list of hook
self._on_reload_internal_hook = self._noop_hook
def _noop_hook(self, service):
pass
def terminate(self):
"""Gracefully shutdown the service
This method will be executed when the Service has to shutdown cleanly.
If not implemented the process will just end with status 0.
To customize the exit code, the :py:class:`SystemExit` exception can be
used.
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
"""
def reload(self):
"""Reloading of the service
This method will be executed when the Service receives a SIGHUP.
If not implemented the process will just end with status 0 and
:py:class:`ServiceRunner` will start a new fresh process for this
service with the same worker_id.
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
"""
os.kill(os.getpid(), signal.SIGTERM)
def run(self):
"""Method representing the service activity
If not implemented the process will just wait to receive an ending
signal.
        This method is run in a dedicated thread and can block or return as needed
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
"""
# Helper to run application methods in a safety way when signal are
# received
def _reload(self):
with _utils.exit_on_exception():
if self._signal_lock.acquire(False):
try:
self._on_reload_internal_hook(self)
self.reload()
finally:
self._signal_lock.release()
def _terminate(self):
with _utils.exit_on_exception(), self._signal_lock:
self.terminate()
sys.exit(0)
def _run(self):
with _utils.exit_on_exception():
self.run()
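# Illustrative usage sketch (hypothetical subclass, written as a user of the
# package would; assumes the companion ServiceManager class from this package):
#
#     import cotyledon
#
#     class PrinterService(cotyledon.Service):
#         name = "printer"
#
#         def run(self):
#             while True:
#                 print("worker %d is alive" % self.worker_id)
#
#     manager = cotyledon.ServiceManager()
#     manager.add(PrinterService, workers=2)
#     manager.run()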
class ServiceConfig(object):
def __init__(self, service_id, service, workers, args, kwargs):
self.service = service
self.workers = workers
self.args = args
self.kwargs = kwargs
self.service_id = service_id
class ServiceWorker(_utils.SignalManager):
"""Service Worker Wrapper
This represents the child process spawned by ServiceManager
All methods implemented here, must run in the main threads
"""
@classmethod
def create_and_wait(cls, *args, **kwargs):
sw = cls(*args, **kwargs)
sw.wait_forever()
def __init__(self, config, service_id, worker_id, parent_pipe,
started_hooks, graceful_shutdown_timeout):
super(ServiceWorker, self).__init__()
self._ready = threading.Event()
_utils.spawn(self._watch_parent_process, parent_pipe)
# Reseed random number generator
random.seed()
args = tuple() if config.args is None else config.args
kwargs = dict() if config.kwargs is None else config.kwargs
self.service = config.service(worker_id, *args, **kwargs)
self.service._initialize(worker_id)
if self.service.graceful_shutdown_timeout is None:
self.service.graceful_shutdown_timeout = graceful_shutdown_timeout
self.title = "%(name)s(%(worker_id)d) [%(pid)d]" % dict(
name=self.service.name, worker_id=worker_id, pid=os.getpid())
# Set process title
_utils.setproctitle(
"%(pname)s: %(name)s worker(%(worker_id)d)" % dict(
pname=_utils.get_process_name(), name=self.service.name,
worker_id=worker_id))
# We are ready tell them
self._ready.set()
_utils.run_hooks('new_worker', started_hooks, service_id, worker_id,
self.service)
def _watch_parent_process(self, parent_pipe):
# This will block until the write end is closed when the parent
# dies unexpectedly
parent_pipe[1].close()
try:
parent_pipe[0].recv()
except EOFError:
pass
if self._ready.is_set():
LOG.info('Parent process has died unexpectedly, %s exiting'
% self.title)
if os.name == "posix":
os.kill(os.getpid(), signal.SIGTERM)
else:
# Fallback to process signal later
self._signals_received.appendleft(signal.SIGTERM)
else:
os._exit(0)
def _alarm(self):
LOG.info('Graceful shutdown timeout (%d) exceeded, '
'exiting %s now.' %
(self.service.graceful_shutdown_timeout,
self.title))
os._exit(1)
def _on_signal_received(self, sig):
# Code below must not block to return to select.select() and catch
# next signals
if sig == _utils.SIGALRM:
self._alarm()
elif sig == signal.SIGTERM:
LOG.info('Caught SIGTERM signal, '
'graceful exiting of service %s' % self.title)
if self.service.graceful_shutdown_timeout > 0:
if os.name == "posix":
signal.alarm(self.service.graceful_shutdown_timeout)
else:
threading.Timer(self.service.graceful_shutdown_timeout,
self._alarm).start()
_utils.spawn(self.service._terminate)
elif sig == _utils.SIGHUP:
_utils.spawn(self.service._reload)
def wait_forever(self):
LOG.debug("Run service %s" % self.title)
_utils.spawn(self.service._run)
super(ServiceWorker, self)._wait_forever()
| cotyledon/_service.py | 8,126 | Base class for a service
This class will be executed in a new child process/worker
:py:class:`ServiceWorker` of a :py:class:`ServiceManager`. It registers
signals to manager the reloading and the ending of the process.
Methods :py:meth:`run`, :py:meth:`terminate` and :py:meth:`reload` are
optional.
Service Worker Wrapper
This represents the child process spawned by ServiceManager
All methods implemented here, must run in the main threads
Create a new Service
:param worker_id: the identifier of this service instance
:type worker_id: int
The identifier of the worker can be used for workload repartition
because it's consistent and always the same.
For example, if the number of workers for this service is 3,
one will got 0, the second got 1 and the last got 2.
if worker_id 1 died, the new spawned process will got 1 again.
Reloading of the service
This method will be executed when the Service receives a SIGHUP.
If not implemented the process will just end with status 0 and
:py:class:`ServiceRunner` will start a new fresh process for this
service with the same worker_id.
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
Method representing the service activity
If not implemented the process will just wait to receive an ending
signal.
This method is ran into the thread and can block or return as needed
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
Gracefully shutdown the service
This method will be executed when the Service has to shutdown cleanly.
If not implemented the process will just end with status 0.
To customize the exit code, the :py:class:`SystemExit` exception can be
used.
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Only used by oslo_config_glue for now, so we don't need to have a list of hook Helper to run application methods in a safety way when signal are received Reseed random number generator Set process title We are ready tell them This will block until the write end is closed when the parent dies unexpectedly Fallback to process signal later Code below must not block to return to select.select() and catch next signals | 2,742 | en | 0.901223 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results):
"""Runs pretty-print command for specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit',
'--non-interactive'], cwd=cwd)
if exit_code != 0:
error_msg = (
'%s is not formatted correctly; run git cl format to fix.' % rel_path)
results.append(output_api.PresubmitError(error_msg))
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results):
"""Validates histogram prefixes in specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd)
if exit_code != 0:
error_msg = ('%s contains histogram(s) with disallowed prefix, please run '
'validate_prefix.py %s to fix.' % (rel_path, rel_path))
results.append(output_api.PresubmitError(error_msg))
def GetObsoleteXmlErrors(input_api, output_api, cwd, results):
"""Validates all histograms in the file are obsolete."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'histograms_xml/obsolete_histograms.xml contains non-obsolete '
'histograms, please run validate_obsolete_histograms.py to fix.')
results.append(output_api.PresubmitError(error_msg))
def GetValidateHistogramsError(input_api, output_api, cwd, results):
"""Validates histograms format and index file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_format.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms are not well-formatted; please run %s/validate_format.py '
'and fix the reported errors.' % cwd)
results.append(output_api.PresubmitError(error_msg))
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms index file is not up-to-date. Please run '
'%s/histogram_paths.py to update it' % cwd)
results.append(output_api.PresubmitError(error_msg))
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
"""Does corresponding validations if histograms.xml or enums.xml is changed.
Args:
input_api: An input_api instance that contains information about changes.
output_api: An output_api instance to create results of the PRESUBMIT check.
file_obj: A file object of one of the changed files.
cwd: Path to current working directory.
results: The returned variable which is a list of output_api results.
Returns:
A boolean that True if a histograms.xml or enums.xml file is changed.
"""
p = file_obj.AbsoluteLocalPath()
# Only do PRESUBMIT checks when |p| is under |cwd|.
if input_api.os_path.commonprefix([p, cwd]) != cwd:
return False
filepath = input_api.os_path.relpath(p, cwd)
if 'test_data' in filepath:
return False
# If the changed file is obsolete_histograms.xml, validate all histograms
# inside are obsolete.
if 'obsolete_histograms.xml' in filepath:
GetObsoleteXmlErrors(input_api, output_api, cwd, results)
# Return false here because we don't need to validate format if users only
# change obsolete_histograms.xml.
return False
# If the changed file is histograms.xml or histogram_suffixes_list.xml,
# pretty-print and validate prefix it.
elif ('histograms.xml' in filepath
or 'histogram_suffixes_list.xml' in filepath):
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
# TODO(crbug/1120229): Re-enable validate prefix check once all histograms
# are split.
# GetPrefixErrors(input_api, output_api, cwd, filepath, results)
return True
# If the changed file is enums.xml, pretty-print it.
elif 'enums.xml' in filepath:
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
return True
return False
def CheckChange(input_api, output_api):
"""Checks that histograms.xml is pretty-printed and well-formatted."""
results = []
cwd = input_api.PresubmitLocalPath()
xml_changed = False
# Only for changed files, do corresponding checks if the file is
# histograms.xml, enums.xml or obsolete_histograms.xml.
for file_obj in input_api.AffectedTextFiles():
is_changed = ValidateSingleFile(
input_api, output_api, file_obj, cwd, results)
xml_changed = xml_changed or is_changed
# Run validate_format.py and validate_histograms_index.py, if changed files
# contain histograms.xml or enums.xml.
if xml_changed:
GetValidateHistogramsError(input_api, output_api, cwd, results)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| tools/metrics/histograms/PRESUBMIT.py | 5,276 | Checks that histograms.xml is pretty-printed and well-formatted.
Validates all histograms in the file are obsolete.
Validates histogram prefixes in specified file.
Runs pretty-print command for specified file.
Validates histograms format and index file.
Does corresponding validations if histograms.xml or enums.xml is changed.
Args:
input_api: An input_api instance that contains information about changes.
output_api: An output_api instance to create results of the PRESUBMIT check.
file_obj: A file object of one of the changed files.
cwd: Path to current working directory.
results: The returned variable which is a list of output_api results.
Returns:
A boolean that True if a histograms.xml or enums.xml file is changed.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
Copyright 2013 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Only do PRESUBMIT checks when |p| is under |cwd|. If the changed file is obsolete_histograms.xml, validate all histograms inside are obsolete. Return false here because we don't need to validate format if users only change obsolete_histograms.xml. If the changed file is histograms.xml or histogram_suffixes_list.xml, pretty-print and validate prefix it. TODO(crbug/1120229): Re-enable validate prefix check once all histograms are split. GetPrefixErrors(input_api, output_api, cwd, filepath, results) If the changed file is enums.xml, pretty-print it. Only for changed files, do corresponding checks if the file is histograms.xml, enums.xml or obsolete_histograms.xml. Run validate_format.py and validate_histograms_index.py, if changed files contain histograms.xml or enums.xml. | 1,817 | en | 0.700777 |
import cv2
import pose_detection as pose_d
pose_model = pose_d.load_pose_model(r'pre_trained\AFLW2000.pkl')
def detect_face(img_PATH, model_PATH):
# Load the cascade
face_cascade = cv2.CascadeClassifier(model_PATH)
# Read the input image
img = cv2.imread(img_PATH)
# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
if len(faces) > 1:
print('Multiple faces detected')
return False
elif len(faces) < 1:
print('No faces detected')
return False
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
# Display the output
#cv2_imshow(img)
cv2.waitKey()
return True # TO DO may want to return face at some point as well
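# Hypothetical usage sketch ('face.jpg' is a placeholder image path):
# detect_face('face.jpg', cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')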
def detect_face_video(pose_model):
# Load the cascade
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# To capture video from webcam.
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')
while True:
# Read the frame
_, img = cap.read()
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Get pose estimate
yaw, pitch, roll = pose_d.run_pose_detection(pose_model, pose_d.load_img(img))
# Draw the rectangle around each face
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
#draw pose label
img = pose_d.draw_labels(yaw, pitch, roll, img)
# Display
cv2.imshow('img', img)
# Stop if escape key is pressed
k = cv2.waitKey(30) & 0xff
if k==27:
break
# Release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
detect_face_video(pose_model) | face_detection_cv2.py | 2,040 | Load the cascade Read the input image Convert into grayscale Detect faces Draw rectangle around the faces Display the outputcv2_imshow(img) TO DO may want to return face at some point as well Load the cascade To capture video from webcam. To use a video file as input cap = cv2.VideoCapture('filename.mp4') Read the frame Convert to grayscale Detect the faces Get pose estimate Draw the rectangle around each facedraw pose label Display Stop if escape key is pressed Release the VideoCapture object | 501 | en | 0.800323 |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the isBalanced function below.
def isBalanced(s):
left_symbol = [ '{', '[', '(']
right_symbol = [ '}', ']', ')']
# fast checking of symbol counting equality
for i in range(3):
left_count = s.count( left_symbol[i] )
right_count = s.count( right_symbol[i] )
if left_count != right_count:
return "NO"
_stack = []
for i in range( len(s) ):
char = s[i]
if char in { '{', '[', '(' } :
# push into stack
_stack.append( char )
if char in { '}', ']', ')' } :
            # pop from stack and compare with left symbol
            if len(_stack) == 0:
                # a closing symbol with no matching opener, e.g. ")(", is unbalanced
                return "NO"
            index_of_right = right_symbol.index( char )
            index_of_left = left_symbol.index( _stack.pop(-1) )
if index_of_left == index_of_right:
# match of {}, [], or ()
pass
else:
return "NO"
if len(_stack) == 0:
return "YES"
else:
return "NO"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
s = input()
result = isBalanced(s)
fptr.write(result + '\n')
fptr.close()
| Data Structures/Stack/Balanced Bracket/balanced_bracket.py | 1,310 | !/bin/python3 Complete the isBalanced function below. fast checking of symbol counting equality push into stack pop from stack and compare with left symbol match of {}, [], or () | 178 | en | 0.798362 |
import unittest
import attr
import numpy as np
from robogym.randomization.env import (
EnvActionRandomizer,
EnvObservationRandomizer,
EnvParameterRandomizer,
EnvRandomization,
EnvSimulationRandomizer,
build_randomizable_param,
)
from robogym.randomization.observation import ObservationRandomizer
from robogym.randomization.parameters import FloatRandomizerParameter
class DummyRandomizerParameter(FloatRandomizerParameter):
def __init__(self, name, val):
super().__init__(
name, val, value_range=(-1.0, 1.0), delta=1.0,
)
@attr.s(auto_attribs=True)
class DummyNestedEnvParameter:
c: int = build_randomizable_param(1, low=-3, high=3)
@attr.s(auto_attribs=True)
class DummyEnvParameter:
a: int = build_randomizable_param(0, low=-5, high=5)
b: float = build_randomizable_param(0.0, low=-1.0, high=1.0)
x: int = 0 # Non randomizable parameter.
nested: DummyNestedEnvParameter = DummyNestedEnvParameter()
class DummyObservationRandomizer(ObservationRandomizer):
def __init__(self, name, val):
super().__init__(name)
self.val = self.register_parameter(val)
def _randomize(self, target, random_state):
target[self.val.name] = self.val.get_value()
return target
class TestRandomization(unittest.TestCase):
def setUp(self):
super().setUp()
self.random_state = np.random.RandomState()
def test_randomizer_parameters(self):
parameter = DummyRandomizerParameter("foo", 0.0)
assert parameter.get_value() == 0.0
assert parameter.get_range() == (-1.0, 1.0)
assert parameter.get_delta() == 1.0
parameter.set_value(1.0)
assert parameter.get_value() == 1.0
def test_randomizer_basic(self):
"""
Test functionality of basic randomizer.
"""
randomizer = EnvParameterRandomizer(DummyEnvParameter())
assert len(randomizer.get_parameters()) == 3
# Make sure register duplicate parameter is not allowed.
with self.assertRaises(AssertionError):
randomizer.register_parameter(DummyRandomizerParameter("a", 1))
randomizer.register_parameter(DummyRandomizerParameter("d", 1))
assert len(randomizer.get_parameters()) == 4
randomizer.get_parameter("a").set_value(1)
randomizer.get_parameter("b").set_value(0.5)
randomizer.get_parameter("c").set_value(2)
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
assert parameters.a == 1
assert parameters.b == 0.5
assert parameters.nested.c == 2
randomizer.disable()
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
randomizer.get_parameter("a").set_value(1)
assert parameters.a == 0
def test_observation_randomizer(self):
randomizer = EnvObservationRandomizer(
[
DummyObservationRandomizer("r1", DummyRandomizerParameter("foo", 0.0)),
DummyObservationRandomizer("r2", DummyRandomizerParameter("bar", 1.0)),
]
)
assert len(randomizer.get_randomizers()) == 2
assert len(randomizer.get_parameters()) == 2
obs = randomizer.randomize({}, self.random_state)
assert obs["foo"] == 0.0
assert obs["bar"] == 1.0
def test_env_randomization(self):
randomization = EnvRandomization(
parameter_randomizer=EnvParameterRandomizer(DummyEnvParameter()),
observation_randomizer=EnvObservationRandomizer(
[
DummyObservationRandomizer(
"r1", DummyRandomizerParameter("foo", 0.0)
),
]
),
action_randomizer=EnvActionRandomizer([]),
simulation_randomizer=EnvSimulationRandomizer([]),
)
randomization.update_parameter("observation.r1:foo", 0.5)
parameter = randomization.get_parameter("observation.r1:foo")
assert parameter.get_value() == 0.5
| robogym/randomization/tests/test_randomization.py | 4,093 | Test functionality of basic randomizer.
Non randomizable parameter. Make sure register duplicate parameter is not allowed. | 124 | en | 0.431535 |
# COMBINATION:
# combination is all the different ways that we can group something where the order does not matter.
# PERMUTATION:
# permutation is all the different ways that we can group something where the order does matter.
import itertools
my_list=[1,2,3]
my_combinations=itertools.combinations(my_list,2)# here the first argument is a list and the second argument is how many items we want in a group; it is the r of nCr.
for c in my_combinations:
print(c)
my_permutations=itertools.permutations(my_list,2)
for c in my_permutations:
print(c)
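# Sanity check on the counts above: choosing 2 items out of 3 gives
# nCr = 3!/(2!*1!) = 3 groupings, while nPr = 3!/(3-2)! = 6 ordered arrangements.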
# When should we use combinations and permutations?
# If the order doesn't matter we should use combinations.
import itertools
my_list=[1,2,3,4,5,6]
my_combinations=itertools.combinations(my_list,3)
answer=[results for results in my_combinations if sum(results)==10]
for i in answer:
print(i)
# If the order does matter we should use permutations.
# Word-matching game.
import itertools
word="sample"
my_letters="pslame"
my_permutations=itertools.permutations(my_letters,len(my_letters))
for p in my_permutations:
if "".join(p) == word:
print("Match!")
break
else:
print("No match")
| term06 (permutation and combination).py | 1,174 | COMBINATION: combination is all the different ways that we can group something where the order does not matter. PERMUTATION: permutation is all the different ways that we can group something where the order does matter. here first arguement is a list and second arguement is how many items we want in a group. it is r of nCr. When we should use combination and permutation? if the order doent matter we should use combinations. if the order does matter we should use permutations. word macthing game. | 501 | en | 0.900267 |
import logging
from flask import request, flash, abort, Response
from flask_admin import expose
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin._compat import iteritems, string_types
import mongoengine
import gridfs
from mongoengine.connection import get_db
from bson.objectid import ObjectId
from flask_admin.actions import action
from .filters import FilterConverter, BaseMongoEngineFilter
from .form import get_form, CustomModelConverter
from .typefmt import DEFAULT_FORMATTERS
from .tools import parse_like_term
from .helpers import format_error
from .ajax import process_ajax_references, create_ajax_loader
from .subdoc import convert_subdocuments
# Set up logger
log = logging.getLogger("flask-admin.mongo")
SORTABLE_FIELDS = set((
mongoengine.StringField,
mongoengine.IntField,
mongoengine.FloatField,
mongoengine.BooleanField,
mongoengine.DateTimeField,
mongoengine.ComplexDateTimeField,
mongoengine.ObjectIdField,
mongoengine.DecimalField,
mongoengine.ReferenceField,
mongoengine.EmailField,
mongoengine.UUIDField,
mongoengine.URLField
))
class ModelView(BaseModelView):
"""
MongoEngine model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.mongoengine.filters.BaseFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = CustomModelConverter
"""
Model form conversion class. Use this to implement custom
field conversion logic.
Custom class should be derived from the
`flask_admin.contrib.mongoengine.form.CustomModelConverter`.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
object_id_converter = ObjectId
"""
Mongodb ``_id`` value conversion function. Default is `bson.ObjectId`.
Use this if you are using String, Binary and etc.
For example::
class MyModelView(BaseModelView):
object_id_converter = int
or::
class MyModelView(BaseModelView):
object_id_converter = str
"""
filter_converter = FilterConverter()
"""
Field to filter converter.
Override this attribute to use a non-default converter.
"""
column_type_formatters = DEFAULT_FORMATTERS
"""
Customized type formatters for MongoEngine backend
"""
allowed_search_types = (mongoengine.StringField,
mongoengine.URLField,
mongoengine.EmailField)
"""
List of allowed search field types.
"""
form_subdocuments = None
"""
Subdocument configuration options.
    This field accepts a dictionary, where the key is a field name and the value is either a dictionary or an instance of the
`flask_admin.contrib.EmbeddedForm`.
Consider following example::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.EmbeddedDocumentField(Comment)
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_columns': ('name',)
}
}
    In this example, the `Post` model has a child `Comment` subdocument. When generating the form for the `Comment` embedded
    document, Flask-Admin will only create the `name` field.
It is also possible to use class-based embedded document configuration::
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
Arbitrary depth nesting is supported::
class SomeEmbed(EmbeddedForm):
form_excluded_columns = ('test',)
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
form_subdocuments = {
'inner': SomeEmbed()
}
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
There's also support for forms embedded into `ListField`. All you have
    to do is to create a nested rule with `None` as the name. Even though it
    is slightly confusing, that's how Flask-MongoEngine creates
form fields embedded into ListField::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.ListField(db.EmbeddedDocumentField(Comment))
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
"""
def __init__(self, model, name=None,
category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self._search_fields = []
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._primary_key = self.scaffold_pk()
def _refresh_cache(self):
"""
Refresh cache.
"""
# Process subdocuments
if self.form_subdocuments is None:
self.form_subdocuments = {}
self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
# Cache other properties
super(ModelView, self)._refresh_cache()
def _process_ajax_references(self):
"""
AJAX endpoint is exposed by top-level admin view class, but
subdocuments might have AJAX references too.
This method will recursively go over subdocument configuration
and will precompute AJAX references for them ensuring that
subdocuments can also use AJAX to populate their ReferenceFields.
"""
references = super(ModelView, self)._process_ajax_references()
return process_ajax_references(references, self)
def _get_model_fields(self, model=None):
"""
Inspect model and return list of model fields
:param model:
Model to inspect
"""
if model is None:
model = self.model
return sorted(iteritems(model._fields), key=lambda n: n[1].creation_counter)
def scaffold_pk(self):
# MongoEngine models have predefined 'id' as a key
return 'id'
def get_pk_value(self, model):
"""
Return the primary key value from the model instance
:param model:
Model instance
"""
return model.pk
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
columns = []
for n, f in self._get_model_fields():
# Verify type
field_class = type(f)
if (field_class == mongoengine.ListField and
isinstance(f.field, mongoengine.EmbeddedDocumentField)):
continue
if field_class == mongoengine.EmbeddedDocumentField:
continue
if self.column_display_pk or field_class != mongoengine.ObjectIdField:
columns.append(n)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns (name, field)
"""
columns = {}
for n, f in self._get_model_fields():
if type(f) in SORTABLE_FIELDS:
if self.column_display_pk or type(f) != mongoengine.ObjectIdField:
columns[n] = f
return columns
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if isinstance(p, string_types):
p = self.model._fields.get(p)
if p is None:
raise Exception('Invalid search field')
field_type = type(p)
# Check type
if (field_type not in self.allowed_search_types):
raise Exception('Can only search on text columns. ' +
'Failed to setup search for "%s"' % p)
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, name):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
if isinstance(name, string_types):
attr = self.model._fields.get(name)
else:
attr = name
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Find name
visible_name = None
if not isinstance(name, string_types):
visible_name = self.get_column_name(attr.name)
if not visible_name:
visible_name = self.get_column_name(name)
# Convert filter
type_name = type(attr).__name__
flt = self.filter_converter.convert(type_name,
attr,
visible_name)
return flt
def is_valid_filter(self, filter):
"""
Validate if the provided filter is a valid MongoEngine filter
:param filter:
Filter object
"""
return isinstance(filter, BaseMongoEngineFilter)
def scaffold_form(self):
"""
Create form from the model.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
# AJAX foreignkey support
def _create_ajax_loader(self, name, opts):
return create_ajax_loader(self.model, name, name, opts)
def get_query(self):
"""
Returns the QuerySet for this view. By default, it returns all the
objects for the current model.
"""
return self.model.objects
def _search(self, query, search_term):
        # TODO: Unfortunately, MongoEngine contains a bug which
# prevents running complex Q queries and, as a result,
# Flask-Admin does not support per-word searching like
# in other backends
op, term = parse_like_term(search_term)
criteria = None
for field in self._search_fields:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
if criteria is None:
criteria = q
else:
criteria |= q
return query.filter(criteria)
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = self.get_query()
# Filters
if self._filters:
for flt, flt_name, value in filters:
f = self._filters[flt]
query = f.apply(query, f.clean(value))
# Search
if self._search_supported and search:
query = self._search(query, search)
# Get count
count = query.count() if not self.simple_list_pager else None
# Sorting
if sort_column:
query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
else:
order = self._get_default_order()
if order:
query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
# Pagination
if page is not None:
query = query.skip(page * self.page_size)
query = query.limit(self.page_size)
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model instance by its ID
:param id:
Model ID
"""
try:
return self.get_query().filter(pk=id).first()
except mongoengine.ValidationError as ex:
flash(gettext('Failed to get model. %(error)s',
error=format_error(ex)),
'error')
return None
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self._on_model_change(form, model, True)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to create record.')
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to update record.')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
self.on_model_delete(model)
model.delete()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to delete record.')
return False
else:
self.after_model_delete(model)
return True
# FileField access API
@expose('/api/file/')
def api_file_view(self):
pk = request.args.get('id')
coll = request.args.get('coll')
db = request.args.get('db', 'default')
if not pk or not coll or not db:
abort(404)
fs = gridfs.GridFS(get_db(db), coll)
data = fs.get(self.object_id_converter(pk))
if not data:
abort(404)
return Response(data.read(),
content_type=data.content_type,
headers={
'Content-Length': data.length
})
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
count = 0
all_ids = [self.object_id_converter(pk) for pk in ids]
for obj in self.get_query().in_bulk(all_ids).values():
count += self.delete_model(obj)
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete records. %(error)s', error=str(ex)),
'error')
| sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py | 20,150 | MongoEngine model scaffolding.
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
Inspect model and return list of model fields
:param model:
Model to inspect
AJAX endpoint is exposed by top-level admin view class, but
subdocuments might have AJAX references too.
This method will recursively go over subdocument configuration
and will precompute AJAX references for them ensuring that
subdocuments can also use AJAX to populate their ReferenceFields.
Refresh cache.
Create model helper
:param form:
Form instance
Delete model helper
:param model:
Model instance
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
Return a single model instance by its ID
:param id:
Model ID
Return the primary key value from the model instance
:param model:
Model instance
Returns the QuerySet for this view. By default, it returns all the
objects for the current model.
Init search
Validate if the provided filter is a valid MongoEngine filter
:param filter:
Filter object
Return filter object(s) for the field
:param name:
Either field name or field instance
Create form from the model.
Scaffold list columns
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
Return a dictionary of sortable columns (name, field)
Update model helper
:param form:
Form instance
:param model:
Model instance to update
Set up logger Process subdocuments Cache other properties MongoEngine models have predefined 'id' as a key Verify type Check type Find name Convert filter AJAX foreignkey support TODO: Unfortunately, MongoEngine contains bug which prevents running complex Q queries and, as a result, Flask-Admin does not support per-word searching like in other backends Filters Search Get count Sorting Pagination FileField access API Default model actions Check delete action permission | 2,894 | en | 0.632425 |
import requests, json, os, time
from PIL import Image
from io import BytesIO
import img2pdf
class jumpplus_downloader:
def __init__(self):
self.file=0
self.h=1200
self.w=760
def auto_list_download(self, url, next=False, sleeptime=20,pdfConversion=True):
self.json_download(url)
self.file=0
if os.path.isdir(self.list["readableProduct"]["title"])!=True:
os.mkdir(self.list["readableProduct"]["title"])
for page in self.list["readableProduct"]["pageStructure"]["pages"]:
time.sleep(sleeptime)
if page["type"]=="main":
self.h=page["height"]
self.w=page["width"]
self.download(page["src"],False)
self.processing()
self.output("./"+self.list["readableProduct"]["title"]+"/")
if pdfConversion:
self.convertToPdf()
if self.list["readableProduct"]["nextReadableProductUri"]!=None and next==True:
self.auto_list_download(self.list["readableProduct"]["nextReadableProductUri"],True)
def json_download(self,url):
        # Spoof the User-Agent header so the request is reliably accepted.
session=requests.session()
headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"}
json_data=session.get(url+".json",headers=headers).text
self.list=json.loads(json_data)
def json_localread(self, filepath):
with open(filepath) as json_file:
json_data=json.load(json_file)
self.list=json_data
def download(self,url,fakeque=False):
if fakeque:
print("Emulating Download : " + url)
self.img=url
else:
session=requests.session()
headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"}
            self.img=session.get(url,headers=headers)
    def processing(self):
        # Descramble the downloaded page image: trim the padding the server adds,
        # split the rest into a 4x4 grid of tiles, and paste the tiles back with
        # the grid transposed, which restores the readable page.
readImage=Image.open(BytesIO(self.img.content))
imageSize=readImage.size
width=imageSize[0]-24
height=imageSize[1]-16
buff=[]
counterX=0
counterY=0
for wx in range(4):
inbuff=[]
for lx in range(4):
cropped=readImage.crop(box=(width/4*counterX,height/4*counterY, width/4*(counterX+1),height/4*(counterY+1)))
inbuff.append(cropped)
counterY+=1
buff.append(inbuff)
counterX+=1
counterY=0
self.converted_img=Image.new("RGB",(int(width),int(height)))
counterX=0
counterY=0
for wdx in buff:
for ldx in wdx:
print(str(counterY))
self.converted_img.paste(ldx, (int(width/4*counterX) , int(height/4*counterY)))
counterX+=1
counterX=0
print("Current Y Counter:"+str(counterY))
counterY+=1
def output(self, file="./"):
self.converted_img.save(file+str(self.file)+".png")
self.file+=1
def convertToPdf(self):
directory="./"+self.list["readableProduct"]["title"]+"/"
sourceDir=os.listdir(directory)
imgcount=0
img=[]
filextend=sourceDir[0].split(".")
filextend=(str(".")+str(filextend[1]))
for images in sourceDir:
img.append(directory + str(imgcount) + filextend )
imgcount=imgcount+1
with open("./"+self.list["readableProduct"]["title"]+".pdf","wb") as f:
f.write(img2pdf.convert(img))
#A simple Json Dumper for debugging.
def dumpSimplifiedJson(self,jsObject):
f=open("JSON.json","w")
json.dump(jsObject, f, ensure_ascii=False, indent=4, sort_keys=True, separators=(',',': '))
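# Illustrative usage sketch (the URL is a placeholder assumption): fetch every
# page of one episode, descramble them, and bundle the result into a PDF.
if __name__ == "__main__":
    downloader = jumpplus_downloader()
    downloader.auto_list_download("https://example.com/episode/XXXX",
                                  next=False, sleeptime=20, pdfConversion=True)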
| py/lib/jumpplus_downloader.py | 3,931 | Counterfeit User agent for absolutely successfully connection.A simple Json Dumper for debugging. | 97 | en | 0.904576 |
import os
import sys
import numpy as np
from PIL import Image
import torch
#TODO - add save function, these functions can be used to check movement
def crop_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
return image_array[y_min:y_max, x_min:x_max]
#Keep image size, set pixel value outside of bounding box as 0
def crop_pad_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==0)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def set_bb_to_black(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==1)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def transform_img_for_model(image_array, transforms=None):
image_array_copy = np.copy(image_array)
#image_array_copy.unsqueeze_(0)
image_array_copy = np.expand_dims(image_array_copy, axis=2)
if(transforms is None):
        image_array_copy = torch.from_numpy(image_array_copy).permute(2, 0, 1).repeat(3, 1, 1)
else:
image_array_copy = transforms(image_array_copy).repeat(3, 1, 1)
return image_array_copy
def save_image_from_tensor(image_array, path):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og.save(path)
def resize_image(image_array, width, height):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og = og.resize((width, height))
og = og.convert('L')
return np.array(og)
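# Illustrative usage sketch (the image and bounding box values are placeholders).
# Bounding boxes follow the (y_min, x_min, height, width) convention used above.
if __name__ == "__main__":
    image = torch.zeros((128, 128), dtype=torch.uint8)
    bb = (10, 20, 50, 40)
    cropped = crop_image(image, bb)       # just the 50x40 region inside the box
    padded = crop_pad_image(image, bb)    # full-size image, zeroed outside the box
    blacked = set_bb_to_black(image, bb)  # full-size image, zeroed inside the box
    print(cropped.shape, padded.shape, blacked.shape)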
| pneumoRL/image_util.py | 2,160 | TODO - add save function, these functions can be used to check movement Keep image size, set pixel value outside of bounding box as 0image_array_copy.unsqueeze_(0) | 163 | en | 0.776656 |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
## This test verifies that vector register operands with different layouts don't overlap.
class MainSequence(VectorTestSequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mInstrList = (
'VNSRA.WI##RISCV',
'VNSRA.WV##RISCV',
'VNSRA.WX##RISCV',
'VNSRL.WI##RISCV',
'VNSRL.WV##RISCV',
'VNSRL.WX##RISCV',
'VWADD.VV##RISCV',
'VWADD.VX##RISCV',
'VWADD.WV##RISCV',
'VWADD.WX##RISCV',
'VWADDU.VV##RISCV',
'VWADDU.VX##RISCV',
'VWADDU.WV##RISCV',
'VWADDU.WX##RISCV',
'VWMACC.VV##RISCV',
'VWMACC.VX##RISCV',
'VWMACCSU.VV##RISCV',
'VWMACCSU.VX##RISCV',
'VWMACCU.VV##RISCV',
'VWMACCU.VX##RISCV',
'VWMACCUS.VX##RISCV',
'VWMUL.VV##RISCV',
'VWMUL.VX##RISCV',
'VWMULSU.VV##RISCV',
'VWMULSU.VX##RISCV',
'VWMULU.VV##RISCV',
'VWMULU.VX##RISCV',
'VWSUB.VV##RISCV',
'VWSUB.VX##RISCV',
'VWSUB.WV##RISCV',
'VWSUB.WX##RISCV',
'VWSUBU.VV##RISCV',
'VWSUBU.VX##RISCV',
'VWSUBU.WV##RISCV',
'VWSUBU.WX##RISCV',
)
## Set up the environment prior to generating the test instructions.
def _setUpTest(self):
choices_mod = ChoicesModifier(self.genThread)
# TODO(Noah): Remove the restriction on SEW when a mechanism to skip instructions with
# illegal vector layouts is implemented. For now, ensure vector element width is set to no
# more than 32 bits.
choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 0, '0x6': 0, '0x7': 0}
choices_mod.modifyRegisterFieldValueChoices('vtype.VSEW', choice_weights)
# Ensure vector register group size is no more than 4, as larger values are not legal for
# widening and narrowing instructions
vlmul_choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 10, '0x6': 10, '0x7': 10}
choices_mod.modifyRegisterFieldValueChoices('vtype.VLMUL', vlmul_choice_weights)
choices_mod.commitSet()
## Return the maximum number of test instructions to generate.
def _getMaxInstructionCount(self):
return 1000
## Return a list of test instructions to randomly choose from.
def _getInstructionList(self):
return self._mInstrList
## Verify additional aspects of the instruction generation and execution.
#
# @param aInstr The name of the instruction.
# @param aInstrRecord A record of the generated instruction.
def _performAdditionalVerification(self, aInstr, aInstrRecord):
vd_val = aInstrRecord['Dests']['vd']
vs1_val = aInstrRecord['Srcs'].get('vs1')
vs2_val = aInstrRecord['Srcs']['vs2']
if aInstr.startswith('VW'):
if vs1_val and (vd_val == (vs1_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
if ('.W' not in aInstr) and (vd_val == (vs2_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
elif aInstr.startswith('VN'):
if (vd_val & 0x1F) == vs2_val:
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
else:
self.error('Unexpected instruction %s' % aInstr)
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| tests/riscv/vector/vector_wide_operand_conflict_force.py | 4,580 | Copyright (C) [2020] Futurewei Technologies, Inc. FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the License for the specific language governing permissions and limitations under the License. This test verifies that vector register operands with different layouts don't overlap. Set up the environment prior to generating the test instructions. TODO(Noah): Remove the restriction on SEW when a mechanism to skip instructions with illegal vector layouts is implemented. For now, ensure vector element width is set to no more than 32 bits. Ensure vector register group size is no more than 4, as larger values are not legal for widening and narrowing instructions Return the maximum number of test instructions to generate. Return a list of test instructions to randomly choose from. Verify additional aspects of the instruction generation and execution. @param aInstr The name of the instruction. @param aInstrRecord A record of the generated instruction. | 1,345 | en | 0.8459 |
import numpy as np
import matplotlib.pyplot as plt
def estimate(particles, weights):
"""returns mean and variance of the weighted particles"""
pos = particles
mean = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mean)**2, weights=weights, axis=0)
return mean, var
def simple_resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, np.random.rand(N))
# resample according to indexes
particles[:] = particles[indexes]
weights.fill(1.0 / N)
return particles,weights
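# Illustrative check of the two helpers above (values are placeholders):
#
#   mean, var = estimate(np.array([1.0, 2.0, 3.0]), np.array([0.2, 0.5, 0.3]))
#   parts, w = simple_resample(np.array([1.0, 2.0, 3.0]), np.array([0.2, 0.5, 0.3]))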
x=0.1 # initial true state
x_N=1 # process noise covariance (the system is one-dimensional, so this is just the variance)
x_R=1 # measurement noise covariance
T=75 # number of time steps
N=100 # number of particles; more particles track better but cost more computation
V=2 # variance of the initial particle distribution
x_P=x+np.random.randn(N)*np.sqrt(V)
#plt.hist(x_P,N, normed=True)
z_out=[x**2/20+np.random.randn(1)*np.sqrt(x_R)] # actual measurements
x_out=[x] # true states over time
x_est=x # current state estimate
x_est_out=[x_est]
#print(x_out)
for t in range(1,T):
x=0.5*x+25*x/(1+x**2)+8*np.cos(1.2*(t-1))+np.random.randn()*np.sqrt(x_N)
z=x**2/20+np.random.randn()*np.sqrt(x_R)
    # propagate the particles through the process model
    x_P_update=0.5*x_P+25*x_P/(1+x_P**2)+8*np.cos(1.2*(t-1))+np.random.randn(N)*np.sqrt(x_N)
    z_update=x_P_update**2/20+np.random.randn(N)*np.sqrt(x_R)
    #print(z_update)
    # compute importance weights from the measurement likelihood and normalize them
    P_w=(1/np.sqrt(2*np.pi*x_R))*np.exp(-(z-z_update)**2/(2*x_R))
    P_w=P_w/np.sum(P_w)
    # estimate the state as the weighted mean of the propagated particles
    x_est,var=estimate(x_P_update,P_w)
    # resample the propagated particles according to their weights
    x_P,P_w=simple_resample(x_P_update,P_w)
    # store results for plotting
x_out.append(x)
z_out.append(z)
x_est_out.append(x_est)
#print(x_out)
t=np.arange(0,T)
plt.plot(t,x_out,color='blue',label='true value')
plt.plot(t,x_est_out,color='red',label='estimate value')
plt.legend()
plt.show() | 002_Particle_Filter/Particle_Filter.py | 1,939 | returns mean and variance of the weighted particles
avoid round-off error resample according to indexes初始真实状态系统过程噪声的协方差(由于是一维的,这里就是方差)测量的协方差共进行75次粒子数,越大效果越好,计算量也越大初始分布的方差plt.hist(x_P,N, normed=True)实际测量值测量值的输出向量估计值print(x_out)更新粒子print(z_update)计算权重估计重采样保存数据print(x_out) | 272 | zh | 0.333185 |
#
# PySNMP MIB module ADIC-INTELLIGENT-STORAGE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADIC-INTELLIGENT-STORAGE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:13:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, NotificationType, iso, Counter64, ObjectIdentity, Counter32, Integer32, Unsigned32, enterprises, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ModuleIdentity, MibIdentifier, Gauge32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "NotificationType", "iso", "Counter64", "ObjectIdentity", "Counter32", "Integer32", "Unsigned32", "enterprises", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Gauge32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
adic = MibIdentifier((1, 3, 6, 1, 4, 1, 3764))
storage = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1))
intelligent = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1))
productAgentInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10))
globalData = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20))
components = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30))
software = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 100))
hardware = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200))
powerAndCooling = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200))
sml = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 300))
network = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 400))
notification = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500))
class Boolean(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("true", 1), ("false", 2))
class AdicMibVersion(DisplayString):
pass
class AdicREDIdentifier(Counter32):
pass
class AdicEnable(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
class AdicAgentStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("other", 1), ("unknown", 2), ("ok", 3), ("non-critical", 4), ("critical", 5), ("non-recoverable", 6))
class AdicOnlineStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("online", 1), ("offline", 2), ("shutdown", 3))
class AdicGlobalId(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class AdicComponentType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
namedValues = NamedValues(("mcb", 1), ("cmb", 2), ("ioBlade", 3), ("rcu", 4), ("networkChasis", 5), ("controlModule", 6), ("expansionModule", 7), ("powerSupply", 8))
class AdicInterfaceType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("scsi", 1), ("fibreChannel", 2))
class AdicSensorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("nominal", 1), ("warningLow", 2), ("warningHigh", 3), ("alarmLow", 4), ("alarmHigh", 5), ("notInstalled", 6), ("noData", 7))
class AdicVoltageType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("dc", 1), ("ac", 2))
class AdicDateAndTime(OctetString):
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(11, 11), )
class AdicTrapSeverity(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("emergency", 1), ("alarm", 2), ("warning", 3), ("notice", 4), ("informational", 5))
class AdicDoorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("open", 1), ("closed", 2), ("closedAndLocked", 3), ("closedAndUnlocked", 4), ("contollerFailed", 5), ("notInstalled", 6), ("noData", 7))
class AdicDriveStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("idle", 1), ("loading", 2), ("ejecting", 3), ("inserted", 4), ("removed", 5), ("notInstalled", 6), ("noData", 7))
class RowStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("active", 1), ("notInService", 2), ("notReady", 3), ("createAndGo", 4), ("createAndWait", 5), ("destroy", 6))
productMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 1), AdicMibVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productMibVersion.setDescription('MIB version identifier.')
productSnmpAgentVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSnmpAgentVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productSnmpAgentVersion.setDescription('SNMP agent version identifier.')
productName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productName.setStatus('mandatory')
if mibBuilder.loadTexts: productName.setDescription('Name of ADIC branded product. Uniquely identifies the product, independent of OEM.')
productDisplayName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayName.setDescription('Name of this agent for display purposes. May be customized for OEM.')
productDescription = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDescription.setStatus('mandatory')
if mibBuilder.loadTexts: productDescription.setDescription('A short description of this SNMP agent.')
productVendor = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVendor.setStatus('mandatory')
if mibBuilder.loadTexts: productVendor.setDescription('Name of the product vendor or OEM.')
productVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productVersion.setDescription('String Format: MNNO.TVBBBPP Examples 1. 091a.TR054 Version 0.91, build 54 of the RCS test code for ADIC 2. 100A.GM052 Version 1.00, build 52 of the MCB GA candidate code for ADIC M Major version number NN Minor version number O OEM (Uppercase when release candidate, otherwise lowercase) A/a - ADIC Others - Reserved) T Target G - GA Candidate Release (labeled build that is a release candidate) T - Test build (labeled build used for formal testing) D - Dev build (labeled build used for unit testing) (lower case) - specifies developer of a local build V Variant S - System R - RCS M - MCB BBB Build number (3 digit sequential number specifying exact build) PP Patch Number (Optional alphanumeric characters denoting patch level of this build if necessary)')
productDisplayVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayVersion.setDescription('The version identifier according to the vendor or OEM.')
productLibraryClass = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 10))).clone(namedValues=NamedValues(("basic", 1), ("intelligent", 2), ("virtual", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: productLibraryClass.setStatus('mandatory')
if mibBuilder.loadTexts: productLibraryClass.setDescription('Basic library includes minimal connectivity hardware. Intelligent library includes SAN appliances and value-added features.')
productSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: productSerialNumber.setDescription('The serial number of the entire library.')
agentGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 1), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentGlobalStatus.setDescription('Current overall status of the agent.')
agentLastGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 2), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentLastGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentLastGlobalStatus.setDescription('The status before the current status which induced an initiative to issue a global status change trap.')
agentTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: agentTimeStamp.setDescription('The last time that the agent values have been updated. Universal time in seconds since UTC 1/1/70.')
agentGetTimeOut = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGetTimeOut.setStatus('mandatory')
if mibBuilder.loadTexts: agentGetTimeOut.setDescription('Suggested time out in milliseconds for how long an SNMP management application should wait while attempting to poll the SNMP agent.')
agentModifiers = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentModifiers.setStatus('mandatory')
if mibBuilder.loadTexts: agentModifiers.setDescription('Agent functional modifiers, when set the modifier is active. ----------------------------------------------------- Bit 3 => Agent in debug mode. ----------------------------------------------------- All other bits are product specific.')
agentRefreshRate = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentRefreshRate.setStatus('mandatory')
if mibBuilder.loadTexts: agentRefreshRate.setDescription('Rate in seconds at which the agent cached data is being updated.')
componentTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10), )
if mibBuilder.loadTexts: componentTable.setStatus('mandatory')
if mibBuilder.loadTexts: componentTable.setDescription("General information about the system's components, including the unique identifiers. The structure this table is based on the Fibre Alliance MIB connUnitEntry.")
componentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"))
if mibBuilder.loadTexts: componentEntry.setStatus('mandatory')
if mibBuilder.loadTexts: componentEntry.setDescription('A component entry containing objects for a particular component.')
componentId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 1), AdicGlobalId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentId.setStatus('mandatory')
if mibBuilder.loadTexts: componentId.setDescription('The unique identification for this component among those within this proxy domain.')
componentType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 2), AdicComponentType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentType.setStatus('mandatory')
if mibBuilder.loadTexts: componentType.setDescription('The type of this component.')
componentDisplayName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: componentDisplayName.setDescription('Name of this component for display purposes. Different OEMs may have different display names for the same ADIC product.')
componentInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentInfo.setStatus('mandatory')
if mibBuilder.loadTexts: componentInfo.setDescription('A display string containing information about this component.')
componentLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentLocation.setStatus('mandatory')
if mibBuilder.loadTexts: componentLocation.setDescription('Location information for this component.')
componentVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentVendor.setStatus('mandatory')
if mibBuilder.loadTexts: componentVendor.setDescription('Name vendor of this component.')
componentSn = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentSn.setStatus('mandatory')
if mibBuilder.loadTexts: componentSn.setDescription('The serial number for this component.')
componentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("unused", 2), ("ok", 3), ("warning", 4), ("failed", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: componentStatus.setDescription('Overall status of the component.')
componentControl = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("resetColdStart", 1), ("resetWarmStart", 2), ("offline", 3), ("online", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentControl.setStatus('mandatory')
if mibBuilder.loadTexts: componentControl.setDescription("This object is used to control the addressed connUnit. NOTE: 'Cold Start' and 'Warm Start' are as defined in MIB II and are not meant to be a factory reset. resetColdStart: the addressed unit performs a 'Cold Start' reset. resetWarmStart: the addressed unit performs a 'Warm Start' reset. offline: the addressed unit puts itself into an implementation dependant 'offline' state. online: the addressed unit puts itself into an implementation dependant 'online' state.")
componentREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentREDId.setStatus('mandatory')
if mibBuilder.loadTexts: componentREDId.setDescription('Runtime Error Detection identifier for this power supply.')
componentFirmwareVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentFirmwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: componentFirmwareVersion.setDescription('Firmware version (or level) for this component.')
componentGeoAddrAisle = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrAisle.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrAisle.setDescription('The aisle number where this component is located. A negative value indicates that an aisle number is not applicable to this component.')
componentGeoAddrFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrFrame.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrFrame.setDescription('The frame number where this component is located. A negative value indicates that a frame number is not applicable to this component.')
componentGeoAddrRack = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrRack.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrRack.setDescription('The rack number where this component is located. A negative value indicates that a rack number is not applicable to this component.')
componentGeoAddrChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrChassis.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrChassis.setDescription('The chassis number where this component is located. A negative value indicates that a chassis number is not applicable to this component.')
componentGeoAddrBlade = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrBlade.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrBlade.setDescription('The blade number within the network chasis where this component is located. A negative value indicates that a blade number is not applicable to this component.')
componentIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 17), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: componentIpAddress.setDescription('IP address of this component. If the component has no IP address, this object returns 0.0.0.0. The address may refer to an internal network not accessible to an external management application.')
powerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10), )
if mibBuilder.loadTexts: powerSupplyTable.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyTable.setDescription('** This table is optional ** Table of the power supplies.')
powerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"))
if mibBuilder.loadTexts: powerSupplyEntry.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific power supply.')
powerSupplyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyIndex.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyIndex.setDescription('** This object is optional ** Index of this power supply within the component specified by componentId.')
powerSupplyName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyName.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyName.setDescription('** This object is optional ** Display name of this power supply.')
powerSupplyWattage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyWattage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyWattage.setDescription('** This object is optional ** What is maximum power output of this power supply. Units are Watts.')
powerSupplyType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 4), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyType.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyType.setDescription('** This object is optional ** DC or AC power supply?')
powerSupplyREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 5), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyREDId.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this power supply.')
powerSupplyRatedVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setDescription('** This object is optional ** Rated output voltage in millivolts of this power supply.')
powerSupplyLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyLocation.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyLocation.setDescription('** This object is optional ** Physical location of this power supply.')
voltageSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20), )
if mibBuilder.loadTexts: voltageSensorTable.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorTable.setDescription('** This table is optional ** Table of the voltage sensors.')
voltageSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "voltageSensorIndex"))
if mibBuilder.loadTexts: voltageSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific voltage sensor.')
voltageSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorIndex.setDescription('** This object is optional ** Index of this voltage sensor within the component specified by componentId.')
voltageSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorName.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorName.setDescription('** This object is optional ** Display name of this voltage sensor.')
voltageSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorStatus.setDescription('** This object is optional ** What is the state of this voltage sensor? Is the voltage in the nominal, warning or alarm region?')
voltageSensorMillivolts = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorMillivolts.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorMillivolts.setDescription('** This object is optional ** What is the voltage in millivolts of this voltage sensor?')
voltageSensorType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 5), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorType.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorType.setDescription('** This object is optional ** DC or AC voltage sensor?')
voltageSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalLo.setDescription('** This object is optional ** Lower voltage limit of the nominal state for this voltage sensor. Unit are millivolts.')
voltageSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalHi.setDescription('** This object is optional ** Upper voltage limit of the nominal state for this voltage sensor. Unit are millivolts.')
voltageSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningLo.setDescription('** This object is optional ** Lower voltage limit of the warning state for this voltage sensor. Unit are millivolts. If the voltage falls below this limit, the sensor enters the alarm state.')
voltageSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningHi.setDescription('** This object is optional ** Upper voltage limit of the warning state for this voltage sensor. Unit are millivolts. If the voltage rises above this limit, the sensor enters the alarm state.')
voltageSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorLocation.setDescription('** This object is optional ** Physical location of the voltage sensor.')
voltageSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 11), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this voltage sensor.')
temperatureSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30), )
if mibBuilder.loadTexts: temperatureSensorTable.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorTable.setDescription('** This table is optional ** Table of the temperature sensors in each component.')
temperatureSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "temperatureSensorIndex"))
if mibBuilder.loadTexts: temperatureSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific sensor.')
temperatureSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorIndex.setDescription('** This object is optional ** Index of this temperatureSensor within the component specified by componentId.')
temperatureSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorName.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorName.setDescription('** This object is optional ** Display name of this temperatureSensor.')
temperatureSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorStatus.setDescription('** This object is optional ** What is the state of this temperatureSensor? Is the temperature in the nominal, warning or alarm region?')
temperatureSensorDegreesCelsius = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setDescription('** This object is optional ** The temperature in degrees Celsuis for this temperature sensor.')
temperatureSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalLo.setDescription('** This object is optional ** Lower temperature limit of the nominal state for this temperature sensor. Unit are degrees Celsius.')
temperatureSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalHi.setDescription('** This object is optional ** Upper temperature limit of the nominal state for this temperature sensor. Unit are degrees Celsius.')
temperatureSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningLo.setDescription('** This object is optional ** Lower temperature limit of the warning state for this temperature sensor. Unit are degrees Celsius. If the temperature falls below this limit, the sensor enters the alarm state.')
temperatureSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningHi.setDescription('** This object is optional ** Upper temperature limit of the warning state for this temperature sensor. Unit are degrees Celsius. If the temperature rises above this limit, the sensor enters the alarm state.')
temperatureSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorLocation.setDescription('** This object is optional ** Physical location of this temperature sensor.')
temperatureSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this temperature sensor.')
coolingFanTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40), )
if mibBuilder.loadTexts: coolingFanTable.setStatus('optional')
if mibBuilder.loadTexts: coolingFanTable.setDescription('** This table is optional ** Table of cooling fans in the library.')
coolingFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "coolingFanIndex"))
if mibBuilder.loadTexts: coolingFanEntry.setStatus('optional')
if mibBuilder.loadTexts: coolingFanEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific cooling fan.')
coolingFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanIndex.setStatus('optional')
if mibBuilder.loadTexts: coolingFanIndex.setDescription('** This object is optional ** Index of this cooling fan within the component specified by componentId.')
coolingFanName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanName.setStatus('optional')
if mibBuilder.loadTexts: coolingFanName.setDescription('** This object is optional ** Display name of this coolingFan.')
coolingFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanStatus.setStatus('optional')
if mibBuilder.loadTexts: coolingFanStatus.setDescription('** This object is optional ** Is the fan speed in the nominal, warning or alarm region?')
coolingFanRPM = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanRPM.setStatus('optional')
if mibBuilder.loadTexts: coolingFanRPM.setDescription('** This object is optional ** The fan speed in revolutions per minute.')
coolingFanNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalLo.setDescription('** This object is optional ** Lower fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalHi.setDescription('** This object is optional ** Upper fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningLo.setDescription('** This object is optional ** Lower fan speed limit of the warning state for this fan. Units are RPM. If the speed falls below this limit, the fan enters the alarmLow state.')
coolingFanWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningHi.setDescription('** This object is optional ** Upper fan speed limit of the warning state for this fan. Units are RPM. If the speed rises above this limit, the fan enters the alarmHigh state.')
coolingFanLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanLocation.setStatus('optional')
if mibBuilder.loadTexts: coolingFanLocation.setDescription('** This object is optional ** Physical location of this fan.')
coolingFanREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanREDId.setStatus('optional')
if mibBuilder.loadTexts: coolingFanREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this fan.')
trapPayloadTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10), )
if mibBuilder.loadTexts: trapPayloadTable.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadTable.setDescription('Defines objects common to all trap payloads.')
trapPayloadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "trapSequenceNumber"))
if mibBuilder.loadTexts: trapPayloadEntry.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadEntry.setDescription('Each entry contains the information for a specific cooling fan.')
trapSequenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSequenceNumber.setStatus('mandatory')
if mibBuilder.loadTexts: trapSequenceNumber.setDescription('')
trapSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: trapSeverity.setDescription('')
trapSummaryText = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSummaryText.setStatus('mandatory')
if mibBuilder.loadTexts: trapSummaryText.setDescription('')
trapIntendedUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("public", 1), ("triggerRefresh", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapIntendedUsage.setStatus('mandatory')
if mibBuilder.loadTexts: trapIntendedUsage.setDescription("The value of this qualifier aids the management application in determining how to respond to the trap. If the value is public(1), the information is intended to be propagated to external observers, such as sending email. If the value is triggerRefresh(2), the information is intended to update the management application's data model, but not necessarily propagated to external observers.")
startupSequenceComplete = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,500)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: startupSequenceComplete.setDescription('The component indicated by the value of componentId has successfully completed its startup sequence.')
shutdownSequenceInitiated = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,501)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: shutdownSequenceInitiated.setDescription('The component indicated by the value of componentId has initiated its shutdown sequence.')
componentAdded = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,502)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentAdded.setDescription('The component indicated by the value of componentId has been added to the library.')
componentRemoved = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,503)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentRemoved.setDescription('The component indicated by the value of componentId has been removed from the library.')
productLibraryClassChange = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,504)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"), ("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"))
if mibBuilder.loadTexts: productLibraryClassChange.setDescription('The product library class has changed. This occurs when connectivity hardware is added or removed. The payload contains the productLibraryClass before and after the change.')
mibBuilder.exportSymbols("ADIC-INTELLIGENT-STORAGE-MIB", powerSupplyTable=powerSupplyTable, powerSupplyEntry=powerSupplyEntry, sml=sml, powerSupplyREDId=powerSupplyREDId, temperatureSensorEntry=temperatureSensorEntry, componentLocation=componentLocation, voltageSensorNominalLo=voltageSensorNominalLo, temperatureSensorWarningHi=temperatureSensorWarningHi, intelligent=intelligent, RowStatus=RowStatus, AdicVoltageType=AdicVoltageType, software=software, agentModifiers=agentModifiers, shutdownSequenceInitiated=shutdownSequenceInitiated, coolingFanName=coolingFanName, voltageSensorTable=voltageSensorTable, trapSequenceNumber=trapSequenceNumber, trapIntendedUsage=trapIntendedUsage, componentIpAddress=componentIpAddress, globalData=globalData, temperatureSensorNominalHi=temperatureSensorNominalHi, productName=productName, powerSupplyRatedVoltage=powerSupplyRatedVoltage, AdicAgentStatus=AdicAgentStatus, voltageSensorWarningLo=voltageSensorWarningLo, agentGetTimeOut=agentGetTimeOut, coolingFanLocation=coolingFanLocation, AdicGlobalId=AdicGlobalId, voltageSensorStatus=voltageSensorStatus, AdicMibVersion=AdicMibVersion, powerSupplyLocation=powerSupplyLocation, productLibraryClassChange=productLibraryClassChange, AdicTrapSeverity=AdicTrapSeverity, storage=storage, componentEntry=componentEntry, coolingFanIndex=coolingFanIndex, temperatureSensorDegreesCelsius=temperatureSensorDegreesCelsius, voltageSensorLocation=voltageSensorLocation, agentRefreshRate=agentRefreshRate, coolingFanNominalHi=coolingFanNominalHi, AdicInterfaceType=AdicInterfaceType, componentId=componentId, temperatureSensorIndex=temperatureSensorIndex, coolingFanStatus=coolingFanStatus, AdicDriveStatus=AdicDriveStatus, coolingFanREDId=coolingFanREDId, trapPayloadEntry=trapPayloadEntry, agentTimeStamp=agentTimeStamp, componentREDId=componentREDId, powerAndCooling=powerAndCooling, voltageSensorEntry=voltageSensorEntry, coolingFanWarningHi=coolingFanWarningHi, AdicDateAndTime=AdicDateAndTime, componentGeoAddrBlade=componentGeoAddrBlade, notification=notification, productDisplayVersion=productDisplayVersion, componentControl=componentControl, AdicDoorStatus=AdicDoorStatus, componentGeoAddrChassis=componentGeoAddrChassis, productSnmpAgentVersion=productSnmpAgentVersion, components=components, agentLastGlobalStatus=agentLastGlobalStatus, temperatureSensorNominalLo=temperatureSensorNominalLo, voltageSensorType=voltageSensorType, componentGeoAddrAisle=componentGeoAddrAisle, network=network, componentDisplayName=componentDisplayName, temperatureSensorTable=temperatureSensorTable, powerSupplyType=powerSupplyType, temperatureSensorStatus=temperatureSensorStatus, AdicREDIdentifier=AdicREDIdentifier, voltageSensorIndex=voltageSensorIndex, componentTable=componentTable, componentStatus=componentStatus, powerSupplyIndex=powerSupplyIndex, AdicSensorStatus=AdicSensorStatus, agentGlobalStatus=agentGlobalStatus, componentVendor=componentVendor, AdicComponentType=AdicComponentType, componentFirmwareVersion=componentFirmwareVersion, coolingFanNominalLo=coolingFanNominalLo, coolingFanTable=coolingFanTable, temperatureSensorREDId=temperatureSensorREDId, coolingFanWarningLo=coolingFanWarningLo, powerSupplyName=powerSupplyName, hardware=hardware, voltageSensorName=voltageSensorName, productAgentInfo=productAgentInfo, Boolean=Boolean, voltageSensorNominalHi=voltageSensorNominalHi, temperatureSensorName=temperatureSensorName, componentSn=componentSn, powerSupplyWattage=powerSupplyWattage, voltageSensorMillivolts=voltageSensorMillivolts, 
voltageSensorWarningHi=voltageSensorWarningHi, startupSequenceComplete=startupSequenceComplete, productDisplayName=productDisplayName, productLibraryClass=productLibraryClass, componentGeoAddrRack=componentGeoAddrRack, productSerialNumber=productSerialNumber, adic=adic, coolingFanEntry=coolingFanEntry, AdicEnable=AdicEnable, temperatureSensorWarningLo=temperatureSensorWarningLo, componentType=componentType, componentAdded=componentAdded, productVendor=productVendor, componentRemoved=componentRemoved, productVersion=productVersion, voltageSensorREDId=voltageSensorREDId, productMibVersion=productMibVersion, componentGeoAddrFrame=componentGeoAddrFrame, temperatureSensorLocation=temperatureSensorLocation, trapPayloadTable=trapPayloadTable, trapSummaryText=trapSummaryText, AdicOnlineStatus=AdicOnlineStatus, trapSeverity=trapSeverity, componentInfo=componentInfo, coolingFanRPM=coolingFanRPM, productDescription=productDescription)
| pysnmp-with-texts/ADIC-INTELLIGENT-STORAGE-MIB.py | 44,179 | PySNMP MIB module ADIC-INTELLIGENT-STORAGE-MIB (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADIC-INTELLIGENT-STORAGE-MIB Produced by pysmi-0.3.4 at Wed May 1 11:13:36 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) | 350 | en | 0.435675 |
"""Tests to ensure that the html.parser tree builder generates good
trees."""
from pdb import set_trace
import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
default_builder = HTMLParserTreeBuilder
def test_namespaced_system_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_builder_is_pickled(self):
"""Unlike most tree builders, HTMLParserTreeBuilder and will
be restored after pickling.
"""
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
def test_redundant_empty_element_closing_tags(self):
self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>")
self.assertSoupEquals('</br></br></br>', "")
def test_empty_element(self):
# This verifies that any buffered data present when the parser
# finishes working is handled.
self.assertSoupEquals("foo &# bar", "foo &# bar")
class TestHTMLParserSubclass(SoupTest):
def test_error(self):
"""Verify that our HTMLParser subclass implements error() in a way
that doesn't cause a crash.
"""
parser = BeautifulSoupHTMLParser()
parser.error("don't crash")
| virtual/lib/python3.6/site-packages/bs4/tests/test_htmlparser.py | 1,688 | Unlike most tree builders, HTMLParserTreeBuilder and will
be restored after pickling.
Verify that our HTMLParser subclass implements error() in a way
that doesn't cause a crash.
Tests to ensure that the html.parser tree builder generates good
trees.
html.parser can't handle namespaced doctypes, so skip this one. html.parser can't handle namespaced doctypes, so skip this one. This verifies that any buffered data present when the parser finishes working is handled. | 469 | en | 0.867614 |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
import settings.base
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'profiles.views.index', name='index'),
url(r'^accounts/', include('allauth.urls')),
# Examples:
# url(r'^$', 'explorind_project.views.home', name='home'),
# url(r'^explorind_project/', include('explorind_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^locations/', include('locations.urls')),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.STATIC_ROOT,
}),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.MEDIA_ROOT}),
url(r'^login$', 'profiles.views.login_view'), # login
url(r'^logout$', 'profiles.views.logout_view'), # logout
url(r'^signup$', 'profiles.views.signup'), # signup
url(r'^submit$', 'profiles.views.submit'),
url(r'^reviews$', 'profiles.views.public'),
url(r'^users/$', 'profiles.views.users'),
url(r'^users/(?P<username>.{0,30})/$', 'profiles.views.users'),
url(r'^follow$', 'profiles.views.follow'),
)
| explorind_project/explorind_project/urls.py | 1,451 | Uncomment the next two lines to enable the admin: Examples: url(r'^$', 'explorind_project.views.home', name='home'), url(r'^explorind_project/', include('explorind_project.foo.urls')), Uncomment the admin/doc line below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), Uncomment the next line to enable the admin: login logout signup | 378 | en | 0.457299 |
"""GoodWe PV inverter numeric settings entities."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
import logging
from goodwe import Inverter, InverterError
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.const import ENTITY_CATEGORY_CONFIG, PERCENTAGE, POWER_WATT
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, KEY_DEVICE_INFO, KEY_INVERTER
_LOGGER = logging.getLogger(__name__)
@dataclass
class GoodweNumberEntityDescriptionBase:
"""Required values when describing Goodwe number entities."""
getter: Callable[[Inverter], Awaitable[int]]
setter: Callable[[Inverter, int], Awaitable[None]]
@dataclass
class GoodweNumberEntityDescription(
NumberEntityDescription, GoodweNumberEntityDescriptionBase
):
"""Class describing Goodwe number entities."""
NUMBERS = (
GoodweNumberEntityDescription(
key="grid_export_limit",
name="Grid export limit",
icon="mdi:transmission-tower",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=POWER_WATT,
getter=lambda inv: inv.get_grid_export_limit(),
setter=lambda inv, val: inv.set_grid_export_limit(val),
step=100,
min_value=0,
max_value=10000,
),
GoodweNumberEntityDescription(
key="battery_discharge_depth",
name="Depth of discharge (on-grid)",
icon="mdi:battery-arrow-down",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=PERCENTAGE,
getter=lambda inv: inv.get_ongrid_battery_dod(),
setter=lambda inv, val: inv.set_ongrid_battery_dod(val),
step=1,
min_value=0,
max_value=99,
),
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the inverter select entities from a config entry."""
inverter = hass.data[DOMAIN][config_entry.entry_id][KEY_INVERTER]
device_info = hass.data[DOMAIN][config_entry.entry_id][KEY_DEVICE_INFO]
entities = []
for description in NUMBERS:
try:
current_value = await description.getter(inverter)
except InverterError:
# Inverter model does not support this setting
_LOGGER.debug("Could not read inverter setting %s", description.key)
continue
entities.append(
InverterNumberEntity(device_info, description, inverter, current_value),
)
async_add_entities(entities)
class InverterNumberEntity(NumberEntity):
"""Inverter numeric setting entity."""
_attr_should_poll = False
entity_description: GoodweNumberEntityDescription
def __init__(
self,
device_info: DeviceInfo,
description: GoodweNumberEntityDescription,
inverter: Inverter,
current_value: int,
) -> None:
"""Initialize the number inverter setting entity."""
self.entity_description = description
self._attr_unique_id = f"{DOMAIN}-{description.key}-{inverter.serial_number}"
self._attr_device_info = device_info
self._attr_value = float(current_value)
self._inverter: Inverter = inverter
async def async_set_value(self, value: float) -> None:
"""Set new value."""
if self.entity_description.setter:
await self.entity_description.setter(self._inverter, int(value))
self._attr_value = value
self.async_write_ha_state()
| homeassistant/components/goodwe/number.py | 3,529 | Class describing Goodwe number entities.
Required values when describing Goodwe number entities.
Inverter numeric setting entity.
Initialize the number inverter setting entity.
GoodWe PV inverter numeric settings entities.
Inverter model does not support this setting | 269 | en | 0.700365 |
# -*- coding: utf-8 -*-
"""Configuration file for sniffer."""
# pylint: disable=superfluous-parens,bad-continuation
import time
import subprocess
from sniffer.api import select_runnable, file_validator, runnable
try:
from pync import Notifier
except ImportError:
notify = None
else:
notify = Notifier.notify
watch_paths = ["flask_api"]
class Options(object):
group = int(time.time()) # unique per run
show_coverage = False
rerun_args = None
targets = [
(('make', 'test'), "Run Tests", True),
(('make', 'check'), "Static Analysis", True),
(('make', 'doc'), None, True),
]
@select_runnable('run_targets')
@file_validator
def python_files(filename):
return filename.endswith('.py')
@select_runnable('run_targets')
@file_validator
def html_files(filename):
return filename.split('.')[-1] in ['html', 'css', 'js']
@runnable
def run_targets(*args):
"""Run targets for Python."""
Options.show_coverage = 'coverage' in args
count = 0
for count, (command, title, retry) in enumerate(Options.targets, start=1):
success = call(command, title, retry)
if not success:
message = "✅ " * (count - 1) + "❌"
show_notification(message, title)
return False
message = "✅ " * count
title = "All Targets"
show_notification(message, title)
show_coverage()
return True
def call(command, title, retry):
"""Run a command-line program and display the result."""
if Options.rerun_args:
command, title, retry = Options.rerun_args
Options.rerun_args = None
success = call(command, title, retry)
if not success:
return False
print("")
print("$ %s" % ' '.join(command))
failure = subprocess.call(command)
if failure and retry:
Options.rerun_args = command, title, retry
return not failure
def show_notification(message, title):
"""Show a user notification."""
if notify and title:
notify(message, title=title, group=Options.group)
def show_coverage():
"""Launch the coverage report."""
if Options.show_coverage:
subprocess.call(['make', 'read-coverage'])
Options.show_coverage = False
| scent.py | 2,255 | Run a command-line program and display the result.
Run targets for Python.
Launch the coverage report.
Show a user notification.
Configuration file for sniffer.
-*- coding: utf-8 -*- pylint: disable=superfluous-parens,bad-continuation unique per run | 251 | en | 0.730372 |
"""Provide useful functions for using PTLFlow."""
# =============================================================================
# Copyright 2021 Henrique Morimitsu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__version__ = '0.2.5'
import logging
from argparse import Namespace
from pathlib import Path
from typing import List, Optional
import requests
import torch
from torch import hub
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.models.dicl.dicl import DICL
from ptlflow.models.fastflownet.fastflownet import FastFlowNet
from ptlflow.models.flownet.flownet2 import FlowNet2
from ptlflow.models.flownet.flownetc import FlowNetC
from ptlflow.models.flownet.flownetcs import FlowNetCS
from ptlflow.models.flownet.flownetcss import FlowNetCSS
from ptlflow.models.flownet.flownets import FlowNetS
from ptlflow.models.flownet.flownetsd import FlowNetSD
from ptlflow.models.gma.gma import GMA
from ptlflow.models.hd3.hd3 import HD3, HD3Context
from ptlflow.models.irr.pwcnet import IRRPWCNet
from ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR
from ptlflow.models.irr.irr_pwc import IRRPWC
from ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall
from ptlflow.models.liteflownet.liteflownet import LiteFlowNet
from ptlflow.models.liteflownet.liteflownet3 import (
LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)
from ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg
from ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S
from ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet
from ptlflow.models.raft.raft import RAFT, RAFTSmall
from ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow
from ptlflow.models.starflow.starflow import StarFlow
from ptlflow.models.vcn.vcn import VCN, VCNSmall
from ptlflow.utils.utils import config_logging
try:
from ptlflow.models.scv.scv import SCVEighth, SCVQuarter
except ImportError as e:
print(e)
SCVEighth = None
SCVQuarter = None
config_logging()
models_dict = {
'dicl': DICL,
'fastflownet': FastFlowNet,
'flownet2': FlowNet2,
'flownetc': FlowNetC,
'flownetcs': FlowNetCS,
'flownetcss': FlowNetCSS,
'flownets': FlowNetS,
'flownetsd': FlowNetSD,
'gma': GMA,
'hd3': HD3,
'hd3_ctxt': HD3Context,
'irr_pwc': IRRPWC,
'irr_pwcnet': IRRPWCNet,
'irr_pwcnet_irr': IRRPWCNetIRR,
'lcv_raft': LCV_RAFT,
'lcv_raft_small': LCV_RAFTSmall,
'liteflownet': LiteFlowNet,
'liteflownet2': LiteFlowNet2,
'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,
'liteflownet3': LiteFlowNet3,
'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,
'liteflownet3s': LiteFlowNet3S,
'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,
'maskflownet': MaskFlownet,
'maskflownet_s': MaskFlownet_S,
'pwcnet': PWCNet,
'pwcdcnet': PWCDCNet,
'raft': RAFT,
'raft_small': RAFTSmall,
'scopeflow': ScopeFlow,
'scv4': SCVQuarter,
'scv8': SCVEighth,
'starflow': StarFlow,
'vcn': VCN,
'vcn_small': VCNSmall,
}
def download_scripts(
destination_dir: Path = Path('ptlflow_scripts')
) -> None:
"""Download the main scripts and configs to start working with PTLFlow."""
github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'
script_names = [
'datasets.yml',
'infer.py',
'test.py',
'train.py',
'validate.py'
]
destination_dir.mkdir(parents=True, exist_ok=True)
for sname in script_names:
script_url = github_url + sname
data = requests.get(script_url)
if data.status_code == 200:
with open(destination_dir / sname, 'wb') as f:
f.write(data.content)
else:
logging.warning('Script %s was not found.', script_url)
logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(
model_name: str,
pretrained_ckpt: Optional[str] = None,
args: Optional[Namespace] = None
) -> BaseModel:
"""Return an instance of a chosen model.
    The instance can be configured by the arguments, and can load some existing pretrained weights.
Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,
returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to
"return get_model_reference()()", which looks confusing. This can be rewritten as
"model_ref = get_model_reference(); return model_ref()".
Parameters
----------
model_name : str
Name of the model to get an instance of.
pretrained_ckpt : Optional[str], optional
Name of the pretrained weight to load or a path to a local checkpoint file.
args : Optional[Namespace], optional
        Some arguments that will be provided to the model.
Returns
-------
BaseModel
The instance of the chosen model.
Raises
------
ValueError
If the given checkpoint name is not a valid choice.
ValueError
If a checkpoint name is given, but the model does not have any pretrained weights available.
See Also
--------
get_model_reference : To get a reference to the class of a model.
"""
model_ref = get_model_reference(model_name)
if args is None:
parser = model_ref.add_model_specific_args()
args = parser.parse_args([])
model = model_ref(args)
if pretrained_ckpt is None and args is not None and args.pretrained_ckpt is not None:
pretrained_ckpt = args.pretrained_ckpt
if pretrained_ckpt is not None:
if Path(pretrained_ckpt).exists():
ckpt_path = pretrained_ckpt
elif hasattr(model_ref, 'pretrained_checkpoints'):
ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)
if ckpt_path is None:
raise ValueError(
f'Invalid checkpoint name {pretrained_ckpt}. '
f'Choose one from {{{",".join(model.pretrained_checkpoints.keys())}}}')
else:
raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if Path(ckpt_path).exists():
ckpt = torch.load(ckpt_path, map_location=torch.device(device))
else:
model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'
ckpt = hub.load_state_dict_from_url(
ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)
state_dict = ckpt['state_dict']
model.load_state_dict(state_dict)
return model
def get_model_reference(
model_name: str
) -> BaseModel:
"""Return a reference to the class of a chosen model.
Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this
function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as
"model_ref = get_model_reference(); model_instance = model_ref()".
Parameters
----------
model_name : str
Name of the model to get a reference of.
Returns
-------
BaseModel
A reference to the chosen model.
Raises
------
ValueError
If the given name is not a valid choice.
See Also
--------
get_model : To get an instance of a model.
"""
try:
return models_dict[model_name]
except KeyError:
raise ValueError(f'Unknown model name: {model_name}. Choose from [{", ".join(models_dict.keys())}]')
def get_trainable_model_names() -> List[str]:
"""Return a list of model names that are able to be trained.
    This function returns the names of the models that have a loss function defined.
Returns
=======
List[str]
The list of the model names that can be trained.
"""
return [mname for mname in models_dict.keys() if get_model(mname).loss_fn is not None]
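# Minimal usage sketch of the helpers above (hedged: the checkpoint name
# 'things' is an assumption and must exist in the chosen model's
# pretrained_checkpoints; replace it with any valid name):
#   model = get_model('raft', pretrained_ckpt='things')
#   trainable_names = get_trainable_model_names()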
| ptlflow/__init__.py | 8,679 | Download the main scripts and configs to start working with PTLFlow.
Return an instance of a chosen model.
The instance can have configured by he arguments, and load some existing pretrained weights.
Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,
returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to
"return get_model_reference()()", which looks confusing. This can be rewritten as
"model_ref = get_model_reference(); return model_ref()".
Parameters
----------
model_name : str
Name of the model to get an instance of.
pretrained_ckpt : Optional[str], optional
Name of the pretrained weight to load or a path to a local checkpoint file.
args : Optional[Namespace], optional
Some arguments that ill be provided to the model.
Returns
-------
BaseModel
The instance of the chosen model.
Raises
------
ValueError
If the given checkpoint name is not a valid choice.
ValueError
If a checkpoint name is given, but the model does not have any pretrained weights available.
See Also
--------
get_model_reference : To get a reference to the class of a model.
Return a reference to the class of a chosen model.
Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this
function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as
"model_ref = get_model_reference(); model_instance = model_ref()".
Parameters
----------
model_name : str
Name of the model to get a reference of.
Returns
-------
BaseModel
A reference to the chosen model.
Raises
------
ValueError
If the given name is not a valid choice.
See Also
--------
get_model : To get an instance of a model.
Return a list of model names that are able to be trained.
This function return the names of the model that have a loss function defined.
Returns
=======
List[str]
The list of the model names that can be trained.
Provide useful functions for using PTLFlow.
============================================================================= Copyright 2021 Henrique Morimitsu Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================= | 2,819 | en | 0.771262 |
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2018/5/12.
"""
from app import create_app
__author__ = 'Allen7D'
from app.models.base import db
from app.models.user import User
app = create_app()
with app.app_context():
with db.auto_commit():
        # Create a super administrator
user = User()
user.openid = '999'
user.email = '999@qq.com'
user.nickname = 'Super'
user.auth = 2
user.password = '123456'
db.session.add(user)
with db.auto_commit():
        # Create a regular administrator
user = User()
user.openid = '777'
user.email = '777@qq.com'
user.nickname = 'Admin'
user.auth = 1
user.password = '123456'
db.session.add(user)
| fake.py | 757 | Created by Allen7D on 2018/5/12.
_*_ coding: utf-8 _*_ 创建一个超级管理员 创建一个普通管理员 | 76 | zh | 0.730702 |
import requests
import pprint
from config import API_KEY
base_url = f'https://api.telegram.org/bot{API_KEY}/'
api_response = requests.get(base_url + 'getUpdates').json()
for update in api_response['result']:
message = update['message']
chat_id = message['chat']['id']
text = message['text']
reply_message = {
'chat_id': chat_id,
'text': text
}
requests.post(base_url + 'sendMessage', json=reply_message)
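# Shape of each update consumed above, as a sketch inferred from the fields the
# loop accesses (real Telegram API responses contain additional keys):
# {"message": {"chat": {"id": 123456}, "text": "hello"}}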
# pprint.pprint(api_response['result'][0])
 | telegramsLibs/api_telega_intro.py | 494 | pprint.pprint(api_response['result'][0]) | 40 | en | 0.238209 |
# Metafier V2: writes directly to output.mc
# Uses numpy and memoization to speed up a crap ton & compress data a bit
# ===REQUIRES metatemplate11.mc===
import golly as g
import numpy as np
from shutil import copyfile
#Get the selection
selection = g.getselrect()
if not selection: g.exit("No selection.")
#Get the cells in the selection
cells = g.getcells(selection)
if not cells: g.exit("No pattern in selection")
if len(cells) % 3: cells = cells[:-1]
selw = selection[2]
selh = selection[3]
patternsize = 1 << int(np.ceil(np.log2(selh | selw)))
metapattern = np.zeros((patternsize, patternsize))
#Pseudo-convolution, to detect diagonal neighbors
# +1 +0 +2
# +0 *16 +0
# +4 +0 +8
for cell in np.reshape(cells, (-1, 3)):
selx = cell[0] - selection[0]
sely = cell[1] - selection[1]
metapattern[sely][selx] += 16 * cell[2]
if sely:
if selx:
metapattern[sely - 1][selx - 1] += 8
if selx + 1 < selw:
metapattern[sely - 1][selx + 1] += 4
if sely + 1 < selh:
if selx:
metapattern[sely + 1][selx - 1] += 2
if selx + 1 < selw:
metapattern[sely + 1][selx + 1] += 1
#Remove all B/S cells
metapattern[metapattern < 32] = np.nan
metapattern += 5630 - 32 #5632 is starting point of 11s in template
metapattern[np.isnan(metapattern)] = 0
metapattern = metapattern.astype(int)
#Using metatemplate11, memoization, and some recursion
def createLine(pattern, outfile, linenum = [5726], memo = {}): #linenum and memo are mutable function arguments, which are only initialized during function definition
if tuple(pattern.ravel().tolist()) not in memo: #If we haven't seen this type of pattern before, let's remember it
if pattern.shape[0] == 2: #Pattern is a leaf, write leaf line
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
pattern[0, 0],
pattern[0, 1],
pattern[1, 0],
pattern[1, 1]))
else: #Pattern is a branch, keep going down quadtree
subpatterns = pattern.reshape(2, pattern.shape[0] >> 1, 2, pattern.shape[0] >> 1).swapaxes(1,2)
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
createLine(subpatterns[0, 0], outfile),
createLine(subpatterns[0, 1], outfile),
createLine(subpatterns[1, 0], outfile),
createLine(subpatterns[1, 1], outfile)))
memo[tuple(pattern.ravel().tolist())] = linenum[0]
linenum[0] += 1
return memo[tuple(pattern.ravel().tolist())]
copyfile('metatemplate11.mc', 'output.mc')
with open('output.mc', 'a') as outputfile:
createLine(metapattern, outputfile)
#Display output.mc
g.addlayer()
g.open('output.mc')
#TODO: Use metatemplate10?
| MetafierV2.py | 3,190 | Metafier V2: writes directly to output.mc Uses numpy and memoization to speed up a crap ton & compress data a bit ===REQUIRES metatemplate11.mc===Get the selectionGet the cells in the selectionPseudo-convolution, to detect diagonal neighbors +1 +0 +2 +0 *16 +0 +4 +0 +8Remove all B/S cells5632 is starting point of 11s in templateUsing metatemplate11, memoization, and some recursionlinenum and memo are mutable function arguments, which are only initialized during function definitionIf we haven't seen this type of pattern before, let's remember itPattern is a leaf, write leaf linePattern is a branch, keep going down quadtreeDisplay output.mcTODO: Use metatemplate10? | 676 | en | 0.796019 |
#!/bin/env
"""
Help new users configure the database for use with social networks.
"""
import os
from datetime import datetime
# Fix Python 2.x.
try:
input = raw_input
except NameError:
pass
import django
from django.conf import settings
from django.core.management.utils import get_random_secret_key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
settings.configure(
DEBUG=True,
TEMPLATES=[dict(
# DEBUG = True,
BACKEND='django.template.backends.django.DjangoTemplates',
APP_DIRS=True,
DIRS=[
os.path.join(BASE_DIR, 'allauthdemo'),
],
)],
)
try:
django.setup() # for Django >= 1.7
except AttributeError:
pass # must be < Django 1.7
from django.template.loader import get_template
from django.template import engines
commands_template = engines['django'].from_string("""
Run these commands:
python manage.py makemigrations allauthdemo_auth
python manage.py migrate
python manage.py createsuperuser
{% if facebook %}# Facebook
python manage.py set_auth_provider facebook {{facebook.client_id}} {{facebook.secret}}{% endif %}
{% if google %}# Google
python manage.py set_auth_provider google {{google.client_id}} {{google.secret}}{% endif %}
{% if github %}# GitHub
python manage.py set_auth_provider github {{github.client_id}} {{github.secret}}{% endif %}
{% if vk %}# VK
python manage.py set_auth_provider vk {{vk.client_id}} {{vk.secret}}{% endif %}
If you have other providers you can add them in that way.
""")
settings_template = get_template("settings.template.py")
def heading(text):
text = text.strip()
line = '-' * len(text)
print("\n%s\n%s\n%s\n" % (line, text, line))
def print_list(ls):
max_len = max([len(i) for i in ls])
num = len(str(len(ls))) #TODO: full list providers
line = '-' * (2+num+3+max_len+2)
for i in range(len(ls)):
print(line)
print("| %d | %s "% (i+1, ls[i]))
def ask_text(need, default=None):
need = need.strip()
if default:
msg = "\n%s? Default: [%s] > " % (need, default)
else:
msg = "\n%s? > " % need
while True:
response = input(msg)
if response:
return response
elif default is not None:
return default
else:
pass # raw_input('Please enter a value.')
providers = ['facebook', 'google', 'github', 'vk']
if __name__ == "__main__":
context = {
'now': str(datetime.now()),
'secret_key': get_random_secret_key(),
}
print_list(providers)
print("Please list comma-separated providers. Example: 1,2,3,4")
    correct_providers = [int(i)-1 for i in input("Please enter: ").split(',')]
    for i in correct_providers:
p = providers[i]
heading(p)
secret = ask_text("{} Secret"%(p))
client_id = ask_text("{} Client ID"%(p))
context[p] = dict(secret=secret, client_id=client_id)
heading("Rendering settings...")
with open('allauthdemo/settings.py', 'w') as out:
out.write(settings_template.render(context, request=None))
print("OK")
heading("Next steps")
print(commands_template.render(context, request=None))
heading("Done")
| configure_new.py | 3,284 | Help new users configure the database for use with social networks.
!/bin/env Fix Python 2.x. DEBUG = True, for Django >= 1.7 must be < Django 1.7TODO: full list providers raw_input('Please enter a value.') | 207 | en | 0.456013 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to demonstrate vaspy.incar functionality.
"""
import argparse
import vaspy
import vaspy.incar
from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
LOGLEVEL = DEBUG
logger = getLogger(__name__)
fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
formatter = Formatter(fmt)
handler = StreamHandler()
handler.setLevel(LOGLEVEL)
logger.setLevel(LOGLEVEL)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-r",
help="""Show reformated INCAR (Use -i if edit in place)""",
action="store_true",
)
parser.add_argument("-i", help="""Edit the INCAR file in place""", action="store_true")
parser.add_argument(
"--lint",
help="""Tyny and private verion of code checker for vasp""",
action="store_true",
)
parser.add_argument("incar_file", metavar="INCAR_file", nargs=1)
args = parser.parse_args()
assert not (
args.lint and (args.i or args.r)
), "Lint option and re-format option (-i, -r) is exclusive."
logger.debug("args: {}".format(args))
incar: vaspy.incar.Incar = vaspy.load(args.incar_file[0])
if args.i:
with open(args.incar_file[0], mode="wt") as incar_file:
incar_file.write(incar.__str__())
if args.r:
print(incar)
if args.lint:
lint_msg = incar.lint_all()
    if lint_msg:  # with Python 3.8+, "lint_msg := incar.lint_all()" could be used here
print(lint_msg)
else:
print("ALL OK. Submit the job!!")
| scripts/vaspy-incar.py | 1,584 | Script to demonstrate vaspy.incar functionality.
!/usr/bin/env python3 -*- coding: utf-8 -*- if python 3.8 lint_msg:= incar.lint_all() can be used... | 150 | en | 0.516579 |
'''
blackbody.py - Color of thermal blackbodies.
Description:
Calculate the spectrum of a thermal blackbody at an arbitrary temperature.
Constants:
PLANCK_CONSTANT - Planck's constant, in J-sec
SPEED_OF_LIGHT - Speed of light, in m/sec
BOLTZMAN_CONSTANT - Boltzman's constant, in J/K
SUN_TEMPERATURE - Surface temperature of the Sun, in K
Functions:
blackbody_specific_intensity (wl_nm, T_K) -
Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.
blackbody_spectrum (T_K) -
Get the spectrum of a blackbody, as a numpy array.
blackbody_color (T_K) -
Given a temperature (K), return the xyz color of a thermal blackbody.
Plots:
blackbody_patch_plot (T_list, title, filename) -
Draw a patch plot of blackbody colors for the given temperature range.
blackbody_color_vs_temperature_plot (T_list, title, filename) -
Draw a color vs temperature plot for the given temperature range.
blackbody_spectrum_plot (T_K) -
Draw the spectrum of a blackbody at the given temperature.
References:
Frank H. Shu, The Physical Universe. An Introduction to Astronomy,
University Science Books, Mill Valley, California. 1982. ISBN 0-935702-05-9.
Charles Kittel and Herbert Kroemer, Thermal Physics, 2nd edition,
W. H. Freeman, New York, 1980. ISBN 0-7167-1088-9.
License:
Copyright (C) 2008 Mark Kness
Author - Mark Kness - mkness@alumni.utexas.net
This file is part of ColorPy.
ColorPy is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ColorPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ColorPy. If not, see <http://www.gnu.org/licenses/>.
'''
import math, numpy, pylab
import colormodels
import ciexyz
import plots
# Physical constants in mks units
PLANCK_CONSTANT = 6.6237e-34 # J-sec
SPEED_OF_LIGHT = 2.997925e+08 # m/sec
BOLTZMAN_CONSTANT = 1.3802e-23 # J/K
SUN_TEMPERATURE = 5778.0 # K
def blackbody_specific_intensity (wl_nm, T_K):
'''Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.'''
# precalculations that could be made global
a = (PLANCK_CONSTANT * SPEED_OF_LIGHT) / (BOLTZMAN_CONSTANT)
b = (2.0 * PLANCK_CONSTANT * SPEED_OF_LIGHT * SPEED_OF_LIGHT)
wl_m = wl_nm * 1.0e-9
try:
exponent = a / (wl_m * T_K)
except ZeroDivisionError:
# treat same as large exponent
return 0.0
if exponent > 500.0:
# so large that the final result is nearly zero - avoid the giant intermediate
return 0.0
specific_intensity = b / (math.pow (wl_m, 5) * (math.exp (exponent) - 1.0))
return specific_intensity
def blackbody_spectrum (T_K):
'''Get the spectrum of a blackbody, as a numpy array.'''
spectrum = ciexyz.empty_spectrum()
(num_rows, num_cols) = spectrum.shape
for i in xrange (0, num_rows):
specific_intensity = blackbody_specific_intensity (spectrum [i][0], T_K)
# scale by size of wavelength interval
spectrum [i][1] = specific_intensity * ciexyz.delta_wl_nm * 1.0e-9
return spectrum
def blackbody_color (T_K):
'''Given a temperature (K), return the xyz color of a thermal blackbody.'''
spectrum = blackbody_spectrum (T_K)
xyz = ciexyz.xyz_from_spectrum (spectrum)
return xyz
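# Usage sketch (hedged): combining the functions above with colormodels, which is
# already imported in this module, gives a displayable color, e.g.
#   xyz = blackbody_color (SUN_TEMPERATURE)
#   rgb = colormodels.rgb_from_xyz (xyz)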
#
# Figures
#
def blackbody_patch_plot (T_list, title, filename):
'''Draw a patch plot of blackbody colors for the given temperature range.'''
xyz_colors = []
color_names = []
for Ti in T_list:
xyz = blackbody_color (Ti)
xyz_colors.append (xyz)
name = '%g K' % (Ti)
color_names.append (name)
plots.xyz_patch_plot (xyz_colors, color_names, title, filename)
def blackbody_color_vs_temperature_plot (T_list, title, filename):
'''Draw a color vs temperature plot for the given temperature range.'''
num_T = len (T_list)
rgb_list = numpy.empty ((num_T, 3))
for i in xrange (0, num_T):
T_i = T_list [i]
xyz = blackbody_color (T_i)
rgb_list [i] = colormodels.rgb_from_xyz (xyz)
# note that b and g become negative for low T - MatPlotLib skips those on the semilog plot.
plots.color_vs_param_plot (
T_list,
rgb_list,
title,
filename,
plotfunc = pylab.semilogy,
tight = True,
xlabel = r'Temperature (K)',
ylabel = r'RGB Color')
def blackbody_spectrum_plot (T_K):
'''Draw the spectrum of a blackbody at the given temperature.'''
spectrum = blackbody_spectrum (T_K)
title = 'Blackbody Spectrum - T %d K' % (int (T_K))
filename = 'BlackbodySpectrum-%dK' % (int (T_K))
plots.spectrum_plot (
spectrum,
title,
filename,
xlabel = 'Wavelength (nm)',
ylabel = 'Specific Intensity')
#ylabel = 'Intensity ($W/m^2$)') # with LaTex symbols, the axis text gets too big...
# Create sample figures
def figures ():
'''Create some blackbody plots.'''
# patch plots
T_list_0 = plots.log_interpolate ( 1200.0, 20000.0, 48)
T_list_hot = plots.log_interpolate (10000.0, 40000.0, 24)
T_list_cool = plots.log_interpolate ( 950.0, 1200.0, 24)
blackbody_patch_plot (T_list_0, 'Blackbody Colors', 'Blackbody-Patch')
blackbody_patch_plot (T_list_hot, 'Hot Blackbody Colors', 'Blackbody-HotPatch')
blackbody_patch_plot (T_list_cool, 'Cool Blackbody Colors', 'Blackbody-CoolPatch')
# color vs temperature
blackbody_color_vs_temperature_plot (range (1200, 16000, 50), 'Blackbody Colors', 'Blackbody-Colors')
blackbody_color_vs_temperature_plot (range (10000, 40000, 100), 'Hot Blackbody Colors', 'Blackbody-HotColors')
blackbody_color_vs_temperature_plot (range (950, 1200, 1), 'Cool Blackbody Colors', 'Blackbody-CoolColors')
# spectrum of specific temperatures
blackbody_spectrum_plot (2000.0)
blackbody_spectrum_plot (3000.0) # Proxima Centauri
blackbody_spectrum_plot (SUN_TEMPERATURE) # Sun
blackbody_spectrum_plot (11000.0) # Rigel
blackbody_spectrum_plot (15000.0)
| colorpy/colorpy-0.1.0/blackbody.py | 6,812 | Given a temperature (K), return the xyz color of a thermal blackbody.
Draw a color vs temperature plot for the given temperature range.
Draw a patch plot of blackbody colors for the given temperature range.
Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.
Get the spectrum of a blackbody, as a numpy array.
Draw the spectrum of a blackbody at the given temperature.
Create some blackbody plots.
blackbody.py - Color of thermal blackbodies.
Description:
Calculate the spectrum of a thermal blackbody at an arbitrary temperature.
Constants:
PLANCK_CONSTANT - Planck's constant, in J-sec
SPEED_OF_LIGHT - Speed of light, in m/sec
BOLTZMAN_CONSTANT - Boltzman's constant, in J/K
SUN_TEMPERATURE - Surface temperature of the Sun, in K
Functions:
blackbody_specific_intensity (wl_nm, T_K) -
Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.
blackbody_spectrum (T_K) -
Get the spectrum of a blackbody, as a numpy array.
blackbody_color (T_K) -
Given a temperature (K), return the xyz color of a thermal blackbody.
Plots:
blackbody_patch_plot (T_list, title, filename) -
Draw a patch plot of blackbody colors for the given temperature range.
blackbody_color_vs_temperature_plot (T_list, title, filename) -
Draw a color vs temperature plot for the given temperature range.
blackbody_spectrum_plot (T_K) -
Draw the spectrum of a blackbody at the given temperature.
References:
Frank H. Shu, The Physical Universe. An Introduction to Astronomy,
University Science Books, Mill Valley, California. 1982. ISBN 0-935702-05-9.
Charles Kittel and Herbert Kroemer, Thermal Physics, 2nd edition,
W. H. Freeman, New York, 1980. ISBN 0-7167-1088-9.
License:
Copyright (C) 2008 Mark Kness
Author - Mark Kness - mkness@alumni.utexas.net
This file is part of ColorPy.
ColorPy is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ColorPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ColorPy. If not, see <http://www.gnu.org/licenses/>.
Physical constants in mks units J-sec m/sec J/K K precalculations that could be made global treat same as large exponent so large that the final result is nearly zero - avoid the giant intermediate scale by size of wavelength interval Figures note that b and g become negative for low T - MatPlotLib skips those on the semilog plot.ylabel = 'Intensity ($W/m^2$)') with LaTex symbols, the axis text gets too big... Create sample figures patch plots color vs temperature spectrum of specific temperatures Proxima Centauri Sun Rigel | 3,329 | en | 0.778121 |
import mugReader
from flask import Flask, request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class readMugRFID(Resource):
def get(self):
return {'mugId': mugReader.readMug()}
api.add_resource(readMugRFID, '/mugReader/')
if __name__ == "__main__":
#remove host for production
app.run(host= '192.168.1.183')
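# Example request sketch (hedged: Flask's default port 5000 is assumed, since
# app.run() above does not set an explicit port):
#   curl http://192.168.1.183:5000/mugReader/
#   -> {"mugId": <value returned by mugReader.readMug()>}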
| endpoints/mugReader.py | 353 | remove host for production | 26 | en | 0.730339 |
#!/usr/bin/env python
#from .core import *
import numpy as np
import pandas as pd
import shutil
import urllib
import urlparse
from os.path import splitext, basename
import os
from os import sys, path
from pprint import pprint
import StringIO
import db
from gp import *
from core import *
from IPython.core.debugger import Tracer
class Annotation(UploadCsvConvert):
def __init__(self, xe):
xe.attrib['newCols'] = 'gid,annotation_type_id,content,annotation_field1,ds,tax_id'
UploadCsvConvert.__init__(self,xe=xe,dest='annotation')
self.type_col = 'annotation_type_id'
def get_type_col_value_sql(self):
return 'SELECT annotation_type_id FROM %s.annotation_type WHERE annotation_type_name = ?' % SyncDB.DATABASE
| database/task_class/annotation.py | 753 | !/usr/bin/env pythonfrom .core import * | 39 | en | 0.220965 |
# -*- coding: utf-8 -*-
import io
import json
import os
import sys
import shutil
from os import path
import django
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.conf import settings
import blueapps
PY_VER = sys.version
class Command(TemplateCommand):
help = u"基于蓝鲸开发框架初始化开发样例"
def add_arguments(self, parser):
parser.add_argument('directory', nargs='?', default='./',
help='Optional destination directory')
def handle(self, **options):
target = options.pop('directory')
        # Get the original content first
if not path.exists('config/default.py'):
raise CommandError("config/default.py does not exist,"
" please init a django project first.")
if PY_VER[0] == '2':
old_file = open('config/default.py')
else:
old_file = open('config/default.py', encoding='utf-8')
# if some directory is given, make sure it's nicely expanded
top_dir = path.abspath(path.expanduser(target))
if not path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please init first." % top_dir)
if not path.exists(path.join(top_dir, 'manage.py')):
raise CommandError("Current directory '%s' is not "
"a django project dir, please init first. "
"(bk-admin init ${app_code})" %
top_dir)
base_subdir = 'weixin_template'
append_file_tuple = (('', 'requirements.txt'),)
# Setup a stub settings environment for template rendering
if not settings.configured:
settings.configure()
django.setup()
template_dir = path.join(blueapps.__path__[0], 'conf', base_subdir)
run_ver = None
if PY_VER[0] == '2':
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'))
else:
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'), encoding='utf-8')
for line in conf_file.readlines():
if line.startswith('RUN_VER'):
run_ver = line[11:-2]
conf_file.close()
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
relative_dir = root[prefix_length:]
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
flag = root.endswith('sites')
for dirname in dirs[:]:
if (
dirname.startswith('.') or
dirname == '__pycache__' or
(flag and dirname != run_ver)
):
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class', '.json')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir, filename)
for old_suffix, new_suffix in self.rewrite_template_suffixes:
if new_path.endswith(old_suffix):
new_path = new_path[:-len(old_suffix)] + new_suffix
break # Only rewrite once
with io.open(old_path, 'rb') as template_file:
content = template_file.read()
w_mode = 'wb'
for _root, _filename in append_file_tuple:
if _root == relative_dir and _filename == filename:
w_mode = 'ab'
with io.open(new_path, w_mode) as new_file:
new_file.write(content)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
        # Modify the file
modify_default_file(old_file)
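# A hypothetical sketch of an entry in the default.json template, as interpreted
# by get_default_content() below (the keys and values shown are assumptions, not
# the actual shipped template):
# {
#     "INSTALLED_APPS = (": {"mode": "add", "content": "'weixin',"},
#     "IS_USE_CELERY = ": {"mode": "cover", "content": "True"}
# }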
# Read the original default file and append to or overwrite its content
def modify_default_file(old_file):
    # Open the file to be overwritten and the replacement json file
if PY_VER[0] == '2':
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r') as json_file:
get_default_content(old_file, json_file)
else:
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r', encoding='utf-8') as json_file:
get_default_content(old_file, json_file)
def get_default_content(old_file, json_file):
with old_file as old_file:
        # Read the json data content
result_content = old_file.read()
json_dict = json.load(json_file)
        # Replace or append content according to each key
for replace_property in json_dict:
            # Get the value for this key
propertys = json_dict.get(replace_property)
            # Find the position of the key
start_index = result_content.find(str(replace_property))
            # Get the content for this key
content = propertys.get('content')
            # When mode is 'add', append the content
if propertys.get('mode') == 'add':
end_index = result_content.find(')', start_index) - 1
temp_content = result_content[start_index:end_index]
                # Check whether the last character is a comma
if temp_content[-1] == ',' or temp_content[-1] == '(':
temp_content += '\n'
else:
temp_content += ',\n'
                # Replace the content; it needs to be converted with str()
result_content = ''.join(
[result_content[:start_index], temp_content,
str(content),
result_content[end_index:]])
            # When mode is 'cover', overwrite the content
elif propertys.get('mode') == 'cover':
end_index = result_content.find('\n', start_index)
                # i.e. the value is already True, so no overwrite is needed
if result_content[start_index: end_index].strip() == 'IS_USE_CELERY = False':
continue
                # Shift start_index to avoid overwriting the variable name
start_index += len(replace_property)
                # Overwrite the content
result_content = ''.join(
[result_content[:start_index],
'%s' % str(content),
result_content[end_index:]])
else:
# 其他情况
break
if PY_VER[0] == '2':
with open('config/default.py', 'w') as default_file:
default_file.write(result_content)
else:
with open('config/default.py', 'w',
encoding='utf-8') as default_file:
                default_file.write(result_content)
 | blueapps/contrib/bk_commands/management/commands/startweixin.py | 7,390 | -*- coding: utf-8 -*- 先获取原内容 if some directory is given, make sure it's nicely expanded Setup a stub settings environment for template rendering Ignore some files as they cause various breakages. Only rewrite once 修改文件 获取原先的 default 文件并对其进行追加和覆盖 打开覆盖前的文件和替换的 json 文件 获取 json 数据内容 根据 key 进行替换会追加内容 获得 key 值 寻找 key 值所在位置 获得 key 的 content 内容 mode 为 add 追加内容 检查最后一个是不是,结尾 内容替换 content 需要进行 str 方法转换 mode 为 cover 进行覆盖内容 即最后一个是 True 不需要做任何覆盖 需要位移 start_index 防止覆盖变量名称 内容覆盖 其他情况 | 471 | zh | 0.88107 |
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.coins_forwarding_success_data_item import CoinsForwardingSuccessDataItem
globals()['CoinsForwardingSuccessDataItem'] = CoinsForwardingSuccessDataItem
class CoinsForwardingSuccessData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'product': (str,), # noqa: E501
'event': (str,), # noqa: E501
'item': (CoinsForwardingSuccessDataItem,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'product': 'product', # noqa: E501
'event': 'event', # noqa: E501
'item': 'item', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| cryptoapis/model/coins_forwarding_success_data.py | 12,529 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
noqa: F401 noqa: F401 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 discard variable. noqa: E501 discard variable. | 6,671 | en | 0.809589 |
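# A hedged instantiation sketch based only on the constructor documentation above.
# The literal product/event strings are placeholder assumptions, not values taken
# from this file, and `item` is assumed to be a pre-built CoinsForwardingSuccessDataItem.
from cryptoapis.model.coins_forwarding_success_data import CoinsForwardingSuccessData

def build_success_data(item):
    return CoinsForwardingSuccessData(
        product="EXAMPLE_PRODUCT",    # assumed example value
        event="EXAMPLE_EVENT",        # assumed example value
        item=item,
        _check_type=True,             # documented keyword argument
    )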
#!/usr/bin/env python3.7
#-*-coding:utf-8-*-
import json
import discord
PATH = "config.json"
def singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
@singleton
class Config:
def __init__(self):
with open(PATH,"r") as configfile:
self.config = json.load(configfile)
self.token = self.config["token"]
self.owners = self.config["owner"]
self.guildID = None
if self.config["self-guild"].get("mode","load") == "load":
self.guildID = self.config["self-guild"]["ID"]
self.guildRegion = self.parseRegion(self.config["self-guild"]["region"])
self.guild = None
self.adminrole = None
def __getitem__(self,item):
return self.config[item]
def initGuild(self, guild):
self.guild = guild
self.adminrole = discord.utils.get(self.guild.roles, name="Masakaki")
@classmethod
def parseRegion(cl, regionString):
key = regionString.lower()
if (key == "amsterdam"): return discord.VoiceRegion.amsterdam
elif (key == "brazil"): return discord.VoiceRegion.brazil
elif (key == "eu_central"): return discord.VoiceRegion.eu_central
elif (key == "eu_west"): return discord.VoiceRegion.eu_west
elif (key == "frankfurt"): return discord.VoiceRegion.frankfurt
elif (key == "hongkong"): return discord.VoiceRegion.hongkong
elif (key == "india"): return discord.VoiceRegion.india
elif (key == "japan"): return discord.VoiceRegion.japan
elif (key == "london"): return discord.VoiceRegion.london
elif (key == "russia"): return discord.VoiceRegion.russia
elif (key == "singapore"): return discord.VoiceRegion.singapore
elif (key == "southafrica"): return discord.VoiceRegion.southafrica
elif (key == "sydney"): return discord.VoiceRegion.sydney
elif (key == "us_central"): return discord.VoiceRegion.us_central
elif (key == "us_east"): return discord.VoiceRegion.us_east
elif (key == "us_south"): return discord.VoiceRegion.us_south
elif (key == "us_west"): return discord.VoiceRegion.us_west
return None
| src/utils/config.py | 2,331 | !usr/bin/env python3.7-*-coding:utf-8-*- | 40 | en | 0.483934 |
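# A minimal standalone sketch of the caching behaviour the `singleton` decorator
# above provides: repeated "construction" returns the one cached instance.
# The decorator is inlined here so the example runs on its own.
def _singleton(class_):
    instances = {}
    def getinstance(*args, **kwargs):
        if class_ not in instances:
            instances[class_] = class_(*args, **kwargs)
        return instances[class_]
    return getinstance

@_singleton
class Settings:
    def __init__(self):
        self.values = {}

a = Settings()
b = Settings()
assert a is b                      # same object both times
a.values["debug"] = True
assert b.values["debug"] is True   # state is shared through the single instance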
from __future__ import unicode_literals
import pytest
import itertools
import boto
import boto3
from botocore.exceptions import ClientError
from boto.exception import EC2ResponseError
from boto.ec2.instance import Reservation
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2
from tests import EXAMPLE_AMI_ID
@mock_ec2_deprecated
def test_add_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.add_tag("a key", "some value", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.add_tag("a key", "some value")
chain = itertools.chain.from_iterable
existing_instances = list(
chain([res.instances for res in conn.get_all_reservations()])
)
existing_instances.should.have.length_of(1)
existing_instance = existing_instances[0]
existing_instance.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_remove_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
with pytest.raises(EC2ResponseError) as ex:
instance.remove_tag("a key", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.remove_tag("a key")
conn.get_all_tags().should.have.length_of(0)
instance.add_tag("a key", "some value")
conn.get_all_tags().should.have.length_of(1)
instance.remove_tag("a key", "some value")
@mock_ec2_deprecated
def test_get_all_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_with_special_characters():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some<> value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some<> value")
@mock_ec2_deprecated
def test_create_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {
"a key": "some value",
"another key": "some other value",
"blank key": "",
}
with pytest.raises(EC2ResponseError) as ex:
conn.create_tags(instance.id, tag_dict, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
conn.create_tags(instance.id, tag_dict)
tags = conn.get_all_tags()
set([key for key in tag_dict]).should.equal(set([tag.name for tag in tags]))
set([tag_dict[key] for key in tag_dict]).should.equal(
set([tag.value for tag in tags])
)
@mock_ec2_deprecated
def test_tag_limit_exceeded():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {}
for i in range(51):
tag_dict["{0:02d}".format(i + 1)] = ""
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
instance.add_tag("a key", "a value")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
tags = conn.get_all_tags()
tag = tags[0]
tags.should.have.length_of(1)
tag.name.should.equal("a key")
tag.value.should.equal("a value")
@mock_ec2_deprecated
def test_invalid_parameter_tag_null():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as cm:
instance.add_tag("a key", None)
cm.value.code.should.equal("InvalidParameterValue")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_invalid_id():
conn = boto.connect_ec2("the_key", "the_secret")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("ami-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("blah-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_get_all_tags_resource_id_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-id": instance.id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-id": image_id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_resource_type_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-type": "instance"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-type": "image"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_key_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"key": "an instance key"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_value_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("an instance key", "some other value")
reservation_c = conn.run_instances(EXAMPLE_AMI_ID)
instance_c = reservation_c.instances[0]
instance_c.add_tag("an instance key", "other value*")
reservation_d = conn.run_instances(EXAMPLE_AMI_ID)
instance_d = reservation_d.instances[0]
instance_d.add_tag("an instance key", "other value**")
reservation_e = conn.run_instances(EXAMPLE_AMI_ID)
instance_e = reservation_e.instances[0]
instance_e.add_tag("an instance key", "other value*?")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"value": "some value"})
tags.should.have.length_of(2)
tags = conn.get_all_tags(filters={"value": "some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value*"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": r"*value\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\?"})
tags.should.have.length_of(1)
@mock_ec2_deprecated
def test_retrieved_instances_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
reservations = conn.get_all_reservations()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instances[0].id.should.equal(instance.id)
conn.create_tags([instance.id], tags_to_be_set)
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
retrieved_tags = instance.tags
# Cleanup of instance
conn.terminate_instances([instances[0].id])
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_volumes_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
conn.create_tags([volume.id], tags_to_be_set)
# Fetch the volume again
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
retrieved_tags = volume.tags
volume.delete()
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_snapshots_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
volume = conn.create_volume(80, "eu-west-1a")
snapshot = conn.create_snapshot(volume.id)
conn.create_tags([snapshot.id], tags_to_be_set)
# Fetch the snapshot again
all_snapshots = conn.get_all_snapshots()
snapshot = [item for item in all_snapshots if item.id == snapshot.id][0]
retrieved_tags = snapshot.tags
conn.delete_snapshot(snapshot.id)
volume.delete()
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_filter_instances_by_wildcard_tags():
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance_a = reservation.instances[0]
instance_a.add_tag("Key1", "Value1")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("Key1", "Value2")
reservations = conn.get_all_reservations(filters={"tag:Key1": "Value*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-key": "Key*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-value": "Value*"})
reservations.should.have.length_of(2)
@mock_ec2
def test_create_volume_with_tags():
client = boto3.client("ec2", "us-west-2")
response = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)
assert response["Tags"][0]["Key"] == "TEST_TAG"
@mock_ec2
def test_create_snapshot_with_tags():
client = boto3.client("ec2", "us-west-2")
volume_id = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)["VolumeId"]
snapshot = client.create_snapshot(
VolumeId=volume_id,
TagSpecifications=[
{
"ResourceType": "snapshot",
"Tags": [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}],
}
],
)
expected_tags = [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}]
assert snapshot["Tags"] == expected_tags
@mock_ec2
def test_create_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# create tag with empty resource
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_delete_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# delete tag with empty resource
with pytest.raises(ClientError) as ex:
client.delete_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_retrieve_resource_with_multiple_tags():
ec2 = boto3.resource("ec2", region_name="us-west-1")
blue, green = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
ec2.create_tags(
Resources=[blue.instance_id],
Tags=[
{"Key": "environment", "Value": "blue"},
{"Key": "application", "Value": "api"},
],
)
ec2.create_tags(
Resources=[green.instance_id],
Tags=[
{"Key": "environment", "Value": "green"},
{"Key": "application", "Value": "api"},
],
)
green_instances = list(ec2.instances.filter(Filters=(get_filter("green"))))
green_instances.should.equal([green])
blue_instances = list(ec2.instances.filter(Filters=(get_filter("blue"))))
blue_instances.should.equal([blue])
def get_filter(color):
return [
{"Name": "tag-key", "Values": ["application"]},
{"Name": "tag-value", "Values": ["api"]},
{"Name": "tag-key", "Values": ["environment"]},
{"Name": "tag-value", "Values": [color]},
]
| tests/test_ec2/test_tags.py | 16,969 | noqa cm.value.request_id.should_not.be.none cm.value.request_id.should_not.be.none cm.value.request_id.should_not.be.none cm.value.request_id.should_not.be.none cm.value.request_id.should_not.be.none Cleanup of instance Check whether tag is present with correct value Fetch the volume again Check whether tag is present with correct value Fetch the snapshot again Check whether tag is present with correct value create ec2 client in us-west-1 create tag with empty resource create ec2 client in us-west-1 delete tag with empty resource | 535 | en | 0.400904 |
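# For reference, a minimal standalone sketch of the tag round trip these tests
# exercise, written against the non-deprecated boto3 client API under moto.
# The region, volume size and tag values are arbitrary example choices.
import boto3
from moto import mock_ec2

@mock_ec2
def tag_roundtrip_example():
    client = boto3.client("ec2", region_name="us-east-1")
    volume_id = client.create_volume(AvailabilityZone="us-east-1a", Size=10)["VolumeId"]
    client.create_tags(Resources=[volume_id], Tags=[{"Key": "env", "Value": "dev"}])
    tags = client.describe_tags(
        Filters=[{"Name": "resource-id", "Values": [volume_id]}]
    )["Tags"]
    assert tags[0]["Key"] == "env" and tags[0]["Value"] == "dev"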
'''define the config file for ade20k and resnet101os16'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 150,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_train',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_test',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_test/test.log',
'resultsavepath': 'deeplabv3_resnet101os16_ade20k_test/deeplabv3_resnet101os16_ade20k_results.pkl'
}
) | ssseg/cfgs/deeplabv3/cfgs_ade20k_resnet101os16.py | 1,192 | define the config file for ade20k and resnet101os16
modify dataset config modify dataloader config modify optimizer config modify losses config modify segmentor config modify inference config modify common config | 214 | en | 0.326299 |
# Generated by Django 2.1.2 on 2018-11-01 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jbank", "0024_auto_20180425_1704"),
]
operations = [
migrations.AddField(
model_name="payout",
name="reference",
field=models.CharField(blank=True, default="", max_length=32, verbose_name="recipient reference"),
),
migrations.AlterField(
model_name="payout",
name="messages",
field=models.TextField(blank=True, default="", verbose_name="recipient messages"),
),
]
| jbank/migrations/0025_auto_20181101_1430.py | 644 | Generated by Django 2.1.2 on 2018-11-01 14:30 | 45 | en | 0.609097 |
import os
import sys
import numpy as np
import caffe
import argparse
parser = argparse.ArgumentParser(description='Computes 5-fold cross-validation results over Twitter five-agrees dataset')
parser.add_argument('-ov', '--oversampling', type=int, help='Enables (1) or disables (0) oversampling')  # parse as int so the 0/1 checks below work
args = parser.parse_args()
if args.oversampling == 0:
oversampling = False
elif args.oversampling == 1:
oversampling = True
else:
sys.exit("oversampling must be 0 or 1")
subsets = ['test1', 'test2', 'test3', 'test4', 'test5']
mean_file = 'ilsvrc_2012_mean.npy'
accuracies = []
output_string = ""
for subset in subsets:
# Update paths for this subset
deploy_path = 'sentiment_deploy.prototxt'
caffemodel_path = 'twitter_finetuned_' + subset + '_iter_180.caffemodel'
ground_truth = 'ground_truth/' + subset + '/test.txt'
instanceList = []
correctLabels = 0
incorrectLabels = 0
positiveLabels = 0
negativeLabels = 0
positivePredictions = 0
negativePredictions = 0
gt_file = open(ground_truth, "r")
# Store images in a list
while (True):
line = gt_file.readline()
# Check if we have reached the end
if (len(line) == 0):
break
# Add the line to the list
instanceList.append(line)
# Load network
net = caffe.Classifier(deploy_path,
caffemodel_path,
mean=np.load(mean_file).mean(1).mean(1),
image_dims=(256, 256),
channel_swap=(2, 1, 0),
raw_scale=255)
# Loop through the ground truth file, predict each image's label and store the wrong ones
counter = 0
for instance in instanceList:
values = instance.split()
image_path = values[0]
sentiment = int(values[1])
# Load image
im = caffe.io.load_image(image_path)
# Make a forward pass and get the score
prediction = net.predict([im], oversample=oversampling)
# Check if the prediction was correct or not
if prediction[0].argmax() == sentiment:
correctLabels += 1
else:
incorrectLabels += 1
# Update label counter
if sentiment == 0:
negativeLabels += 1
else:
positiveLabels += 1
# Update prediction counter (negative = 0, positive = 1)
if prediction[0].argmax() == 0:
negativePredictions += 1
else:
positivePredictions += 1
counter += 1
if counter % 40 == 0:
print subset + ', ' + str(counter)
sys.stdout.flush()
gt_file.close()
accuracy = 100. * correctLabels / (correctLabels + incorrectLabels)
accuracies.append(accuracy)
# Print accuracy results
print '------------- ' + subset + ' -------------'
print 'Accuracy = ', str(accuracy)
print '---------------------------------'
output_string += 'Subset: {0}: \n Positive images: {1}\n Negative images: {2}\n Positive predictions: {3}\n Negative predictions: {4}\n'.format(
subset, str(positiveLabels), str(negativeLabels), str(positivePredictions), str(negativePredictions))
print '\nRESULTS:'
for i in range(0, 5):
print subsets[i] + ': ' + str(accuracies[i]) + '%'
print '\nMean accuracy = ' + str(1. * sum(accuracies) / len(accuracies))
print "\n-------------------------------------\n"
print output_string
| compute_cross_validation_accuracy.py | 3,475 | Update paths for this subset Store images in a list Check if we have reached the end Add the line to the list Load network Loop through the ground truth file, predict each image's label and store the wrong ones Load image Make a forward pass and get the score Check if the prediction was correct or not Update label counter Update prediction counter (negative = 0, positive = 1) Print accuracy results | 401 | en | 0.851917 |
"""
Make sure that tiddler fields which are not strings
are stringified, otherwise, the text serialization will
assplode.
"""
from tiddlyweb.serializer import Serializer
from tiddlyweb.model.tiddler import Tiddler
def setup_module(module):
pass
def test_float_field():
tiddler = Tiddler('foo', 'bar')
tiddler.fields['float'] = 100.5
serializer = Serializer('text')
serializer.object = tiddler
assert '100.5' in '%s' % serializer
| test/test_tiddler_fields_as_strings.py | 460 | Make sure that tiddler fields which are not strings
are stringified, otherwise, the text serialization will
assplode. | 118 | en | 0.848102 |
# a simple script to rename multiple files
import os
import re
path = 'myimages/'
files = os.listdir(path)
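# natural sort: split each name into digit and non-digit chunks so that
# e.g. "img2.jpg" sorts before "img10.jpg"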
files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
for i, file in enumerate(files):
os.rename(path + file, path + "rename_{}".format(i)+".jpg")
print('done!')
| simple_task_python/rename.py | 326 | a simple script to rename multiple files | 40 | en | 0.616114 |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# vcdomainprovisioningconfig filter
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
VCFilter = self.db.mock_model(model_name="VCFilter", db_table="vc_vcfilter")
self.db.add_column(
"vc_vcdomainprovisioningconfig",
"vc_filter",
models.ForeignKey(
VCFilter, verbose_name="VC Filter", null=True, blank=True, on_delete=models.CASCADE
),
)
| vc/migrations/0010_vcdomainprobvisioningconfig_vcfilter.py | 860 | -*- coding: utf-8 -*- ---------------------------------------------------------------------- vcdomainprovisioningconfig filter ---------------------------------------------------------------------- Copyright (C) 2007-2019 The NOC Project See LICENSE for details ---------------------------------------------------------------------- Third-party modules NOC modules | 364 | en | 0.249775 |
# Overloading Methods
class Point():
def __init__(self, x=0, y=0):
self.x = x
self.y = y
self.coords = (self.x, self.y)
def move(self, x, y):
self.x += x
self.y += y
# Overload __dunder__
def __add__(self, p):
return Point(self.x + p.x, self.y + p.y)
def __sub__(self, p):
return Point(self.x - p.x, self.y - p.y)
def __mul__(self, p):
return Point(self.x * p.x, self.y * p.y)
def length(self):
import math
return math.sqrt(self.x**2 + self.y**2)
def __gt__(self, p):
return self.length() > p.length()
def __ge__(self, p):
return self.length() >= p.length()
def __lt__(self, p):
return self.length() < p.length()
def __le__(self, p):
return self.length() <= p.length()
def __eq__(self, p):
        # comparing lengths does not always work out correctly; remember float comparisons
#return self.length() == p.length
return self.x == p.x and self.y == p.y
    # __str__ is needed to represent the output of the overloaded operators
def __str__(self):
return "(" + str(self.x) + "," + str(self.y) + ")"
p1 = Point(3,4)
p2 = Point(3,2)
p3 = Point(1,3)
p4 = Point(0,1)
p5 = p1 + p2
p6 = p4 - p1
p7 = p2 * p3
print(p5, p6, p7)
print(p1 == p2)
print(p1 > p2)
print(p4 <= p3)
| ObjectOrientedProgramming/OOPpart4.py | 1,356 | Overloading Methods Overload __dunder__ this math does not alway work out correctly remeber float comparisionsreturn self.length() == p.length need to __str__ to represent the output of overloaded | 196 | en | 0.719853 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| examples/cifar10/cifar10.py | 14,656 | Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
Download and extract the tarball from Alex's website.
Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=missing-docstring Basic model parameters. Global constants describing the CIFAR-10 data set. Constants describing the training process. The decay to use for the moving average. Epochs after which learning rate decays. Learning rate decay factor. Initial learning rate. If a model is trained with multiple GPUs, prefix all Op names with tower_name to differentiate the operations. Note that this prefix is removed from the names of the summaries when visualizing a model. Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training session. This helps the clarity of presentation on tensorboard. We instantiate all variables using tf.get_variable() instead of tf.Variable() in order to share variables across multiple GPU training runs. If we only ran this model on a single GPU, we could simplify this function by replacing all instances of tf.get_variable() with tf.Variable(). conv1 pool1 norm1 conv2 norm2 pool2 local3 Move everything into depth so we can perform a single matrix multiply. local4 linear layer(WX + b), We don't apply softmax here because tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits and performs the softmax internally for efficiency. Calculate the average cross entropy loss across the batch. The total loss is defined as the cross entropy loss plus all of the weight decay terms (L2 loss). Compute the moving average of all individual losses and the total loss. Attach a scalar summary to all individual losses and the total loss; do the same for the averaged version of the losses. Name each loss as '(raw)' and name the moving average version of the loss as the original loss name. Variables that affect learning rate. Decay the learning rate exponentially based on the number of steps. Generate moving averages of all losses and associated summaries. Compute gradients. Apply gradients. Add histograms for trainable variables. Add histograms for gradients. Track the moving averages of all trainable variables. | 5,566 | en | 0.792164 |
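# A short sketch, following the "Summary of available functions" docstring above,
# of how these pieces wire into a TF1 training graph; session and checkpointing
# boilerplate is omitted.
import tensorflow as tf
import cifar10

cifar10.maybe_download_and_extract()
with tf.Graph().as_default():
    global_step = tf.train.get_or_create_global_step()
    images, labels = cifar10.distorted_inputs()        # distorted training batches
    logits = cifar10.inference(images)                 # forward pass
    total_loss = cifar10.loss(logits, labels)          # cross entropy + weight decay
    train_op = cifar10.train(total_loss, global_step)  # SGD step with moving averages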
# Functions are useful when we have repetitive things, so we can simplify the program
def lin(): # to define a function it must have parentheses at the end
print('=-'*30)
lin()
print('Bem Vindo')
lin()
nome = str(input('Qual seu nome? '))
lin()
print(f'Tenha um otimo dia {nome}!')
lin()
def mensagem(msg):
print('-'*30)
    print(msg) # the message that appears here is whatever the user passes in when calling the function
print('-'*30)
mensagem('SISTEMA DE ALUNOS')
| modulo 3/aulas/4.0 - Funcoes.py | 490 | Funcoes servem para quando tivermos coisas repetitivas poder simplificar o programa para definir um afuncao ela tem que ter parenteses no finalk A mensagem que vai aparecer aqui o usuario vai digitar quando chamar a funcao | 222 | pt | 0.936325 |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
from .generation import (
TRANSFORM_FACTORIES,
colorspace_factory,
group_transform_factory,
look_factory,
named_transform_factory,
produce_transform,
transform_factory,
transform_factory_clf_transform_to_group_transform,
transform_factory_default,
view_transform_factory,
)
from .generation import (
ConfigData,
VersionData,
deserialize_config_data,
generate_config,
serialize_config_data,
validate_config,
)
from .reference import (
build_aces_conversion_graph,
classify_aces_ctl_transforms,
conversion_path,
ctl_transform_to_node,
discover_aces_ctl_transforms,
filter_ctl_transforms,
filter_nodes,
node_to_ctl_transform,
plot_aces_conversion_graph,
print_aces_taxonomy,
unclassify_ctl_transforms,
)
from .reference import (
ColorspaceDescriptionStyle,
generate_config_aces,
)
from .cg import generate_config_cg
__all__ = [
"TRANSFORM_FACTORIES",
"colorspace_factory",
"group_transform_factory",
"look_factory",
"named_transform_factory",
"produce_transform",
"transform_factory",
"transform_factory_clf_transform_to_group_transform",
"transform_factory_default",
"view_transform_factory",
]
__all__ += [
"ConfigData",
"VersionData",
"deserialize_config_data",
"generate_config",
"serialize_config_data",
"validate_config",
]
__all__ += [
"build_aces_conversion_graph",
"classify_aces_ctl_transforms",
"conversion_path",
"ctl_transform_to_node",
"discover_aces_ctl_transforms",
"filter_ctl_transforms",
"filter_nodes",
"node_to_ctl_transform",
"plot_aces_conversion_graph",
"print_aces_taxonomy",
"unclassify_ctl_transforms",
]
__all__ += [
"ColorspaceDescriptionStyle",
"generate_config_aces",
]
__all__ += ["generate_config_cg"]
| opencolorio_config_aces/config/__init__.py | 1,959 | SPDX-License-Identifier: BSD-3-Clause Copyright Contributors to the OpenColorIO Project. | 88 | en | 0.556001 |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 15:49:49 2017
@author: tkoller
"""
import numpy as np
import numpy.linalg as nLa
from ..utils import unavailable
try:
import matplotlib.pyplot as plt
_has_matplotlib = True
except ImportError:
_has_matplotlib = False
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_3D(p, q, ax, n_points=100):
""" Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
assert np.shape(p) == (3, 1), "p needs to be a 3x1 vector"
assert np.shape(q) == (3, 3), "q needs to be a spd 3x3 matrix"
    assert np.allclose(q, 0.5 * (q + q.T)), "q needs to be spd"
    # transform to radius/center parametrization
    U, s, rotation = nLa.svd(q)
assert np.all(s > 0), "q needs to be positive definite"
radii = 1.0 / np.sqrt(s)
# get x,y,z of sphere and transform
u = np.linspace(0.0, 2.0 * np.pi, n_points)
v = np.linspace(0.0, np.pi, n_points)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],
                                                 rotation) + p.flatten()
# plot the result
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
return ax
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_2D(p, q, ax, n_points=100, color="r"):
""" Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
    p: 2x1 array[float]
        Center of the ellipsoid
    q: 2x2 array[float]
        Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
plt.sca(ax)
    r = nLa.cholesky(q).T  # checks spd inside the function
    t = np.linspace(0, 2 * np.pi, n_points)
    z = [np.cos(t), np.sin(t)]
    ellipse = np.dot(r, z) + p
handle, = ax.plot(ellipse[0, :], ellipse[1, :], color)
return ax, handle
| safe_exploration/visualization/utils_visualization.py | 2,572 | Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
Created on Thu Sep 21 15:49:49 2017
@author: tkoller
-*- coding: utf-8 -*- transform to radius/center parametrization get x,y,z of sphere and transform plot the result checks spd inside the function | 912 | en | 0.489268 |
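# A small usage sketch for plot_ellipsoid_2D; the import path and the example
# center/shape matrix are assumptions for illustration only.
import numpy as np
import matplotlib.pyplot as plt
from utils_visualization import plot_ellipsoid_2D  # assumed import path

p = np.array([[1.0], [2.0]])              # 2x1 center
q = np.array([[2.0, 0.3], [0.3, 1.0]])    # 2x2 spd shape matrix
fig, ax = plt.subplots()
ax, handle = plot_ellipsoid_2D(p, q, ax, color="b")
plt.show()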
# -*- coding: utf-8 -*-
"""
Created on 2013-2014
Author : Edouard Cuvelier
Affiliation : Université catholique de Louvain - ICTEAM - UCL Crypto Group
Address : Place du Levant 3, 1348 Louvain-la-Neuve, BELGIUM
email : firstname.lastname@uclouvain.be
"""
from numpy import *
import gmpy
from Crypto.Random.random import randint
import random as rd
import tools.fingexp as fingexp
import tools.utils as utils
class Field(fingexp.FingExp):
'Class for Field'
def __init__(self,p):
'''Defines the modulus p which must be a prime
'''
self.F = self
self.p = gmpy.mpz(p) # prime modulus
self.char = self.p # characteristic
self.q = self.p+1 # order+1 #TODO : correct?
assert gmpy.is_prime(p)
self.rep = None
self.g = None
'''
g is a random quadratic residue used to compute square roots and it is
initialized the first time a square root is computed
'''
self.to_fingerprint = ["p"]
self.to_export = {"fingerprint": [],"value": ["p"]}
super(Field, self).__init__()
def load(self, data, fingerprints):
self.p = utils.b64tompz(data["p"])
def one(self):
'unit element for multiplication'
return FieldElem(1, self)
def zero(self):
'unit element for addition'
return FieldElem(0,self)
def elem(self,x):
''' return an element of value x
'''
if isinstance(x,FieldElem):
assert x.F == self
return x
m = gmpy.mpz(1)
assert isinstance(x,int) or isinstance(x, long) or type(x)==type(m)
return FieldElem(x,self)
def random(self,low=1,high=None):
''' Return a random element of the Field
'''
if high == None :
high = int(self.p-1)
rand = randint(low,high)
return self.elem(rand)
def __eq__(self, other):
'testing if we are working in the same field'
try:
return (self.p == other.p)
except:
return False
def add(self, a, b):
'''
field operation: addition mod p
'''
return FieldElem((a.val + b.val) % self.p, self)
def sub(self, a, b):
'''
        field operation: subtraction mod p
'''
return FieldElem((a.val - b.val) % self.p, self)
def neg(self, a):
'''
field operation: opposite mod p
'''
return FieldElem((self.p - a.val ) % self.p, self)
def mul(self, a, b):
'''
field operation: multiplication of field elements
'''
"""
if isinstance(a,FieldElem) and isinstance(b, FieldElem) and not a.F == b.F :
raise Exception("multiplication between elements of different fields")
"""
if not isinstance(b,FieldElem) :
# Multiplication by a scalar
if b<0:
return self.smul(-a,-b)
return self.smul(a,b)
else:
return self.pmul(a,b)
def smul(self,a,b):
''' Return a*b where a or b is scalar
'''
if not isinstance(b,FieldElem):
# b is scalar
#return self.dbleAndAdd(a,a,b)
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
#return self.pmul(a,a.F.elem(b))
else :
# a is scalar
#return self.dbleAndAdd(b,b,a)
return self.smul(b,a)
def sm(self,b,a):
''' Quick multiplication between a field element a and a scalar b
'''
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
def pmul(self,a,b):
''' product between two field element in Fp
'''
return FieldElem((a.val * b.val) % self.p, self)
def dbleAndAdd(self,P,Pp,n):
'return n*P using double and add technique'
#print "dblaad"
if n == 0 :
return self.zero();
if n == 1 :
return P
elif n%2 == 1 :
Q = self.dbleAndAdd(P,Pp,(n-1)/2)
return P+Q+Q
elif n%2 == 0 :
Q = self.dbleAndAdd(P,Pp,n/2)
return Q+Q
def powop(self, a, b):
'return a**b'
m = gmpy.mpz(1)
#self.count = 0
'exponentiation by a scalar'
if not isinstance(b, int) and not isinstance(b, long) and not type(b)==type(m):
raise Exception("Exponentation by a non integer, long or mpz")
c = b
if c > self.char-1 or c<0:
c = b%(self.char-1)
#elif :
# return self.powop(a.invert(),(-c))
if c == 0 :
assert not a.val%self.char == 0
return self.one()
elif c == 1 :
return a
else :
return self.sqrtAndMultply(a,a, c)
#return FieldElem(pow(a.val,b,self.char))
def sqrtAndMultply(self,P,Pp,n):
'return P**n using square and multiply technique'
if n == 0 :
return self.one()
elif n == 1 :
return P
elif n%2 == 1 :
Q = self.sqrtAndMultply(P,Pp,(n-1)/2)
return P*self.square(Q)
elif n%2 == 0 :
Q = self.sqrtAndMultply(P,Pp,n/2)
return self.square(Q)
def square(self,a):
'''
This method returns the square of a
'''
return FieldElem(pow(a.val,2, self.p), self)
def invert(self,a):
assert not (a.val%self.p == 0) # Do not invert zero!
return FieldElem(gmpy.invert(a.val, self.p), self)
#def invertible(self,a):
#return not int(a.invert().val) == 0
def div(self,a,b):
assert not (b.val%self.p == 0) # Do not invert zero!
return FieldElem((a.val*self.invert(b).val % self.p),self)
def findnonresidue(self):
'''
find a random non quadratic residue in the Field F,
that is, find g that is not a square in F, this is
needed to compute square roots
'''
g=self.random()
while g.isquadres():
#print g, " is quad res in ", self
g = self.random()
return g
def __str__(self):
return "F_"+str(self.p)
def jsonable(self):
return {'type': 'FqField', 'p': self.p}
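# Illustrative sketch of basic prime-field arithmetic with the class above:
#   F = Field(23)
#   a, b = F.elem(7), F.elem(15)
#   (a + b).val          # 22 == (7 + 15) % 23
#   (a * b).val          # 13 == (7 * 15) % 23
#   a * a.invert()       # equals F.one()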
class FieldElem():
def __init__(self, val, F):
'''Creating a new field element.
'''
#assert isinstance(F,Field)
self.F = F
self.val = gmpy.mpz(val)
self.poly = polynom(self.F,[self])
#self.to_fingerprint = ["F", "val"]
#self.to_export = {"fingerprint": ["F"],
# "value": ["val"]}
#super(FieldElem, self).__init__()
def __eq__(self, other):
try:
return ((self.val%self.F.char) == (other.val%self.F.char) and self.F == other.F)
except:
return False
def __add__(self, other):
return self.F.add(self, other)
def __neg__(self):
return self.F.neg(self)
def __sub__(self, other):
return self.F.sub(self, other)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
return self.F.mul(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, e):
return self.F.powop(self, e)
def __div__(self,other):
return self.F.div(self,other)
def __truediv__(self,other):
return self.F.div(self,other)
def __str__(self):
return str(self.val)
def iszero(self):
return self == self.F.zero()
def invert(self):
return self.F.invert(self)
def invertible(self):
return self.F.invertible(self)
def isquadres(self):
''' This method returns True if the element is a quadratic residue mod q
different than zero
it returns False otherwise
'''
if (self+self.F.zero()).iszero() :
# case of element is zero
return False
else :
# If F's order is prime we use Euler's criterium
c = self**((self.F.q-1)/2) #TODO: Optimize this
return c==self.F.one()
def squareroot(self):
''' This method returns the positive square root of
an element of the field
using the Tonelli-Shanks algorithm
Careful: if the element has no square root, the method does not
check for this case and will raise an error. Verification has to be done
before calling the method.
'''
g = self.F.g
if g == None :
g = self.F.findnonresidue()
self.F.g = g
q = self.F.q
s=0
t=self.F.q-1
while t%2==0:
s=s+1
t=t/2
# q-1 = (2**s)*t
e = 0
for i in range(2,s+1):
b = 2**(i-1)
b1 = b*2 # b1 = 2**i
c = ((self)*(g**(-e)))**((q-1)/b1)
if not c==self.F.one() :
e = e+b
h = self*(g**(-e))
b = (g**(e/2))*(h**((t+1)/2))
assert b**2 == self # FAILURE to find square root
return b
def fingerprint(self):
return fingexp.fingerprint(self.val)
def jsonable(self):
return {'type': 'FieldElem', 'F': self.F, 'val': self.val}
class ExtensionField(Field):
'''
This class defines extension fields and inherits field methods.
Depending on the degree of the extension field, we use
different algorithms to optimize the operations
'''
def __init__(self,F,irpoly,g=None,rep=None):
'''Define the base Field or extension Field and the irreducible polynomial
F is the base field on top of which the extension
field is built
irpoly is the irreducible polynomial used to build
the extension field as F/irpoly
g is a non quadratic residue used to compute square
roots, if it is set to None, computing a square root
will initialize g
rep is the representation of the root of irpoly
(note that letter 'A' is reserved for the Complex extension field)
'''
self.F = F
self.irpoly = irpoly
self.deg = len(irpoly.coef) # degree of the irreducible polynomial + 1
assert self.deg > 0
self.q = self.F.q**(self.deg-1) # order of the Field
self.tabular = self.table()
if rep == None :
self.rep = rd.choice(['B','C','D','E','F','G','H','J','K','L'])
#Choose a random representation letter
else :
self.rep = rep
self.char = F.char
self.primefield = gmpy.is_prime(self.char)
self.g = g # g is needed to compute square roots, it is a non quadratic residue
self.to_fingerprint = ["F","irpoly"]
self.to_export = {"fingerprint": [],"value": ["F","irpoly"]}
def one(self):
'unit element for multiplication'
One = [self.F.zero()]*(self.deg-1)
One[self.deg-2]= self.F.one()
return ExtensionFieldElem(self,polynom(self.F,One))
def zero(self):
'unit element for addition'
Zero = [self.F.zero()]*(self.deg-1)
return ExtensionFieldElem(self,polynom(self.F,Zero))
def unit(self):
''' root of the irreducible polynomial
e.g. return element 1*A+0 (or the complex value i) if the irpoly is X**2+1
'''
I = self.zero()
I.poly.coef[-2]=self.F.one()
return I
def elem(self,x):
''' Provided that x belongs to F, return an element of the extension field
of value x
'''
P = self.zero()
P.poly.coef[-1] = x
return P
def random(self):
''' Return a random element of the Extension Field
'''
polycoef = [0]*(self.deg-1)
for i in range(self.deg-1):
polycoef[i] = self.F.random()
poly = polynom(self.F,polycoef)
return ExtensionFieldElem(self,poly)
def __eq__(self, other):
'testing if we are working in the same extension field'
try:
return (self.F == other.F and self.irpoly == other.irpoly)
except:
return False
def add(self, a, b):
'''
field operation: addition of polynomials > addition of coefficients in the appropriate field
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
polysum = [0]*a.deg
for i in range(a.deg):
polysum[i]=a.poly.coef[i]+b.poly.coef[i]
P = polynom(self.F,polysum)
return ExtensionFieldElem(self,P)
def sub(self, a, b):
'''
field operation: subtraction of polynomials > subtraction of each coefficient in the appropriate field
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
c = self.neg(b)
return self.add(a,c)
def neg(self, a):
'''
field operation: opposite of a polynomial > opposite of each coefficient in the appropriate field
'''
#assert a.F.F == self.F
ap = [0]*a.deg
for i in range(a.deg):
ap[i] = -a.poly.coef[i]
P = polynom(self.F,ap)
return ExtensionFieldElem(self,P)
def smul(self,a,b):
''' Return a*b where a or b is scalar
'''
if not isinstance(b,FieldElem):
# b is scalar
A = a.poly.coef
Pc = [0]*len(A)
for i in range(len(Pc)):
Pc[i] = A[i]*gmpy.mpz(b)
return ExtensionFieldElem(self,polynom(self.F,Pc))
else :
# a is scalar
return self.smul(b,a)
def pmul(self,a,b):
'''Multiplication between polynomials
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
# Simpler notations for reading
A = a.poly.coef
B = b.poly.coef
k = self.deg-1 # degree of the extension field
if k == 2 and self.F.rep =='A':
# We are in the case that the extension field is Fp2
# We assume here that the irreducible polynomial is X**2+1 (beta=-1)
# Complex multiplication
a0,a1,b0,b1 = A[0].val,A[1].val,B[0].val,B[1].val
p = self.char
v0 = a0*b0
v1 = a1*b1
c0 = ((a0+a1)*(b0+b1)-v0-v1)%p
c1 = (v1-v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c0e,c1e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
# In this case, use Karatsuba multiplication algorithm
# notations
a0 = A[0]
a1 = A[1]
b0 = B[0]
b1 = B[1]
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,b0)
v1 = self.F.pmul(a1,b1)
c0 = self.F.pmul((a0+a1),(b0+b1))-v0-v1 # coefficient of X
c1 = v1 + self.F.pmul(v0,beta) # independant term
cp = polynom(self.F,[c0,c1])
C = ExtensionFieldElem(self,cp)
return C
elif k == 3:
# In this case, use Karatsuba multiplication algorithm
# notations
a0,a1,a2 = A
b0,b1,b2 = B
beta = -self.irpoly.coef[-1]
v0,v1,v2 = self.F.pmul(a0,b0), self.F.pmul(a1,b1), self.F.pmul(a2,b2)
c0 = self.F.pmul((a0+a2),(b0+b2))-v0+v1-v2 # coefficient of X**2
c1 = self.F.pmul((a2+a1),(b2+b1))-v2-v1+self.F.pmul(beta,v0) # coefficient of X
c2 = v2+self.F.pmul(beta,(self.F.pmul((a1+a0),(b1+b0))-v1-v0)) # independant term
cp = polynom(self.F,[c0,c1,c2])
C = ExtensionFieldElem(self,cp)
return C
else :
prod = convolve(A,B)
return self.reduc2(prod) # return EProd % ired. polynomial
def square(self,a):
''' This algorithm returns the square of a in the field
using different methods if the degree of the extension
is 2,3 or more
'''
#print a.F
#print self
assert a.F == self
if not a.deg == self.deg-1 :
a = self.reduc(a)
#notations
A = a.poly.coef
k = self.deg-1 # degree of the extension
if k == 2 and self.F.rep == 'A':
# Using the complex multiplication
# We are in the case that the extension field is Fp2
# We assume here that the irreducible polynomial is X**2+1 (beta=-1)
a1, a0 = A[0].val,A[1].val
p = self.char
v0 = a0*a1
c0 = ((a0+a1)*(a0-a1))%p
c1 = (v0+v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c1e,c0e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
# Using the complex multiplication
a1, a0 = A
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,a1)
c0 = self.F.pmul((a0+a1),(a0+self.F.pmul(a1,beta)))-v0-self.F.pmul(beta,v0)
c1 = v0+v0
cp = polynom(self.F,[c1,c0])
return ExtensionFieldElem(self,cp)
elif k == 3:
# Using Chung-Hasan Squaring2
a2,a1,a0 = A
#print a0
#print 'a0',a0.F, a0.F.deg-1
#print 'self',self.F, self.F.deg-1
assert a0.F == self.F
beta = -self.irpoly.coef[-1]
s0 = self.F.square(a0)
t1 = self.F.pmul(a0,a1)
s1 = t1+t1
s2 = self.F.square((a0-a1+a2))
t3 = a1*a2
s3 = t3+t3
s4 = self.F.square(a2)
c0 = s0 + self.F.pmul(beta,s3)
c1 = s1 + self.F.pmul(beta,s4)
c2 = s1 + s2 + s3 - s0 -s4
cp = polynom(self.F,[c2,c1,c0])
return ExtensionFieldElem(self,cp)
else :
return self.F.pmul(a,a)
def invert(self,a):
''' This method returns the inverse of a in the field
The inverse is computed by determining the Bezout coefficient using the
extended Euclide's algorithm or by specialized algorithms depending
on the degree of the extension (2 or 3)
'''
#assert self.invertible(a) #The element must be invertible
assert a.F == self
k = self.deg-1
if k == 2 and self.F.rep == 'A':
# inversion in a field of characteristic 2 over prime field
# We are in the case that the extension field is Fp2
# We assume here that the irreducible polynomial is X**2+1 (mod=-1)
A = a.poly.coef
a1,a0 = A[0].val,A[1].val # a = a0+a1*i
p = self.char
norm = a0*a0+a1*a1
invnorm = gmpy.invert(norm,p)
c0 = (a0*invnorm) % p
c1 = (-a1*invnorm) % p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
invap = polynom(self.F,[c1e,c0e])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 2 :
# inversion in a field of characteristic 2 over prime field
A = a.poly.coef
a1,a0 = A[0],A[1] # a = a0+a1*i
#print 'A',A
#print 'a1',a1
mod = self.irpoly.coef[-1] # i**2 = -mod
#a1b,a0b,modb = self.F.elem(a1), self.F.elem(a0),self.F.elem(mod)
#print 'a1b',a1b
#a1b2 = self.F.square(a1b)
a12 = self.F.square(a1)
#mid = self.F.pmul(a1b2,modb)
mid = self.F.pmul(a12,mod)
#norm = self.F.square(a0b)+mid
norm = self.F.square(a0)+mid
#invnorm = self.F.invert(a0**2+mod*a1**2)
#invnorm = self.F.invert(norm.poly.coef[-1])
invnorm = self.F.invert(norm)
c = self.F.pmul(a0,invnorm) # c = -a1/(a0**2+mod*a1**2)
d = -self.F.pmul(a1,invnorm)
invap = polynom(self.F,[d,c])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 3 :
# inversion in char. 3 field
A = a.poly.coef
a2,a1,a0 = A[0],A[1],A[2]
mod = -self.irpoly.coef[-1]
z0 = self.F.zero()
z1 = self.F.one()
if a0 == z0:
#a0 = 0
if a1 == z0:
#a1 = 0
c0,c1,c2 = z0, self.F.invert(self.F.pmul(a2,mod)), z0
elif a2 == z0:
#a2 = 0
c0,c1,c2 = z0,z0,self.F.invert(self.F.pmul(a1,mod))
else :
#a1,a2 != 0
a22 = self.F.square(a2)
a12 = self.F.square(a1)
c2 = self.F.pmul(a12,self.F.invert((self.F.pmul(self.F.pmul(a22,a2),mod)+self.F.pmul(self.F.pmul(a12,a1),mod))))
c1 = self.F.pmul((z1-self.F.pmul(self.F.pmul(a1,c2),mod)),self.F.invert(self.F.pmul(a2,mod)))
c0 = self.F.pmul((-(self.F.pmul(self.F.pmul(a2,mod),c2))),self.F.invert(a1))
else :
#a0 != 0
if a1 == z0 and a2 == z0:
#a1 = 0 , a2 = 0
c0,c1,c2 = self.F.invert(a0),z0,z0
else :
a12 = self.F.pmul(a1,a2)
a12m = self.F.pmul(a12,mod)
a00 = self.F.square(a0)
abis = a00-a12m
if abis == z0:
#a0**2-(a1*a2*mod) = 0
a11 = self.F.square(a1)
a22 = self.F.square(a2)
a02 = self.F.pmul(a0,a2)
a01 = self.F.pmul(a0,a1)
c2 = self.F.pmul(-a1,self.F.invert(self.F.pmul((a02-a11),mod)))
c1 = self.F.pmul(-a2,self.F.invert(a01-self.F.pmul(a22,mod)))
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul((z1-self.F.pmul(a1c2+a2c1,mod)),self.F.invert(a0))
else :
#a0**2-(a1*a2*mod) != 0
if a1 == z0:
#a1 = 0
inva0 = self.F.invert(a0)
a02 = self.F.pmul(a0,a2)
a000 = self.F.pmul(a00,a0)
a22 = self.F.square(a2)
a222 = self.F.pmul(a22,a2)
mm = self.F.square(mod)
a222mm = self.F.pmul(a222,mm)
c2 = self.F.pmul(-a02,self.F.invert(a000+a222mm))
a02m = self.F.pmul(a02,mod)
a02mc2 = self.F.pmul(a02m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(-a02mc2,inva00)
a2m = self.F.pmul(a2,mod)
a2mc1 = self.F.pmul(a2m,c1)
c0 = self.F.pmul(z1-a2mc1,inva0)
elif a2 == z0:
#a2 = 0
a11 = self.F.square(a1)
a111 = self.F.pmul(a11,a1)
a000 = self.F.pmul(a00,a0)
a111m = self.F.pmul(a111,mod)
inva0 = self.F.invert(a0)
c2 = self.F.pmul(a11,self.F.invert(a111m+a000))
a11m = self.F.pmul(a11,mod)
a11mc2 = self.F.pmul(a11m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(a11mc2-a1,inva00)
a1m = self.F.pmul(a1,mod)
a1mc2 = self.F.pmul(a1m,c2)
c0 = self.F.pmul(z1-a1mc2,inva0)
else :
#a1,a2 != 0
a01 = self.F.pmul(a0,a1)
a22 = self.F.square(a2)
a22m = self.F.pmul(a22,mod)
a02 = self.F.pmul(a0,a2)
a11 = self.F.square(a1)
abus = a01-a22m
abos = self.F.pmul(a02-a11,mod)
invabis = self.F.invert(abis)
abb = self.F.pmul(abus,invabis)
abb1 = self.F.pmul(abb,a1)
abbbos = self.F.pmul(abb,abos)
c2 = self.F.pmul(abb1-a2,self.F.invert(abis-abbbos))
abosc2 = self.F.pmul(abos,c2)
c1 = self.F.pmul(-a1-abosc2,invabis)
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul(z1-self.F.pmul(a1c2+a2c1,mod),self.F.invert(a0))
invap = polynom(self.F,[c2,c1,c0])
inva = ExtensionFieldElem(self,invap)
return inva
else :
# inversion in a field of char. != 2,3
# this inversion takes a longer time (than previous method)
# it uses extended Euclid's algorithm
P = ExtensionFieldElem(self,self.irpoly)
r,u,v = self.extendedeuclide(P,a)
n,d = r.poly.truedeg()
assert n == self.deg-2
c = r.poly.coef[len(r.poly.coef)-1].invert()
cp = polynom(self.F,[c])
ce = ExtensionFieldElem(self,cp)
return ce*v
def invertible(self,a):
''' Return True if a is invertible
'''
return not self.reduc(a)==self.zero()
def div(self,a,b):
return a*self.invert(b)
def eucldiv(self,a,b):
''' Return a/b and a%b
a and b are of length d-1 where d is the degree of the irreducible polynomial
'''
zero = self.F.zero()
izero = self.zero()
d = self.deg
assert not b.poly.iszero() # Do not divide by zero
if a.poly.iszero() :
return izero, izero # quotient is zero, remain is zero
elif a == b:
return self.one(), izero # quotient is one, remain is zero
#Notations
A = a.poly.coef
B = b.poly.coef
n, da = a.poly.truedeg() # position of first non zero elem of a and degree of a
m, db = b.poly.truedeg() # same for b
if da<db :
# deg(a)<deg(b)
return izero, a # quotient is zero, remain is a
elif da==db:
#deg(a)=deg(b)
deg = max(d-1,da)
rc = [zero]*(deg)
qc = [zero]*(deg)
q = A[n]/B[m]
for i in range(1,deg):
rc[i] = A[n+i]-q*B[m+i]
qc[deg-1] = q
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
else :
# deg(a)>deg(b)
deg = max(d-1,da)
p = deg - da
rc = [zero]*(deg)
qc = [zero]*(deg)
rc[deg-da:] = A[n:]
pm=0
while p+pm+db<deg+1:
#k is the position of the index of the quotient
k = deg-(da-db)-1+pm
qc[k] = rc[p+pm]/B[m]
for i in range(db):
rc[i+p+pm] = rc[i+p+pm]- qc[k]*B[m+i]
pm=pm+1
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
def reduc(self,a):
''' Return a % self.irpoly
The polynomial a = [a_0,...,a_n-1] is returned modulo the irreducible polynomial
The reduced polynomial has length at most d-1 where d is the length
of the irreducible polynomial
'''
assert a.F.F == self.F
if a.poly.iszero() :
return self.zero()
elif a.poly == self.irpoly :
return self.zero()
elif a.deg < self.deg :
c = [self.F.zero()]*(self.deg-1-a.deg)
newacoef = c+a.poly.coef
newapoly= polynom(self.F, newacoef)
newaelem = ExtensionFieldElem(self, newapoly)
return newaelem
else :
# Case where a is not zero or the irreducible polynomial and deg(a)>=deg(irpoly)
q,r = self.eucldiv(a,ExtensionFieldElem(self,self.irpoly))
r = self.trunc(r)
return self.reduc(r)
def reduc2(self,a):
''' a is a list of length (d-1)*2-1 (polynomial length)
this method returns the equivalent element of length d-1
using the table of equivalences (build from the irreducible polynomial)
in the function self.table()
'''
As = a[:(self.deg-2)]
Ad = a[(self.deg-2):]
b = list(dot(As,self.tabular)+Ad)
newapoly = polynom(self.F,b)
newa = ExtensionFieldElem(self,newapoly)
return newa
def trunc(self,a):
'''Return an ExtensionFieldElem of length d-1 where d = deg(irpoly)
'''
d = self.deg
if a.deg == d-1:
return a
c = a.poly.coef[a.deg-d+1:] # the (d-1) last elements of a
cp = polynom(self.F,c)
return ExtensionFieldElem(self,cp)
def table(self):
''' This method returns a table (usually) stored in self.tabular
which is used to compute reduction after a multiplication
between two elements
'''
d = self.deg
T = zeros((d-2,d-1),dtype=object_)
Pc = self.irpoly.coef[1:]
for i in range(0,d-2):
Qc = [self.F.zero()]*(2*(d-1)-1)
Qc[i+1:i+d] = Pc
Qp = polynom(self.F,Qc)
Qe = ExtensionFieldElem(self,Qp)
Q = self.reduc(-Qe)
T[i] = array(Q.poly.coef)
return T
def extendedeuclide(self,a,b):
'''Return s,u,v such that s = ua + vb, s is the gcd of a and b
This method is used to compute the inverse of a mod b (when s=1)
'''
#init
one = self.one()
zero = self.zero()
s = a
u = one
v = zero
sp = b
up = zero
vp = one
#loop : invariants are s = ua+vb and sp = up*a+vp*b
while not sp.poly.iszero() :
q,r = self.eucldiv(s,sp)
s,u,v,sp,up,vp = sp, up, vp, r, u-up*q,v-vp*q
return self.reduc(s),self.reduc(u),self.reduc(v)
def __str__(self):
return str(self.F)+"/"+str(self.irpoly)
def jsonable(self):
return {'type': 'Field Extension', 'F': self.F, 'irpoly': self.irpoly, 'degree':self.deg-1}
class ExtensionFieldElem(FieldElem):
def __init__(self,F,poly):
'''Define the Extension Field and the representative polynomial
'''
self.F = F
self.poly = poly
self.siz = len(poly.coef)
self.deg = self.siz
def __str__(self):
x = self.F.rep
p = self.poly
s = '('
if self.siz == 1 :
s = s+str(p.coef[0])
if self.siz == 2 :
s = s+str(p.coef[0])+'*'+x+' + '+str(p.coef[1])
if self.siz > 2 :
s =s+str(p.coef[0])+'*'+x+'**'+str(self.siz-1)
for i in range(1,self.siz-2):
s = s+' + '+str(p.coef[i])+'*'+x+'**'+str(self.siz-1-i)
s = s+' + '+str(p.coef[self.siz-2])+'*'+x +' + '+str(p.coef[self.siz-1])
return s+')'
def __eq__(self,other):
try:
return self.F == other.F and self.poly == other.poly
except:
return False
def fingerprint(self):
return self.poly.fingerprint()
def jsonable(self):
return {'type': 'ExtensionFieldElem', 'F': self.F, 'poly': self.poly, 'size': self.siz}
class polynom:
''' This class represents a polynomial written P = c_nX**n+...c_1X+c_0
c_0,...,c_n are in the Field F (which can be an ExtensionField) so they are either FieldElem or ExtensionFieldElem
coef is a list : coef = [c_n,...,c_0] of length n+1
'''
def __init__(self,F,coef):
self.F = F # The field in which coefficients belong
if isinstance(coef,list):
self.coef = coef # A list of coefficients in decreasing order (by convention) of the polynomial's degree
self.deg = len(coef) # The degree+1 of the polynomial
else :
#coef is not a list but a single element
self.coef = [coef]
self.deg = 1
def __eq__(self,other):
try:
return (self.F == other.F and self.coef == other.coef)
except:
return False
def __str__(self):
# Not consistent with representation letter of the fields
x = self.F.rep
if x == None:
x = 'X'
s = '('
if self.deg == 1 :
s = s+str(self.coef[0])
if self.deg == 2 :
s = s+str(self.coef[0])+'*'+x+' + '+str(self.coef[1])
if self.deg > 2 :
s =s+str(self.coef[0])+'*'+x+'**'+str(self.deg-1)
for i in range(1,self.deg-2):
s = s+' + '+str(self.coef[i])+'*'+x+'**'+str(self.deg-1-i)
s = s+' + '+str(self.coef[self.deg-2])+'*'+x +' + '+str(self.coef[self.deg-1])
return s+')'
def fingerprint(self):
L = []
for c in self.coef:
L.append(c.fingerprint())
return fingexp.fingerprint(L)
def iszero(self):
'''Return True if it is a zero polynomial (each coefficient is zero)
This does not return True if the polynomial is the polynomial that generates the extension field
'''
cond = True
for i in self.coef:
pcond = i.iszero()
cond = pcond*cond
return cond
def truedeg(self):
'''Return the position of the first non zero coefficient and the actual degree of the polynomial
'''
if self.iszero():
return 0,0
n = 0
while self.coef[n]==self.F.zero():
n = n+1
# n is the position of the first non zero coeff of the polynomial
return n, self.deg-n # position and actual degree of the polynomial
def jsonable(self):
return {'type': 'polynomial', 'F': self.F, 'coeficients': self.coef, 'degree': self.deg}
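# Minimal illustrative self-test of the classes above; it is only a usage
# sketch and assumes gmpy is installed, as the rest of this module does.
if __name__ == "__main__":
    F = Field(23)
    a, b = F.elem(7), F.elem(15)
    assert (a + b).val == (7 + 15) % 23
    assert (a * b).val == (7 * 15) % 23
    assert a * a.invert() == F.one()
    assert (a ** 5).val == pow(7, 5, 23)
    # polynom stores coefficients in decreasing degree, so [1, 0, 1] is X**2 + 1
    E = ExtensionField(F, polynom(F, [F.one(), F.zero(), F.one()]))
    x = E.random()
    if E.invertible(x):
        assert x * E.invert(x) == E.one()
    print("field.py usage sketch ran without assertion errors")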
| mathTools/field.py | 34,962 | This class defines extension fields and inherits field methods.
Depending on the degree of the extension field, we use
different algorithms to optimize the operations
Class for Field
This class represents a polynomial written P = c_nX**n+...c_1X+c_0
c_0,...,c_n are in the Field F (which can be an ExtensionField) so they are either FieldElem or ExtensionFieldElem
coef is a list : coef = [c_n,...,c_0] of length n+1
testing if we are working in the same field
testing if we are working in the same extension field
Defines the modulus p which must be a prime
Creating a new field element.
Define the base Field or extension Field and the irreducible polynomial
F is the base field on top of which the extension
field is built
irpoly is the irreducible polynomial used to build
the extension field as F/irpoly
g is a non quadratic residue used to compute square
roots, if it is set to None, computing a square root
will initialize g
rep is the representation of the root of irpoly
(note that letter 'A' is reserved for the Complex extension field)
Define the Extension Field and the representative polynomial
field operation: addition mod p
field operation: addition of polynomial > addition of coefficients in the appropriate field
return n*P using double and add technique
return an element of value x
Provided that x belongs to F, return an element of the extension field
of value x
Return a/b and a%b
a and b are of length d-1 where d is the degree of the irreducible polynomial
Return s,u,v such as s = ua + vb, s is the gcd of a and b
This method is used to compute the inverse of a mod b (when s=1)
find a random non quadratic residue in the Field F,
that is, find g that is not a square in F, this is
needed to compute square roots
This method returns the inverse of a in the field
The inverse is computed by determining the Bezout coefficient using the
extended Euclide's algorithm or by specialized algorithms depending
on the degree of the extension (2 or 3)
Return True if a is invertible
This method returns True if the element is a quadratic residue mod q
different than zero
it returns False otherwise
Return True if it is a zero polynomial (each coefficient is zero)
This does not return True if the polynomial is the polynomial that generates the extension field
field operation: multiplication of field elements
field operation: opposite mod p
field operation: opposite of a polynomial > opposite of each coefficient in appropriate field
unit element for multiplication
unit element for multiplication
product between two field element in Fp
Multiplication between polynomials
return a**b
Return a random element of the Field
Return a random element of the Extension Field
Return a % self.irpoly
The polynomial a = [a_0,...,a_n-1] is returned modulo the irreducible polynomial
The reduced polynomial has length at most d-1 where d is the length
of the irreducible polynomial
a is a list of length (d-1)*2-1 (polynomial length)
this method returns the equivalent element of length d-1
using the table of equivalences (build from the irreducible polynomial)
in the function self.table()
Quick multiplication between a field element a and a scalar b
Return a*b where a or b is scalar
Return a*b where a or b is scalar
return P**n using square and multiply technique
This method returns the square of a
This algorithm returns the square of a in the field
using different methods if the degree of the extension
is 2,3 or more
This method returns the positive square root of
an element of the field
using the Tonelli-Shanks algorithm
Careful: if the element has no square root, the method does not
check this case and raises an error. Verification has to be done
before calling the method.
field operation: subtraction mod p
field operation: subtraction of polynomials > subtraction of each coefficient in the appropriate field
This method returns a table (usually) stored in self.tabular
which is used to compute reduction after a multiplication
between two elements
Return the position of the first non zero coefficient and the actual degree of the polynomial
Return an ExtensionFieldElem of length d-1 where d = deg(irpoly)
root of the irreducible polynomial
e.g. return element 1*A+0 (or the complex value i) if the irpoly is X**2+1
unit element for addition
unit element for addition
Created on 2013-2014
Author : Edouard Cuvelier
Affiliation : Université catholique de Louvain - ICTEAM - UCL Crypto Group
Address : Place du Levant 3, 1348 Louvain-la-Neuve, BELGIUM
email : firstname.lastname@uclouvain.be
-*- coding: utf-8 -*- prime modulus characteristic order+1 TODO : correct? Multiplication by a scalar b is scalarreturn self.dbleAndAdd(a,a,b)return self.pmul(a,a.F.elem(b)) a is scalarreturn self.dbleAndAdd(b,b,a)print "dblaad"self.count = 0elif : return self.powop(a.invert(),(-c))return FieldElem(pow(a.val,b,self.char)) Do not invert zero!def invertible(self,a):return not int(a.invert().val) == 0 Do not invert zero!print g, " is quad res in ", selfassert isinstance(F,Field)self.to_fingerprint = ["F", "val"]self.to_export = {"fingerprint": ["F"], "value": ["val"]}super(FieldElem, self).__init__() case of element is zero If F's order is prime we use Euler's criteriumTODO: Optimize this q-1 = (2**s)*t b1 = 2**i FAILURE to find square root degree of the irreducible polynomial + 1 order of the FieldChoose a random representation letter g is needed to compute square roots, it is a non quadratic residueassert a.F == b.F and a.F.F == self.Fassert a.F == b.F and a.F.F == self.Fassert a.F.F == self.F b is scalar a is scalarassert a.F == b.F and a.F.F == self.F Simpler notations for reading degree of the externsion field We are in the case that the extension field is Fp2 We assume here that the irreductible polynom is X**2+1 (beta=-1) Complex multiplication In this case, use Karatsuba multiplication algorithm notations coefficient of X independant term In this case, use Karatsuba multiplication algorithm notations coefficient of X**2 coefficient of X independant term return EProd % ired. polynomialprint a.Fprint selfnotations degree of the extension Using the complex multiplication We are in the case that the extension field is Fp2 We assume here that the irreductible polynom is X**2+1 (beta=-1) Using the complex multiplication Using Chung-Hasan Squaring2print a0print 'a0',a0.F, a0.F.deg-1print 'self',self.F, self.F.deg-1assert self.invertible(a) The element must be invertible inversion in a field of characteristic 2 over prime field We are in the case that the extension field is Fp2 We assume here that the irreductible polynom is X**2+1 (mod=-1) a = a0+a1*i inversion in a field of characteristic 2 over prime field a = a0+a1*iprint 'A',Aprint 'a1',a1 i**2 = -moda1b,a0b,modb = self.F.elem(a1), self.F.elem(a0),self.F.elem(mod)print 'a1b',a1ba1b2 = self.F.square(a1b)mid = self.F.pmul(a1b2,modb)norm = self.F.square(a0b)+midinvnorm = self.F.invert(a0**2+mod*a1**2)invnorm = self.F.invert(norm.poly.coef[-1]) c = -a1/(a0**2+mod*a1**2) inversion in char. 3 fielda0 = 0a1 = 0a2 = 0a1,a2 != 0a0 != 0a1 = 0 , a2 = 0a0**2-(a1*a2*mod) = 0a0**2-(a1*a2*mod) != 0a1 = 0a2 = 0a1,a2 != 0 inversion in a field of char. 
!= 2,3 this inversion takes a longer time (than previous method) it uses extended Euclid's algorithm Do not divide by zero quotient is zero, remain is zero quotient is one, remain is zeroNotations position of first non zero elem of a and degree of a same for b deg(a)<deg(b) quotient is zero, remain is adeg(a)=deg(b) deg(a)>deg(b)k is the position of the index of the quotient Case where a is not zero or the irreducible polynomial and deg(a)>=deg(irpoly) the (d-1) last elements of ainitloop : invariants are s = ua+vb and sp = up*a+vp*b The field in which coeficients belong A list of coeficient in decreasing order (by convention) of the polynomial's degree The degree+1 of the polynomialcoef is not a list but a single element Not consistent with representation letter of the fields n is the position of the first non zero coeff of the polynomial position and actual degree of the polynomial | 8,221 | en | 0.784241 |
# -*- coding: utf-8 -*-
# Copyright (C) 2019 - 2020 by Pedro Mendes, Rector and Visitors of the
# University of Virginia, University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CFunctionParameter(unittest.TestCase):
def setUp(self):
self.functions=COPASI.CRootContainer.getFunctionList()
self.function=self.functions.findFunction("Iso Uni Uni")
self.assert_(self.function!=None)
self.assert_(self.function.__class__==COPASI.CFunction)
self.parameters=self.function.getVariables()
self.assert_(self.parameters!=None)
self.assert_(self.parameters.__class__==COPASI.CFunctionParameters)
index=self.parameters.findParameterByName("Keq",COPASI.CFunctionParameter.DataType_FLOAT64)
self.parameter=self.parameters.getParameter(index)
self.assert_(self.parameter!=None)
self.assert_(self.parameter.__class__==COPASI.CFunctionParameter)
def test_getKey(self):
key=self.parameter.getKey()
self.assert_(type(key)==str)
def test_getType(self):
b=self.parameter.getType()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.DataType_FLOAT64)
def test_setType(self):
t=COPASI.CFunctionParameter.DataType_INT32
self.parameter.setType(t)
self.assert_(self.parameter.getType()==t)
def test_getUsage(self):
b=self.parameter.getUsage()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.Role_PARAMETER)
def test_setUsage(self):
t=COPASI.CFunctionParameter.Role_VOLUME
self.parameter.setUsage(t)
self.assert_(self.parameter.getUsage()==t)
def suite():
tests=[
"test_getKey"
,"test_getType"
,"test_setType"
,"test_getUsage"
,"test_setUsage"
]
return unittest.TestSuite(map(Test_CFunctionParameter,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| copasi/bindings/python/unittests/Test_CFunctionParameter.py | 2,766 | -*- coding: utf-8 -*- Copyright (C) 2019 - 2020 by Pedro Mendes, Rector and Visitors of the University of Virginia, University of Heidelberg, and University of Connecticut School of Medicine. All rights reserved. Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual Properties, Inc., University of Heidelberg, and University of of Connecticut School of Medicine. All rights reserved. Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual Properties, Inc., University of Heidelberg, and The University of Manchester. All rights reserved. Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual Properties, Inc., EML Research, gGmbH, University of Heidelberg, and The University of Manchester. All rights reserved. Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual Properties, Inc. and EML Research, gGmbH. All rights reserved. | 910 | en | 0.83115 |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_apimanagementapiexport_info
version_added: '2.9'
short_description: Get ApiExport info.
description:
- Get info of ApiExport.
options:
resource_group:
description:
- The name of the resource group.
required: true
type: str
service_name:
description:
- The name of the API Management service.
required: true
type: str
api_id:
description:
- >-
API revision identifier. Must be unique in the current API Management
service instance. Non-current revision has ;rev=n as a suffix where n is
the revision number.
required: true
type: str
format:
description:
- >-
Format in which to export the Api Details to the Storage Blob with Sas
Key valid for 5 minutes.
required: true
type: str
export:
description:
- Query parameter required to export the API details.
required: true
type: str
id:
description:
- ResourceId of the API which was exported.
type: str
value:
description:
- The object defining the schema of the exported Api Detail
type: dict
suboptions:
link:
description:
- >-
Link to the Storage Blob containing the result of the export
operation. The Blob Uri is only valid for 5 minutes.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: ApiManagementGetApiExportInOpenApi2dot0
azure_rm_apimanagementapiexport_info:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
format: swagger-link
export: 'true'
- name: ApiManagementGetApiExportInOpenApi3dot0
azure_rm_apimanagementapiexport_info:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
format: openapi-link
export: 'true'
'''
RETURN = '''
api_export:
description: >-
A list of dict results where the key is the name of the ApiExport and the
values are the facts for that ApiExport.
returned: always
type: complex
contains:
apiexport_name:
description: The key is the name of the server that the values relate to.
type: complex
contains:
id:
description:
- ResourceId of the API which was exported.
returned: always
type: str
sample: null
format:
description:
- >-
Format in which the Api Details are exported to the Storage Blob
with Sas Key valid for 5 minutes.
returned: always
type: str
sample: null
value:
description:
- The object defining the schema of the exported Api Detail
returned: always
type: dict
sample: null
contains:
link:
description:
- >-
Link to the Storage Blob containing the result of the export
operation. The Blob Uri is only valid for 5 minutes.
returned: always
type: str
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
from msrestazure.azure_exceptions import CloudError
class AzureRMApiExportInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
service_name=dict(
type='str',
required=True
),
api_id=dict(
type='str',
required=True
),
format=dict(
type='str',
required=True
),
export=dict(
type='str',
required=True
)
)
self.resource_group = None
self.service_name = None
self.api_id = None
self.format = None
self.export = None
self.id = None
self.value = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-01-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMApiExportInfo, self).__init__(self.module_arg_spec, supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if (self.resource_group is not None and
self.service_name is not None and
self.api_id is not None and
self.format is not None and
self.export is not None):
self.results['api_export'] = self.format_item(self.get())
return self.results
def get(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.ApiManagement' +
'/service' +
'/{{ service_name }}' +
'/apis' +
'/{{ api_name }}')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ service_name }}', self.service_name)
self.url = self.url.replace('{{ api_name }}', self.api_id)
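# Illustrative expansion (using the sample values from the EXAMPLES block):
# /subscriptions/<subscription_id>/resourceGroups/myResourceGroup
# /providers/Microsoft.ApiManagement/service/myService/apis/myApi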
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results['temp_item'] = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
self.log('Could not get info for ApiExport.')
return results
def format_item(self, item):
return item
def main():
AzureRMApiExportInfo()
if __name__ == '__main__':
main()
| generated/intermediate/ansible-module-rest/azure_rm_apimanagementapiexport_info.py | 7,615 | !/usr/bin/python Copyright (c) 2019 Zim Kalinowski, (@zikalino) GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) prepare url self.log('Response : {0}'.format(response)) | 210 | en | 0.430175 |
# TODO: By PySCF-1.5 release
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1. code style
# * Indent: 3 -> 4
# * Constant should be all uppercase
# * Function/method should be all lowercase
# * Line wrap around 80 columns
# * Use either double quote or single quote, not mix
#
# 2. Conventions required by PySCF
# * Use PYSCF_TMPDIR to replace _TmpDir
#
# 3. Use proper functions provided by PySCF
#
# This file is adapted with permission from the wmme program of Gerald Knizia.
# See http://sites.psu.edu/knizia/software/
#====================================================
from __future__ import print_function
import numpy as np
from numpy import dot, array
from os import path
from sys import version_info
def GetModulePath():
# (hopefully) return the path of the .py file.
# idea is to leave wmme.py in the same directory as the wmme executable,
# and invoke the scripts that import it via, for example,
# PYTHONPATH=$HOME/dev/wmme:$PYTHONPATH python myscriptfile.py
import inspect
return path.dirname(path.abspath(inspect.getfile(inspect.currentframe())))
if 0:
# set executable/basis library directory explicitly.
_WmmeDir = "/home/cgk/dev/wmme"
else:
# set executable/basis library from path of wmme.py
_WmmeDir = None
_TmpDir = None # if None: use operating system default
_BasisLibDir = None # if None: same as _WmmeDir/bases
#ToAng = 0.5291772108
ToAng = 0.529177209 # molpro default.
def ElementNameDummy():
ElementNames = "X H He Li Be B C N O F Ne Na Mg Al Si P S Cl Ar K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe Cs Ba La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn".split()
ElementNumbers = dict([(o,i) for (i,o) in enumerate(ElementNames)])
return ElementNames, ElementNumbers
ElementNames, ElementNumbers = ElementNameDummy()
def mdot(*args):
"""chained matrix product: mdot(A,B,C,..) = A*B*C*...
No attempt is made to optimize the contraction order."""
r = args[0]
for a in args[1:]:
r = dot(r,a)
return r
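# e.g. mdot(A, B, C) == dot(dot(A, B), C) for conforming arrays:
# mdot(np.eye(2), 2*np.eye(2), np.ones((2, 2))) -> 2*np.ones((2, 2))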
def dot2(A,B): return dot(A.flatten(),B.flatten())
def nCartY(l):
return ((l+1)*(l+2))//2
class FAtom(object):
def __init__(self, Element, Position, Index):
self.Element = Element
self.Pos = Position
self.Index = Index
@property
def Label(self):
# return element and center index combined.
return "%2s%3s"%(self.Element,1 + self.Index)
@property
def iElement(self):
return ElementNumbers[self.Element]
def __str__(self):
return "%s (%6.3f,%6.3f,%6.3f)"%(self.Label, self.Pos[0], self.Pos[1], self.Pos[2])
class FAtomSet(object):
def __init__(self, Positions, Elements, Orientations=None, Name=None):
"""Positions: 3 x nAtom matrix. Given in atomic units (ABohr).
Elements: element name (e.g., H) for each of the positions.
Orientations: If given, a [3,3,N] array encoding the standard
orientation of the given atoms (for replicating potentials!). For
each atom there is a orthogonal 3x3 matrix denoting the ex,ey,ez
directions."""
self.Pos = Positions
assert(self.Pos.shape[0] == 3 and self.Pos.shape[1] == len(Elements))
self.Elements = Elements
self.Orientations = Orientations
self.Name = Name
def MakeXyz(self,NumFmt = "%15.8f",Scale=1.):
Lines = []
for i in range(len(self.Elements)):
Lines.append(" %5s {0} {0} {0}".format(NumFmt) % (\
self.Elements[i], Scale*self.Pos[0,i], Scale*self.Pos[1,i], Scale*self.Pos[2,i]))
return "\n".join(Lines)
def nElecNeutral(self):
"""return number of electrons present in the total system if neutral."""
return sum([ElementNumbers[o] for o in self.Elements])
def fCoreRepulsion1(self, iAt, jAt):
if iAt == jAt: return 0. # <- a core doesn't repulse itself.
ChA, ChB = [ElementNumbers[self.Elements[o]] for o in [iAt, jAt]]
return ChA * ChB / np.sum((self.Pos[:,iAt] - self.Pos[:,jAt])**2)**.5
def fCoreRepulsion(self):
N = len(self.Elements)
Charges = array([ElementNumbers[o] for o in self.Elements])
fCoreEnergy = 0
for i in range(N):
for j in range(i):
fCoreEnergy += self.fCoreRepulsion1(i,j)
#fCoreEnergy += Charges[i] * Charges[j] / np.sum((self.Pos[:,i] - self.Pos[:,j])**2)**.5
return fCoreEnergy
def __str__(self):
Caption = " %5s%15s %15s %15s" % ("ATOM", "POS/X", "POS/Y", "POS/Z")
return Caption + "\n" + self.MakeXyz()
def __len__(self): return len(self.Elements)
def __getitem__(self,key): return FAtom(self.Elements[key], self.Pos[:,key], key)
def __iter__(self):
for (iAt,(Type,Xyz)) in enumerate(zip(self.Elements, self.Pos.T)):
#yield (Type,Xyz)
yield FAtom(Type, Xyz, iAt)
class FBasisShell(object):
"""A generally contracted shell of spherical harmonic basis functions."""
def __init__(self, l, Exp, Co):
self.l = l
assert(isinstance(l,int) and l >= 0 and l <= 8)
self.Exp = np.array(Exp)
assert(self.Exp.ndim == 1)
self.Co = np.array(Co)
assert(self.Co.ndim == 2 and self.Co.shape[0] == len(self.Exp))
self.Element = None # designated element for the basis function
self.Comment = None # comment on the basis function (e.g., literature reference)
@property
def nExp(self):
return len(self.Exp)
@property
def nCo(self):
return self.Co.shape[1]
@property
def nFn(self):
return self.nCo * (2*self.l + 1)
@property
def nFnCa(self):
return self.nCo * nCartY(self.l)
@property
def AngMom(self): return self.l
def __str__(self):
Lines = []
Lines.append("BasisShell [l = %i, nExp = %i, nCo = %i]" % (self.l, self.nExp, self.nCo))
def FmtA(L):
return ", ".join("%12.5f" % o for o in L)
Lines.append(" Exps = [%s]" % FmtA(self.Exp))
for iCo in range(self.nCo):
Lines.append(" Co[%2i] = [%s]" % (iCo, FmtA(self.Co[:,iCo])))
return "\n".join(Lines)
class FBasisShell1(object):
"""A FBasisShell which is placed on a concrete atom."""
def __init__(self, Atom, ShellFn):
self.Atom = Atom
self.Fn = ShellFn
assert(isinstance(self.Fn, FBasisShell))
@property
def Pos(self):
return self.Atom.Pos
@property
def iAtom(self):
return self.Atom.Index
@property
def l(self): return self.Fn.l
@property
def nExp(self): return self.Fn.nExp
@property
def Exp(self): return self.Fn.Exp
@property
def nCo(self): return self.Fn.nCo
@property
def Co(self): return self.Fn.Co
@property
def nFn(self): return self.Fn.nFn
@property
def nFnCa(self): return self.Fn.nFnCa
class FBasisSet(object):
def __init__(self, Shells, Atoms):
# list of FBasisShell1 objects.
self.Shells = Shells
self.Atoms = Atoms
@property
def nFn(self):
n = 0
for Sh in self.Shells:
n += Sh.nFn
return n
@property
def nFnCa(self):
n = 0
for Sh in self.Shells:
n += Sh.nFnCa
return n
def __str__(self):
Lines = []
for o in self.Shells:
Lines.append("Atom %s %s" % (o.Atom, o.Fn))
return "\n".join(Lines)
def FmtCr(self):
#f = 1./ToAng
f = 1.
Lines = []
def Emit(s):
Lines.append(s)
def EmitArray(Name, A):
#Emit(" " + Name + "<" + " ".join("%.16e"%o for o in A) + ">")
Emit(" " + Name + "<" + " ".join("%r"%o for o in A) + ">")
# collect all unique FBasisShell objects.
BasisFns = []
BasisFnIds = {} # map id(BasisFn)->(index)
for Shell in self.Shells:
if id(Shell.Fn) not in BasisFnIds:
BasisFnIds[id(Shell.Fn)] = len(BasisFns)
BasisFns.append(Shell.Fn)
pass
Emit("Basis<Version<0.1> nFns<%i> nShells<%i>" % (len(BasisFns), len(self.Shells)))
# store the function declarations...
def EmitBasisFn(Fn):
Emit(" Fn<Id<%i> l<%i> nExp<%i> nCo<%i>" % (
BasisFnIds[id(Fn)], Fn.l, Fn.nExp, Fn.nCo))
EmitArray("Exp", Fn.Exp)
for Co in Fn.Co.T:
EmitArray("Co", Co)
Emit(" >")
pass
for Fn in BasisFns:
EmitBasisFn(Fn)
# ...and their distribution amongst atoms.
def EmitShell(Sh):
#Emit(" Shell<iAt<%i> x<%.16e> y<%.16e> z<%.16e> FnId<%i>>" % (
Emit(" Shell<iAt<%i> x<%r> y<%r> z<%r> FnId<%i>>" % (
Sh.Atom.Index, f*Sh.Atom.Pos[0], f*Sh.Atom.Pos[1], f*Sh.Atom.Pos[2], BasisFnIds[id(Sh.Fn)]))
pass
for Shell in self.Shells:
EmitShell(Shell)
Emit(">") # end of Basis
return "\n".join(Lines)
def GetAngmomList(self):
# list of all basis function angular momenta in the basis, for converting basis function orders and types.
ls = []
for Shell in self.Shells:
for iCo in range(Shell.nCo):
ls.append(Shell.l)
return ls
class FIntegralContext(object):
"""contains data describing how to evaluate quantum chemistry matrix
elements on an electronic system as defined by the given atoms and basis
sets.
Note: Basis sets must either be basis set names (i.e., library names)
or FBasisSet objects.
"""
def __init__(self, Atoms, OrbBasis, FitBasis=None, BasisLibs=None):
self.Atoms = Atoms
self.OrbBasis = OrbBasis
self.FitBasis = FitBasis
self.BasisLibs = BasisLibs
def _InvokeBfint(self, Args, Outputs=None, Inputs=None, MoreBases=None):
Bases = {}
if self.OrbBasis: Bases['--basis-orb'] = self.OrbBasis
if self.FitBasis: Bases['--basis-fit'] = self.FitBasis
if MoreBases:
Bases = dict(list(Bases.items()) + list(MoreBases.items()))
return _InvokeBfint(self.Atoms, Bases, self.BasisLibs, Args, Outputs, Inputs)
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
"""Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)"""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Outputs = []
Outputs.append(("--save-coreh", "INT1E"))
Outputs.append(("--save-fint2e", "INT2E"))
Outputs.append(("--save-overlap", "OVERLAP"))
CoreH, Int2e, Overlap = self._InvokeBfint(Args, Outputs)
nOrb = CoreH.shape[0]
Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
CoreEnergy = self.Atoms.fCoreRepulsion()
if MakeS:
return CoreEnergy, CoreH, Int2e, Overlap
else:
return CoreEnergy, CoreH, Int2e
def MakeOverlaps2(self, OrbBasis2):
"""calculate overlap between current basis and a second basis, as
described in OrbBasis2. Returns <1|2> and <2|2> matrices."""
Args = []
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs = []
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
#Outputs.append(("--save-overlap", "OVERLAP"))
Overlap2, Overlap12 = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap2, Overlap12
def MakeOverlap(self, OrbBasis2=None):
"""calculate overlap within main orbital basis, and, optionally, between main
orbital basis and a second basis, as described in OrbBasis2.
Returns <1|1>, <1|2>, and <2|2> matrices."""
Args = []
Outputs = []
Outputs.append(("--save-overlap", "OVERLAP_1"))
if OrbBasis2 is not None:
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
else:
MoreBases = None
Overlap, = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap
def MakeNuclearAttractionIntegrals(self, Smh=True):
"""calculate nuclear attraction integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-vnucN", "VNUC_N"))
VNucN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(VNucN.shape[0]**.5 + .5)
assert(nOrb**2 == VNucN.shape[0])
assert(VNucN.shape[1] == len(self.Atoms))
return VNucN.reshape(nOrb, nOrb, VNucN.shape[1])
def MakeNuclearSqDistanceIntegrals(self, Smh=True):
"""calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-rsqN", "RSQ_N"))
RsqN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(RsqN.shape[0]**.5 + .5)
assert(nOrb**2 == RsqN.shape[0])
assert(RsqN.shape[1] == len(self.Atoms))
return RsqN.reshape(nOrb, nOrb, RsqN.shape[1])
def MakeKineticIntegrals(self, Smh=True):
"""calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-kinetic", "EKIN"))
Op = self._InvokeBfint(Args, Outputs)[0]
return Op
def MakeDipoleIntegrals(self, Smh=True):
r"""calculate dipole operator matrices <\mu|w|\nu> (w=x,y,z) in
main basis, for each direction. Returns nAo x nAo x 3 array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-dipole", "DIPN"))
DipN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(DipN.shape[0]**.5 + .5)
assert(nOrb**2 == DipN.shape[0])
assert(DipN.shape[1] == 3)
return DipN.reshape(nOrb, nOrb, 3)
def MakeOrbitalsOnGrid(self, Orbitals, Grid, DerivativeOrder=0):
"""calculate values of molecular orbitals on a grid of 3d points in space.
Input:
- Orbitals: nAo x nOrb matrix, where nAo must be compatible with
self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).
- Grid: 3 x nGrid array giving the coordinates of the grid points.
- DerivativeOrder: 0: only orbital values,
1: orbital values and 1st derivatives,
2: orbital values and up to 2nd derivatives.
Returns:
- nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the
DerivComp dimension is omitted.
"""
Args = [("--eval-orbitals-dx=%s" % DerivativeOrder)]
Inputs = [("--eval-orbitals", "ORBITALS.npy", Orbitals)]\
+ [("--grid-coords", "GRID.npy", Grid)]
Outputs = [("--save-grid-values", "ORBS_ON_GRID")]
(ValuesOnGrid,) = self._InvokeBfint(Args, Outputs, Inputs)
nComp = [1,4,10][DerivativeOrder]
if nComp != 1:
ValuesOnGrid = ValuesOnGrid.reshape((Grid.shape[1], nComp, Orbitals.shape[1]))
return ValuesOnGrid
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e="coulomb"):
"""compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
where the fitting metric is *not* absorbed into the 2e integrals."""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Args.append("--kernel2e='%s'" % Kernel2e)
Args.append("--solve-fitting-eq=false")
Outputs = []
Outputs.append(("--save-fint2e", "INT2E_3IX"))
Outputs.append(("--save-fitting-metric", "INT2E_METRIC"))
Int2e_Frs, Int2e_FG = self._InvokeBfint(Args, Outputs)
nOrb = int(Int2e_Frs.shape[1]**.5 + .5)
assert(nOrb**2 == Int2e_Frs.shape[1])
Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
assert(Int2e_Frs.shape[0] == Int2e_FG.shape[0])
assert(Int2e_FG.shape[0] == Int2e_FG.shape[1])
return Int2e_FG, Int2e_Frs
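# Illustrative usage sketch for FIntegralContext; the xyz file, basis names and
# library file names below are assumptions, not something this module defines:
#   Atoms = ReadAtomsFromXyzFile("h2o.xyz")
#   ctx = FIntegralContext(Atoms, OrbBasis="cc-pVDZ", FitBasis="univ-JKFIT",
#                          BasisLibs=["emsl_cc-pVDZ.libmol"])
#   CoreEnergy, CoreH, Int2e = ctx.MakeBaseIntegrals(Smh=True)
# Internally this shells out to the external 'wmme' executable via _InvokeBfint.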
def _InvokeBfint(Atoms, Bases, BasisLibs, BaseArgs, Outputs, Inputs=None):
"""Outputs: an array of tuples (cmdline-arguments,filename-base).
We will generate arguments for each of them and try to read the
corresponding files as numpy arrays and return them in order."""
from tempfile import mkdtemp
from shutil import rmtree
#from commands import getstatusoutput
from subprocess import check_output, CalledProcessError
# make a directory to store our input/output in.
BasePath = mkdtemp(prefix="wmme.", dir=_TmpDir)
def Cleanup():
rmtree(BasePath)
pass
BfIntDir = _WmmeDir
if BfIntDir is None: BfIntDir = GetModulePath()
BasisLibDir = _BasisLibDir
if BasisLibDir is None:
BasisLibDir = path.join(BfIntDir,"bases")
MakeIntegralsExecutable = path.join(BfIntDir,"wmme")
# assemble arguments to integral generation program
FileNameXyz = path.join(BasePath, "ATOMS")
Args = [o for o in BaseArgs]
Args.append("--matrix-format=npy")
for BasisLib in BasisLibs:
Args.append("--basis-lib=%s" % path.join(BasisLibDir, BasisLib))
Args.append("--atoms-au=%s" % FileNameXyz)
iWrittenBasis = 0
for (ParamName, BasisObj) in Bases.items():
if BasisObj is None:
continue
if isinstance(BasisObj, FBasisSet):
# basis is given as an explicit FBasisSet object.
# Write the basis set to disk and supply the file name as argument
BasisFile = path.join(BasePath, "BASIS%i" % iWrittenBasis)
iWrittenBasis += 1
with open(BasisFile, "w") as File:
File.write(BasisObj.FmtCr())
Args.append("%s='!%s'" % (ParamName, BasisFile))
else:
assert(isinstance(BasisObj, str))
# it's just a basis set name: append the name to the arguments.
# (set will be read from library by wmme itself)
Args.append("%s=%s" % (ParamName, BasisObj))
pass
# make file names and arguments for output arrays
FileNameOutputs = []
for (ArgName,FileNameBase) in Outputs:
FileName = path.join(BasePath, FileNameBase)
FileNameOutputs.append(FileName)
Args.append("%s='%s'" % (ArgName, FileName))
XyzLines = "%i\n\n%s\n" % (len(Atoms), Atoms.MakeXyz("%24.16f"))
# ^- note on the .16f: it actually does make a difference. I had .8f
    # there before, and it led to energy changes on the order of 1e-8
    # when treating only the non-redundant subsystem out of a symmetric
# arrangement.
try:
with open(FileNameXyz, "w") as File:
File.write(XyzLines)
# save input arrays if provided.
if Inputs:
for (ArgName,FileNameBase,Array) in Inputs:
FileName = path.join(BasePath, FileNameBase)
np.save(FileName,Array)
Args.append("%s='%s'" % (ArgName, FileName))
Cmd = "%s %s" % (MakeIntegralsExecutable, " ".join(Args))
#print("!Invoking %s\n" % Cmd)
#iErr, Output = getstatusoutput(Cmd)
#if ( iErr != 0 ):
try:
Output = check_output(Cmd, shell=True)
if (version_info) >= (3,0):
# it returns a byte string in Python 3... which wouldn't be a problem
# if not all OTHER literals were converted to unicode implicitly.
Output = Output.decode("utf-8")
except CalledProcessError as e:
raise Exception("Integral calculation failed. Output was:\n%s\nException was: %s" % (e.output, str(e)))
OutputArrays = []
for FileName in FileNameOutputs:
OutputArrays.append(np.load(FileName))
except:
Cleanup()
raise
# got everything we need. Delete the temporary directory.
Cleanup()
return tuple(OutputArrays)
def ReadXyzFile(FileName,Scale=1./ToAng):
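    """Read an .xyz file; return (Xyz, Atoms) where Xyz is a 3 x nAtom coordinate
    array scaled by Scale (default converts Angstrom to atomic units) and Atoms
    is the list of element names."""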
Text = open(FileName,"r").read()
Lines = Text.splitlines()
# allowed formats: <nAtoms> \n Desc \n <atom-list>
# or: <atom-list> (without any headers)
# in the first case, only the first nAtoms+2 lines are read, in the
# second case everything which does not look like a xyz line is
# ignored.
nAtoms = None
r = 0,-1
if ( len(Lines[0].split()) == 1 ):
nAtoms = int(Lines[0].split()[0])
r = 2,nAtoms+2
Atoms = []
Xyz = []
for Line in Lines:
ls = Line.split()
try:
Atom = ls[0]
x,y,z = float(ls[1]), float(ls[2]), float(ls[3])
except:
continue
Atom = Atom[0].upper() + Atom[1:].lower()
# maybe we should allow for (and ignore) group numbers after the
# elements?
if Atom not in ElementNames:
raise Exception("while reading '%s': unrecognized element '%s'." % (FileName,Atom))
Atoms.append(Atom)
Xyz.append((x,y,z))
Xyz = Scale*array(Xyz).T
if 0:
print("*read '%s':\n%s" % (FileName, str(FAtomSet(Xyz, Atoms))))
return Xyz, Atoms
def ReadAtomsFromXyzFile(FileName, Scale=1./ToAng):
Xyz,Elements = ReadXyzFile(FileName, Scale)
return FAtomSet(Xyz, Elements)
| pyscf/tools/Molpro2Pyscf/wmme.py | 22,035 | A generally contracted shell of spherical harmonic basis functions.
A FBasisShell which is placed on a concrete atom.
contains data describing how to evaluate quantum chemistry matrix
elements on electronic system as defined by the given atoms and basis
sets.
Note: Basis sets must either be basis set names (i.e., library names)
or FBasisSet objects.
Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)
calculate dipole operator matrices <\mu|w|\nu> (w=x,y,z) in
main basis, for each direction. Returns nAo x nAo x 3 array.
calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array.
calculate nuclear attraction integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array.
calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array.
calculate values of molecular orbitals on a grid of 3d points in space.
Input:
- Orbitals: nAo x nOrb matrix, where nAo must be compatible with
self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).
- Grid: 3 x nGrid array giving the coordinates of the grid points.
- DerivativeOrder: 0: only orbital values,
1: orbital values and 1st derivatives,
2: orbital values and up to 2nd derivatives.
Returns:
- nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the
DerivComp dimension is omitted.
calculate overlap within main orbital basis, and, optionally, between main
orbital basis and a second basis, as described in OrbBasis2.
Returns <1|1>, <1|2>, and <2|2> matrices.
calculate overlap between current basis and a second basis, as
described in OrbBasis2. Returns <1|2> and <2|2> matrices.
compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
where the fitting metric is *not* absorbed into the 2e integrals.
Outputs: an array of tuples (cmdline-arguments,filename-base).
We will generate arguments for each of them and try to read the
corresponding files as numpy arrays and return them in order.
Positions: 3 x nAtom matrix. Given in atomic units (ABohr).
Elements: element name (e.g., H) for each of the positions.
Orientations: If given, a [3,3,N] array encoding the standard
orientation of the given atoms (for replicating potentials!). For
each atom there is a orthogonal 3x3 matrix denoting the ex,ey,ez
directions.
chained matrix product: mdot(A,B,C,..) = A*B*C*...
No attempt is made to optimize the contraction order.
return number of electrons present in the total system if neutral.
TODO: By PySCF-1.5 release Copyright 2014-2020 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 1. code style * Indent: 3 -> 4 * Constant should be all uppercase * Function/method should be all lowercase * Line wrap around 80 columns * Use either double quote or single quote, not mix 2. Conventions required by PySCF * Use PYSCF_TMPDIR to replace _TmpDir 3. Use proper functions provided by PySCF This file is adapted with permission from the wmme program of Gerald Knizia. See http://sites.psu.edu/knizia/software/==================================================== (hopefully) return the path of the .py file. idea is to leave wmme.py in the same directory as the wmme executable, and import invoke the scripts using it via, for example, PYTHONPATH=$HOME/dev/wmme:$PYTHONPATH python myscriptfile.py set executable/basis library directory explicitly. set executable/basis library from path of wmme.py if None: use operating system default if None: same as _WmmeDir/basesToAng = 0.5291772108 molpro default. return element and center index combined. <- a core doesn't repulse itself.fCoreEnergy += Charges[i] * Charges[j] / np.sum((self.Pos[:,i] - self.Pos[:,j])**2)**.5yield (Type,Xyz) designated element for the basis function comment on the basis function (e.g., literature reference) list of FBasisShell1 objects.f = 1./ToAngEmit(" " + Name + "<" + " ".join("%.16e"%o for o in A) + ">") collect all unique FBasisShell objects. map id(BasisFn)->(index) store the function declarations... ...and their distribution amongst atoms.Emit(" Shell<iAt<%i> x<%.16e> y<%.16e> z<%.16e> FnId<%i>>" % ( end of Basis list of all basis function angular momenta in the basis, for converting basis function orders and types. assemble arguments to integral generation program ^- calculate integrals in symmetrically orthogonalized AO basisOutputs.append(("--save-overlap", "OVERLAP")) assemble arguments to integral generation program ^- calculate integrals in symmetrically orthogonalized AO basisfrom commands import getstatusoutput make a directory to store our input/output in. assemble arguments to integral generation program basis is given as an explicit FBasisSet object. Write the basis set to disk and supply the file name as argument it's just a basis set name: append the name to the arguments. (set will be read from library by wmme itself) make file names and arguments for output arrays ^- note on the .16f: it actually does make a difference. I had .8f there before, and it lead to energy changes on the order of 1e-8 when treating only non-redundant subsystem out of a symmetric arrangement. save input arrays if provided.print("!Invoking %s\n" % Cmd)iErr, Output = getstatusoutput(Cmd)if ( iErr != 0 ): it returns a byte string in Python 3... which wouldn't be a problem if not all OTHER literals were converted to unicode implicitly. got everything we need. Delete the temporary directory. 
allowed formats: <nAtoms> \n Desc \n <atom-list> or: <atom-list> (without any headers) in the first case, only the first nAtoms+2 lines are read, in the second case everything which does not look like a xyz line is ignored. maybe we should allow for (and ignore) group numbers after the elements? | 6,445 | en | 0.784572 |
__author__ = 'aakilomar'
import requests, json, time
from timeit import default_timer as timer
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
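# Ad-hoc helpers below hit the local API over HTTPS; verify=False disables
# certificate validation (the corresponding urllib3 warnings are silenced above).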
def cancel_event(eventid):
post_url = host + "/api/event/cancel/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def upcomingVotes(groupid):
post_url = host + "/api/event/upcoming/vote/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def upcomingMeeting(groupid):
post_url = host + "/api/event/upcoming/meeting/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def votesPerGroupForEvent(groupid, eventid):
post_url = host + "/api/event/rsvp/totalspergroup/" + str(groupid) + "/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDate(userid, groupid, message, actionbydate):
post_url = host + "/api/logbook/addwithdate/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDateAndAssign(userid, groupid, message, actionbydate, assigntouserid):
post_url = host + "/api/logbook/addwithdateandassign/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate + "/" + str(assigntouserid)
return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message, replicate):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + str(replicate)
return requests.post(post_url,None, verify=False).json()
def listReplicated(groupid):
post_url = host + "/api/logbook/listreplicated/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def listReplicated(groupid, completed):
post_url = host + "/api/logbook/listreplicated/" + str(groupid) + "/" + str(completed)
return requests.get(post_url,None, verify=False).json()
def setInitiatedSession(userid):
post_url = host + "/api/user/setinitiatedsession/" + str(userid)
return requests.post(post_url,None, verify=False).json()
def listReplicatedMessage(groupid, message):
post_url = host + "/api/logbook/listreplicatedbymessage/" + str(groupid) + "/" + message
return requests.get(post_url,None, verify=False).json()
def createAccount(userid,groupid,accountname):
post_url = host + "/api/account/add/" + str(userid) + "/" + str(groupid) + "/" + str(accountname)
return requests.post(post_url,None, verify=False).json()
def ussdStart(phonenumber,enteredUssd):
post_url = host + "/ussd/start?msisdn=" + str(phonenumber)
return requests.get(post_url,None, verify=False)
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def remove_user_from_group(userid,groupid):
post_url = host + "/api/group/remove/userfromgroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def get_user_join_group(userid,groupid):
post_url = host + "/api/group/get/userjoingroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).content
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def add_event(userid,groupid, name):
post_url = host + "/api/event/add/" + str(userid) + "/" + str(groupid) + "/" + name
return requests.post(post_url,None, verify=False).json()
#print cancel_event(5166)
#user = add_user("0823333332")
#user = add_user("0821111111")
#print "user-->" + str(user)
#print rsvp(5167,user['id'],"no")
#print rsvpRequired(user['id'])
#print voteRequired(user['id'])
#print upcomingVotes(231)
#print votesPerGroupForEvent(194,5103)
#print addLogBook(1,85,"X must do Y")
#print addLogBook(1,88,"Somebody must Y",True) # has sub groups
#print addLogBook(1,85,"Somebody must do X",True) # no subgroups
#print listReplicated(88,False)
#print addLogBookWithDateAndAssign(1,21,"aakil must do Y","2015-12-13 08:45:00",588)
#print addLogBookWithDate(1,21,"someone must do Y","2015-12-13 08:45:00")
#print setInitiatedSession(588)
#print(listReplicatedMessage(88,"Somebody must X"))
#print(createAccount(1,21,"acc 21"))
#for i in range(1,7,1):
## start = timer()
# print ussdStart("0826607134","")
# end = timer()
# print(end - start)
#print add_user_to_group(588,82)
#print remove_user_from_group(588,82)
#print get_user_join_group(588,82)
#print voteRequired(817)
print rsvpRequired(817)
print "klaarie"
| docs/tests/adhoc_requests.py | 5,668 | print cancel_event(5166)user = add_user("0823333332")user = add_user("0821111111")print "user-->" + str(user)print rsvp(5167,user['id'],"no")print rsvpRequired(user['id'])print voteRequired(user['id'])print upcomingVotes(231)print votesPerGroupForEvent(194,5103)print addLogBook(1,85,"X must do Y")print addLogBook(1,88,"Somebody must Y",True) has sub groupsprint addLogBook(1,85,"Somebody must do X",True) no subgroupsprint listReplicated(88,False)print addLogBookWithDateAndAssign(1,21,"aakil must do Y","2015-12-13 08:45:00",588)print addLogBookWithDate(1,21,"someone must do Y","2015-12-13 08:45:00")print setInitiatedSession(588)print(listReplicatedMessage(88,"Somebody must X"))print(createAccount(1,21,"acc 21"))for i in range(1,7,1): start = timer() print ussdStart("0826607134","") end = timer() print(end - start)print add_user_to_group(588,82)print remove_user_from_group(588,82)print get_user_join_group(588,82)print voteRequired(817) | 960 | en | 0.442137 |
#!---------------------------------------------------------------------!
#! Written by Madu Manathunga on 07/01/2021 !
#! !
#! Copyright (C) 2020-2021 Merz lab !
#! Copyright (C) 2020-2021 Götz lab !
#! !
#! This source file is a part of QUICK-GenInt code generator and !
#! is subjected to the terms of the Mozilla Public License, v. 2.0. !
#! If a copy of the MPL was not distributed with this file, you can !
#! obtain one at http://mozilla.org/MPL/2.0/. !
#!_____________________________________________________________________!
#!---------------------------------------------------------------------!
#! This source file contains classes necessary for generating one !
#! electron integrals. Note that we use vertical recurrence relations !
#! algorithm developed by Obara and Saika. See J. Chem. Phys. 1986, 84,!
#! 3963−3974 paper for theoretical details. !
#! !
#!---------------------------------------------------------------------!
import src.common.params as params
import src.common.file_handler as file_handler
from src.oei.iclass.OEint import OEint
# [p|f] class, subclass of OEint
class PFint(OEint):
def gen_int(self):
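        """Emit C++ code for the [p|f] integral classes: declarations go to
        self.fhc and definitions to self.fhd, building [p|f] from [s|d] and
        [s|f] auxiliary integrals via the Obara-Saika vertical recurrence,
        for auxiliary indices m = 0..max_m."""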
# write code paths for integrals. Note that we use C++ classes here.
for m in range(0,self.max_m+1):
if m == 0:
self.fhc.write("\n/* PF true integral, m=%d */ \n" % (m))
self.fhd.write("\n/* PF true integral, m=%d */ \n" % (m))
else:
                self.fhc.write("\n/* PF auxiliary integral, m=%d */ \n" % (m))
                self.fhd.write("\n/* PF auxiliary integral, m=%d */ \n" % (m))
self.fhc.write("class PFint_%d{ \n" % (m))
self.fhc.write("public: \n")
# write class variables; convention being used is s=0, p=1-3, d=4-9, f=10-19, g=20-34
self.fhc.write("#ifdef REG_PF \n")
for i in range(0,10):
for j in range(0,3):
self.fhc.write(" QUICKDouble x_%d_%d; // %s, %s \n" % (j+1, i+10, self.p_lbl[j], self.f_lbl[i]))
self.fhc.write("#endif \n")
# write class functions
self.fhc.write(" %s PFint_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp); \n" % (self.func_qualifier, m))
self.fhc.write("}; \n")
            # write partial classes; these are useful to reduce register pressure
for i in range(0,10):
self.fhc.write("\n/* PF integral partial class, - Part %d, m=%d */ \n" % (i+1, m))
self.fhc.write("class PFint_%d_%d{ \n" % (m, i+1))
self.fhc.write("public: \n")
#self.fhc.write("#ifdef REG_PF \n")
for j in range(0,3):
self.fhc.write(" QUICKDouble x_%d_%d; // %s, %s \n" % (j+1, i+10, self.p_lbl[j], self.f_lbl[i]))
#self.fhc.write("#endif \n")
# write partial class functions
self.fhc.write(" %s PFint_%d_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp); \n" % (self.func_qualifier, m, i+1))
self.fhc.write("}; \n")
# write function definitions
self.fhd.write("%s PFint_%d::PFint_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp){ \n\n" % (self.func_qualifier, m, m))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m, m, m))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m+1, m+1, m+1))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n" % (m, m, m))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n\n" % (m+1, m+1, m+1))
# save all computed values into class variables that will reside in register/lmem space
self.fhd.write("#ifdef REG_PF \n")
for i in range(0,10):
for j in range(0,3):
tmp_mcal=[params.Mcal[i+10][0], params.Mcal[i+10][1], params.Mcal[i+10][2]]
for k in range(0,3):
#self.fhd.write("a(i,j) %d %d %d %d %d\n" % (tmp_mcal[0], tmp_mcal[1], tmp_mcal[2], params.Mcal[j+1][k], tmp_mcal[k]))
if params.Mcal[j+1][k] != 0:
self.fhd.write("#ifdef REG_SF \n")
self.fhd.write(" x_%d_%d = %s * sf_%d.x_%d_%d - %s * sf_%d.x_%d_%d; \n" % (j+1, i+10, self.PA[k], m, 0, i+10,\
self.PC[k], m+1, 0, i+10))
self.fhd.write("#else \n")
self.fhd.write(" x_%d_%d = %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d)- %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d); \n" % (j+1, i+10, self.PA[k], 0, i+10, m,\
self.PC[k], 0, i+10, m+1))
self.fhd.write("#endif \n")
if tmp_mcal[k] != 0:
tmp_mcal[k] -= 1
tmp_i=params.trans[tmp_mcal[0]][tmp_mcal[1]][tmp_mcal[2]]
self.fhd.write(" x_%d_%d += TwoZetaInv * %f * (sd_%d.x_%d_%d - sd_%d.x_%d_%d); \n" % (j+1, i+10, params.Mcal[i+10][k], m, 0, tmp_i-1, m+1, 0, tmp_i-1))
break
self.fhd.write("#else \n")
# save all computed values into store array in global memory
self.fhd.write(" QUICKDouble val; \n")
for i in range(0,10):
for j in range(0,3):
tmp_mcal=[params.Mcal[i+10][0], params.Mcal[i+10][1], params.Mcal[i+10][2]]
for k in range(0,3):
if params.Mcal[j+1][k] != 0:
self.fhd.write("#ifdef REG_SF \n")
self.fhd.write(" val = %s * sf_%d.x_%d_%d - %s * sf_%d.x_%d_%d; \n" % (self.PA[k], m, 0, i+10,\
self.PC[k], m+1, 0, i+10))
self.fhd.write("#else \n")
self.fhd.write(" val = %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d) - %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d); \n" % (self.PA[k], 0, i+10, m,\
self.PC[k], 0, i+10, m+1))
self.fhd.write("#endif \n")
if tmp_mcal[k] != 0:
tmp_mcal[k] -= 1
tmp_i=params.trans[tmp_mcal[0]][tmp_mcal[1]][tmp_mcal[2]]
self.fhd.write(" val += TwoZetaInv * %f * (sd_%d.x_%d_%d - sd_%d.x_%d_%d); \n" % (params.Mcal[i+10][k], m, 0, tmp_i-1, m+1, 0, tmp_i-1))
self.fhd.write(" LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d) = val; \n" % (j+1, i+10, m))
break
self.fhd.write("#endif \n")
self.fhd.write("\n } \n")
# write definition for partial classes, note that we will not write code for global memory based implementation here
for i in range(0,10):
self.fhd.write("\n/* PF integral partial class - Part %d, m=%d */ \n" % (i+1, m))
self.fhd.write("%s PFint_%d_%d::PFint_%d_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp){ \n\n" % (self.func_qualifier, m, i+1, m, i+1))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m, m, m))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m+1, m+1, m+1))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n" % (m, m, m))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n\n" % (m+1, m+1, m+1))
# save all computed values into class variables that will reside in register/lmem space
for j in range(0,3):
tmp_mcal=[params.Mcal[i+10][0], params.Mcal[i+10][1], params.Mcal[i+10][2]]
for k in range(0,3):
#self.fhd.write("a(i,j) %d %d %d %d %d\n" % (tmp_mcal[0], tmp_mcal[1], tmp_mcal[2], params.Mcal[j+1][k], tmp_mcal[k]))
if params.Mcal[j+1][k] != 0:
self.fhd.write("#ifdef REG_SF \n")
self.fhd.write(" x_%d_%d = %s * sf_%d.x_%d_%d - %s * sf_%d.x_%d_%d; \n" % (j+1, i+10, self.PA[k], m, 0, i+10,\
self.PC[k], m+1, 0, i+10))
self.fhd.write("#else \n")
self.fhd.write(" x_%d_%d = %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d)- %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d); \n" % (j+1, i+10, self.PA[k], 0, i+10, m,\
self.PC[k], 0, i+10, m+1))
self.fhd.write("#endif \n")
if tmp_mcal[k] != 0:
tmp_mcal[k] -= 1
tmp_i=params.trans[tmp_mcal[0]][tmp_mcal[1]][tmp_mcal[2]]
self.fhd.write(" x_%d_%d += TwoZetaInv * %f * (sd_%d.x_%d_%d - sd_%d.x_%d_%d); \n" % (j+1, i+10, params.Mcal[i+10][k], m, 0, tmp_i-1, m+1, 0, tmp_i-1))
break
self.fhd.write("\n } \n")
# generate code to save computed [p|f] integral
def save_int(self):
self.fha.write("\n // PF integral, m=%d \n" % (0))
self.fha.write(" if(I == 1 && J == 3){ \n")
self.fha.write(" PFint_0 pf(PAx, PAy, PAz, PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); \n")
self.fha.write("#ifdef REG_PF \n")
for i in range(0,10):
for j in range(0,3):
self.fha.write(" LOCSTORE(store, %d, %d, STOREDIM, STOREDIM) = pf.x_%d_%d;\n" % (j+1, i+10, j+1, i+10))
self.fha.write("#endif \n")
# include print statements if debug option is on
if OEint.debug == 1:
self.fha.write("\n#ifdef DEBUG_OEI \n")
for i in range(0,10):
for j in range(0,3):
self.fha.write(" printf(\"II %%d JJ %%d %s store[%d,%d] = %%f \\n\", II, JJ, LOCSTORE(store, %d, %d, STOREDIM, STOREDIM)); \n" % ( "PF", j+1, i+10, j+1, i+10))
self.fha.write("#endif \n\n")
self.fha.write(" } \n")
| src/oei/iclass/PFint.py | 12,254 | !---------------------------------------------------------------------!! Written by Madu Manathunga on 07/01/2021 !! !! Copyright (C) 2020-2021 Merz lab !! Copyright (C) 2020-2021 Götz lab !! !! This source file is a part of QUICK-GenInt code generator and !! is subjected to the terms of the Mozilla Public License, v. 2.0. !! If a copy of the MPL was not distributed with this file, you can !! obtain one at http://mozilla.org/MPL/2.0/. !!_____________________________________________________________________!!---------------------------------------------------------------------!! This source file contains classes necessary for generating one !! electron integrals. Note that we use vertical recurrence relations !! algorithm developed by Obara and Saika. See J. Chem. Phys. 1986, 84,!! 3963−3974 paper for theoretical details. !! !!---------------------------------------------------------------------! [p|f] class, subclass of OEint write code paths for integrals. Note that we use C++ classes here. write class variables; convention being used is s=0, p=1-3, d=4-9, f=10-19, g=20-34 write class functions write partial classes, these are useful to reduce the registry pressure self.fhc.write("ifdef REG_PF \n")self.fhc.write("endif \n") write partial class functions write function definitions save all computed values into class variables that will reside in register/lmem spaceself.fhd.write("a(i,j) %d %d %d %d %d\n" % (tmp_mcal[0], tmp_mcal[1], tmp_mcal[2], params.Mcal[j+1][k], tmp_mcal[k])) save all computed values into store array in global memory write definition for partial classes, note that we will not write code for global memory based implementation here save all computed values into class variables that will reside in register/lmem spaceself.fhd.write("a(i,j) %d %d %d %d %d\n" % (tmp_mcal[0], tmp_mcal[1], tmp_mcal[2], params.Mcal[j+1][k], tmp_mcal[k])) generate code to save computed [p|f] integral include print statements if debug option is on | 2,345 | en | 0.700219 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0062_no_invoice_for_historic_events'),
('workshops', '0062_add_stalled_unresponsive_tags'),
]
operations = [
]
| workshops/migrations/0063_merge.py | 324 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
"""
Django settings for project_name project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# for best-practices.
# SECURITY WARNING: keep the secret key used in production secret!
# Please set SECRET_KEY environment variable in your production environment
# (e.g. Heroku).
SECRET_KEY = os.getenv('SECRET_KEY', 'django-insecure-*%$(!wsn1rre5@c!$jm7w&$+s3y0xqn%cnmk)&6(ukjn)18b!0')
# Automatically determine environment by detecting if DATABASE_URL variable.
# DATABASE_URL is provided by Heroku if a database add-on is added
# (e.g. Heroku Postgres).
PRODUCTION = os.getenv('DATABASE_URL') is not None
# SECURITY WARNING: don't run with debug turned on in production!
# If you want to enable debugging on Heroku for learning purposes,
# set this to True.
DEBUG = not PRODUCTION
HEROKU_APP_NAME = os.getenv('HEROKU_APP_NAME', '')
ALLOWED_HOSTS = [f'{HEROKU_APP_NAME}.herokuapp.com']
if not PRODUCTION:
ALLOWED_HOSTS += ['.localhost', '127.0.0.1', '[::1]']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_name.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR / 'templates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_name.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Set database settings automatically using DATABASE_URL.
if PRODUCTION:
DATABASES['default'] = dj_database_url.config(
conn_max_age=600, ssl_require=True
)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
# Feel free to change these according to your needs.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# This is the directory for storing `collectstatic` results.
# This shouldn't be included in your Git repository.
STATIC_ROOT = BASE_DIR / 'staticfiles'
# You can use this directory to store project-wide static files.
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
# Make sure the directories exist to prevent errors when doing `collectstatic`.
for directory in [*STATICFILES_DIRS, STATIC_ROOT]:
directory.mkdir(exist_ok=True)
# Enable compression and caching features of whitenoise.
# You can remove this if it causes problems on your setup.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| project_name/settings.py | 4,691 | Django settings for project_name project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
Build paths inside the project like this: BASE_DIR / 'subdir'. Quick-start development settings See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ for best-practices. SECURITY WARNING: keep the secret key used in production secret! Please set SECRET_KEY environment variable in your production environment (e.g. Heroku). Automatically determine environment by detecting if DATABASE_URL variable. DATABASE_URL is provided by Heroku if a database add-on is added (e.g. Heroku Postgres). SECURITY WARNING: don't run with debug turned on in production! If you want to enable debugging on Heroku for learning purposes, set this to True. Application definition Database https://docs.djangoproject.com/en/3.2/ref/settings/databases Set database settings automatically using DATABASE_URL. Password validation https://docs.djangoproject.com/en/3.2/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.2/topics/i18n/ Feel free to change these according to your needs. Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.2/howto/static-files/ This is the directory for storing `collectstatic` results. This shouldn't be included in your Git repository. You can use this directory to store project-wide static files. Make sure the directories exist to prevent errors when doing `collectstatic`. Enable compression and caching features of whitenoise. You can remove this if it causes problems on your setup. | 1,784 | en | 0.733871 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
import charms.leadership
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
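# Make snap-installed binaries (e.g. kubectl, kube-apiserver) resolvable from the hook environment.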
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def reset_states_for_delivery():
'''An upgrade charm event was triggered by Juju, react to that here.'''
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
def rename_file_idempotent(source, destination):
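    ''' Rename source to destination, doing nothing if source does not exist. '''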
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
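    ''' Clean up states, services, files, and flag options left behind by the
    pre-snap (binary-based) delivery of the master components. '''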
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# clear the flag managers
FlagManager('kube-apiserver').destroy_all()
FlagManager('kube-controller-manager').destroy_all()
FlagManager('kube-scheduler').destroy_all()
def install_snaps():
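    ''' Install the master component snaps (and kubectl) from the configured
    snap channel. '''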
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.channel')
def channel_changed():
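    ''' Re-run snap installation when the snap channel config changes. '''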
install_snaps()
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if not os.path.isfile(known_tokens):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
    # this is slightly opaque, but we are sending each file's contents under
    # its file path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
api_opts = FlagManager('kube-apiserver')
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
api_opts.add('service-account-key-file', service_key)
controller_opts = FlagManager('kube-controller-manager')
controller_opts.add('service-account-private-key-file', service_key)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
    Returns: True if all keys were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = charms.leadership.leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
                msg = "Waiting on leader's crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
host.service_restart('snap.%s.daemon' % service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
        handle these events consistently only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
# Note that the DNS server doesn't necessarily exist at this point. We know
# where we're going to put it, though, so let's send the info anyway.
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
"""Send the tokens to the workers."""
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
# Send the data
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token,
proxy_token, admin_token)
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator master is waiting for a relation to workers.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set('blocked', 'Waiting for workers.')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
    Test whether the addons were installed.
    Returns: True if the addons were applied
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
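    ''' Build an admin kubeconfig that points at the load balancer address
    rather than this unit. '''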
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
''' Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event because there
are changes in the relationship data, and we should re-render any
configs, keys, and/or service pre-reqs '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
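    ''' Write the initial nagios check configuration once the relation is
    established. '''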
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
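    ''' Render nagios checks for the master snap daemons. '''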
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
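    ''' Remove the nagios checks when the nrpe relation goes away. '''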
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
    # The current nrpe-external-master interface doesn't handle a lot of logic;
    # use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
        # Make the config file readable by the ubuntu user so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
def get_dns_ip():
'''Get an IP address for the DNS server on the provided cidr.'''
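    # For example, a service cidr of '10.152.183.0/24' (an illustrative value) yields
    # the DNS address '10.152.183.10'.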
interface = ipaddress.IPv4Interface(service_cidr())
# Add .10 at the end of the network
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
    # Never use stale data; always prefer what's coming in during context
    # building. If it's stale, it's because what's in unitdata is stale.
data = api_opts.data
if data.get('etcd-servers-strict') or data.get('etcd-servers'):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
# Set the apiserver flags in the options manager
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True)
def configure_master_services():
''' Add remaining flags for the master services and configure snaps to use
them '''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
    # Default to 3 minute resync. TODO: Make this configurable?
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
check_call(cmd)
cmd = (
['snap', 'set', 'kube-controller-manager'] +
controller_opts.to_s().split(' ')
)
check_call(cmd)
cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
check_call(cmd)
def setup_basic_auth(password=None, username='admin', uid='admin'):
    '''Create the htaccess file and the tokens.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user))
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if status != 'Running':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
| vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | 37,137 | Test if the add ons got installed
Returns: True is the addons got applied
Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise.
Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.
Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.
Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event because there
are changes in the relationship data, and we should re-render any
configs, keys, and/or service pre-reqs
Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
This method will install the client package, and render the requisit files
in order to consume the ceph-storage relation.
Configure CDK addons
Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly.
Add remaining flags for the master services and configure snaps to use
them
Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.
Create a kubernetes configuration for the master unit.
We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistenly only when the number of etcd
units has actually changed
Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value.
Get an IP address for the DNS server on the provided cidr.
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
Returns: True if all key were fetched, False if not.
Get the IP address for the kubernetes service based on the cidr.
Get the password of user within the csv file provided.
Grab a token from the static file if present.
Save the client credentials and set appropriate daemon flags when
etcd declares itself as available
Signal at the end of the run that we are running.
Return boolean indicating whether or not to set allow-privileged=true.
Ensure master services are up and running.
Return: list of failing services
Inform the operator master is waiting for a relation to workers.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
React to changed 'allow-privileged' config value.
The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
Handle password change via the charms config.
Send configuration to the load balancer, and close access to the
public interface
An upgrade charm event was triggered by Juju, react to that here.
Send cluster DNS info
Send the data that is required to create a server certificate for
this server.
Send the tokens to the workers.
Return the charm's service-cidr config
Declare the application version to juju
Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.
Create the htacces file and the tokens.
Setup basic authentication and token access for the cluster.
Create a token file for kubernetes authentication.
Stop the kubernetes master services
Run the Kubernetes master components.
Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate
!/usr/bin/env python Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Override the default nagios shortname regex to allow periods, which we need because our bin names contain them (e.g. 'snap.foo.daemon'). The default regex in charmhelpers doesn't allow periods, but nagios itself does. remove old states disable old services rename auth files cleanup old files clear the flag managers password_changed is called during an upgrade. Nothing to do. Password not initialised Try first to fetch data from an old leadership broadcast. Generate the default service account token key read service account key for syndication this is slightly opaque, but we are sending file contents under its file path as a key. eg: {'/root/cdk/serviceaccount.key': 'RSA:2471731...'} The source of truth for non-leaders is the leader. Therefore we overwrite_local with whatever the leader has. the keys were not retrieved. Non-leaders have to retry. No change detected and we have already setup the authentication This races with other codepaths, and seems to require being created first This block may be extracted later, but for now seems to work as intended If the path does not exist, assume we need it Fetch data from leadership broadcast Default to logging the warning and wait for leader data to be set Write out the file and move on to the next item All services should be up and running at this point. Double-check... etcd is not returning a connection string. This hapens when the master unit disconnects from etcd and is ready to terminate. No point in trying to start master services and fail. Just return. key off of the connection string If the connection string changes, remove the started state to trigger handling of the master components Note that the DNS server doesn't necessarily exist at this point. We know where we're going to put it, though, so let's send the info anyway. Send the data Use the public ip of this unit as the Common Name for the certificate. Get the SDN gateway based on the cidr address. Create SANs that the tls layer will add to the server cert. Create a path safe name by removing path characters from the unit name. Request a server cert with this information. Get the potential list of loadbalancers from the relation object. Get the public address of loadbalancers so users can access the cluster. Get the port of the loadbalancer so users can access the cluster. Re-execute the rendering if the data has changed. Install the ceph common utilities. Render the ceph configuration from the ceph conf template The key can rotate independently of other ceph config, so validate it Enlist the ceph-admin key as a kubernetes secret We didn't have a key, and cannot proceed. Do not set state and allow this method to re-execute At first glance this is deceptive. The apply stanza will create if it doesn't exist, otherwise it will update the entry, ensuring our ceph-secret is always reflective of what we have in /etc/ceph assuming we have invoked this anytime that file would change. 
the enlistment in kubernetes failed, return and prepare for re-exec when complete, set a state relating to configuration of the storage backend that will allow other modules to hook into this and verify we have performed the necessary pre-req steps to interface with a ceph deployment. List of systemd services for which the checks will be removed The current nrpe-external-master interface doesn't handle a lot of logic, use the charm-helpers code for now. Get the package architecture for this system. Convert the binary result into a string. Get the options from the tls-client layer. Get all the paths to the tls information required for kubeconfig. Do we have everything we need? Create an absolute path for the kubeconfig file. Create the kubeconfig on this system so users can access the cluster. Make the config file readable by the ubuntu users so juju scp works. token and password are mutually exclusive. Error early if both are present. The developer has requested an impossible situation. see: kubectl config set-credentials --help Create the config file with the address of the master server. Delete old users Create the credentials using the client flags. This is mutually exclusive from password. They will not work together. Create a default context with the cluster. Make the config use this new context. Add .10 at the end of the network Add .1 at the end of the network Define where the etcd tls files will be kept. Create paths to the etcd client ca, key, and cert file locations. Save the client credentials (in relation data) to the paths provided. Never use stale data, always prefer whats coming in during context building. if its stale, its because whats in unitdata is stale Set the apiserver flags in the options manager Get the tls paths from the layer data. Handle static options for now FIXME: add etcd3 support Default to 3 minute resync. TODO: Make this configureable? | 9,219 | en | 0.87522 |
from ..api import rule
from ..api._endpoint import ApiEndpoint, maybe_login_required
from ..entities._entity import NotFound
from ..entities.commit import Commit, CommitSerializer
class CommitListAPI(ApiEndpoint):
serializer = CommitSerializer()
@maybe_login_required
def get(self):
"""
---
description: Get a list of commits.
responses:
"200": "CommitList"
"401": "401"
tags:
- Commits
"""
commits = Commit.all(order_by=Commit.timestamp.desc(), limit=500)
return self.serializer.many.dump(commits)
class CommitEntityAPI(ApiEndpoint):
serializer = CommitSerializer()
def _get(self, commit_id):
try:
commit = Commit.one(id=commit_id)
except NotFound:
self.abort_404_not_found()
return commit
@maybe_login_required
def get(self, commit_id):
"""
---
description: Get a commit.
responses:
"200": "CommitEntity"
"401": "401"
"404": "404"
parameters:
- name: commit_id
in: path
schema:
type: string
tags:
- Commits
"""
commit = self._get(commit_id)
return self.serializer.one.dump(commit)
commit_entity_view = CommitEntityAPI.as_view("commit")
commit_list_view = CommitListAPI.as_view("commits")
rule(
"/commits/<commit_id>/",
view_func=commit_entity_view,
methods=["GET"],
)
rule(
"/commits/",
view_func=commit_list_view,
methods=["GET"],
)
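# Together these rules expose GET /commits/ (the most recent 500 commits) and
# GET /commits/<commit_id>/ (a single commit, 404 when the id is unknown); both
# honor the maybe_login_required decorator.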
| conbench/api/commits.py | 1,612 | ---
description: Get a list of commits.
responses:
"200": "CommitList"
"401": "401"
tags:
- Commits
---
description: Get a commit.
responses:
"200": "CommitEntity"
"401": "401"
"404": "404"
parameters:
- name: commit_id
in: path
schema:
type: string
tags:
- Commits | 307 | en | 0.707107 |
############################################################################
# examples/multi_webcamera/host/test_module/__init__.py
#
# Copyright 2019, 2020 Sony Semiconductor Solutions Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
from TestServer import TestServer
| examples/multi_webcamera/host/test_module/__init__.py | 1,792 | examples/multi_webcamera/host/test_module/__init__.py Copyright 2019, 2020 Sony Semiconductor Solutions Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name NuttX nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1,540 | en | 0.866986 |
import io
import logging
import os
import json
import time
import boto3
import botocore
from markov.utils import log_and_exit, Logger, get_boto_config, \
SIMAPP_EVENT_ERROR_CODE_500, SIMAPP_EVENT_ERROR_CODE_400, \
SIMAPP_S3_DATA_STORE_EXCEPTION
LOG = Logger(__name__, logging.INFO).get_logger()
# The amount of time for the sim app to wait for sagemaker to produce
# the ip
SAGEMAKER_WAIT_TIME = 1200 # 20 minutes
class SageS3Client():
def __init__(self, bucket=None, s3_prefix=None, aws_region=None, s3_endpoint_url=None):
self.aws_region = aws_region
self.bucket = bucket
self.s3_prefix = s3_prefix
self.s3_endpoint_url = s3_endpoint_url
self.config_key = os.path.normpath(s3_prefix + "/ip/ip.json")
self.hyperparameters_key = os.path.normpath(s3_prefix + "/ip/hyperparameters.json")
self.done_file_key = os.path.normpath(s3_prefix + "/ip/done")
self.model_checkpoints_prefix = os.path.normpath(s3_prefix + "/model/") + "/"
LOG.info("Initializing SageS3Client...")
def get_client(self):
session = boto3.session.Session()
return session.client('s3', region_name=self.aws_region, endpoint_url=self.s3_endpoint_url, config=get_boto_config())
def _get_s3_key(self, key):
return os.path.normpath(self.model_checkpoints_prefix + "/" + key)
def write_ip_config(self, ip_address):
try:
s3_client = self.get_client()
data = {"IP": ip_address}
json_blob = json.dumps(data)
file_handle = io.BytesIO(json_blob.encode())
file_handle_done = io.BytesIO(b'done')
s3_client.upload_fileobj(file_handle, self.bucket, self.config_key)
s3_client.upload_fileobj(file_handle_done, self.bucket, self.done_file_key)
except botocore.exceptions.ClientError:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_hyperparameters(self, hyperparams_json):
try:
s3_client = self.get_client()
file_handle = io.BytesIO(hyperparams_json.encode())
s3_client.upload_fileobj(file_handle, self.bucket, self.hyperparameters_key)
except botocore.exceptions.ClientError:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def get_ip(self):
s3_client = self.get_client()
time_elapsed = 0
try:
# Wait for sagemaker to produce the redis ip
while time_elapsed < SAGEMAKER_WAIT_TIME:
response = s3_client.list_objects(Bucket=self.bucket, Prefix=self.done_file_key)
if "Contents" in response:
break
time.sleep(1)
time_elapsed += 1
if time_elapsed % 5 == 0:
LOG.info("Waiting for SageMaker Redis server IP: Time elapsed: %s seconds",
time_elapsed)
if time_elapsed >= SAGEMAKER_WAIT_TIME:
log_and_exit("Timed out while attempting to retrieve the Redis IP",
SIMAPP_S3_DATA_STORE_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)
# Download the ip file
s3_client.download_file(self.bucket, self.config_key, 'ip.json')
with open("ip.json") as file:
ip_file = json.load(file)["IP"]
return ip_file
except botocore.exceptions.ClientError:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def download_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.download_file(self.bucket, s3_key, local_path)
return True
except botocore.exceptions.ClientError as err:
            # It is possible that the file isn't there, in which case we should
            # return False and let the client decide the next action.
if err.response['Error']['Code'] == "404":
return False
else:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.upload_file(Filename=local_path,
Bucket=self.bucket,
Key=s3_key)
return True
except botocore.exceptions.ClientError:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
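# Example usage (bucket, prefix and region below are illustrative values only):
#   s3_client = SageS3Client(bucket='my-bucket', s3_prefix='sim-app', aws_region='us-east-1')
#   redis_ip = s3_client.get_ip()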
| src/rl_coach_2020_v2/src/markov/s3_client.py | 5,975 | The amount of time for the sim app to wait for sagemaker to produce the ip 20 minutes Wait for sagemaker to produce the redis ip Download the ip file It is possible that the file isn't there in which case we should return fasle and let the client decide the next action | 269 | en | 0.922674 |
"""Components for use in `CycleGroup`. For details, see `CycleGroup`."""
from __future__ import division, print_function
from six.moves import range
import numpy as np
import scipy.sparse as sparse
import unittest
from openmdao.core.explicitcomponent import ExplicitComponent
PSI = 1.
_vec_terms = {}
def _compute_vector_terms(system_size):
# Try/Except pattern is much faster than if key in ... if the key is present (which it will be
# outside of the first invocation).
try:
return _vec_terms[system_size]
except KeyError:
u = np.zeros(system_size)
u[[0, -1]] = np.sqrt(2)/2
v = np.zeros(system_size)
v[1:-1] = 1 / np.sqrt(system_size - 2)
cross_terms = np.outer(v, u) - np.outer(u, v)
same_terms = np.outer(u, u) + np.outer(v, v)
_vec_terms[system_size] = u, v, cross_terms, same_terms
return u, v, cross_terms, same_terms
def _compute_A(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return (np.eye(system_size)
+ np.sin(theta) * cross_terms
+ (np.cos(theta) - 1) * same_terms)
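# With u and v orthonormal, _compute_A(system_size, theta) acts as a rotation by theta
# in the plane spanned by u and v (and as the identity elsewhere); _compute_dA below is
# its derivative with respect to theta.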
def _compute_dA(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return np.cos(theta) * cross_terms - np.sin(theta) * same_terms
def array_idx(i, var_size):
return slice(i * var_size, (i + 1) * var_size)
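# For example, array_idx(1, 3) == slice(3, 6).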
class ExplicitCycleComp(ExplicitComponent):
def _inputs_to_vector(self, inputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
x = np.zeros(num_var * size)
for i in range(num_var):
x_i = inputs[self._cycle_names['x'].format(i)].flat
x[size * i:size * (i + 1)] = x_i
return x
def _vector_to_outputs(self, vec, outputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
for i in range(num_var):
y_i = vec[size * i:size * (i + 1)].reshape(var_shape)
outputs[self._cycle_names['y'].format(i)] = y_i
def __str__(self):
return 'Explicit Cycle Component'
def initialize(self):
self.metadata.declare('jacobian_type', default='matvec',
values=['matvec', 'dense', 'sparse-coo', 'sparse-csr',
'sparse-csc'],
desc='method of assembling derivatives')
self.metadata.declare('partial_type', default='array',
values=['array', 'sparse', 'aij'],
desc='type of partial derivatives')
self.metadata.declare('num_var', type_=int, default=1,
desc='Number of variables per component')
self.metadata.declare('var_shape', type_=tuple, default=(3,),
desc='Shape of each variable')
self.metadata.declare('index', type_=int,
desc='Index of the component. Used for testing implicit connections')
self.metadata.declare('connection_type', type_=str, default='explicit',
values=['explicit', 'implicit'],
desc='How to connect variables.')
self.metadata.declare('finite_difference', default=False,
type_=bool,
desc='If the derivatives should be finite differenced.')
self.metadata.declare('num_comp', type_=int, default=2,
desc='Total number of components')
self.angle_param = 'theta'
self._cycle_names = {}
def _init_parameterized(self):
self.num_var = self.metadata['num_var']
self.var_shape = self.metadata['var_shape']
self.size = self.num_var * np.prod(self.var_shape)
if self.metadata['jacobian_type'] == 'matvec':
self.compute_jacvec_product = self.jacvec_product
if self.metadata['connection_type'] == 'implicit':
idx = self.metadata['index']
self._cycle_names['x'] = 'x_{}_{{}}'.format(idx)
self._cycle_names['y'] = 'x_{}_{{}}'.format(idx + 1)
self._cycle_names['theta'] = 'theta_{}'.format(idx)
self._cycle_names['theta_out'] = 'theta_{}'.format(idx + 1)
num_var = self.metadata['num_var']
self._cycle_promotes_in = [self._cycle_names['x'].format(i) for i in range(num_var)]
self._cycle_promotes_out = [self._cycle_names['y'].format(i) for i in range(num_var)]
self._cycle_promotes_in.append(self._cycle_names['theta'])
self._cycle_promotes_out.append(self._cycle_names['theta_out'])
else:
self._cycle_names['x'] = 'x_{}'
self._cycle_names['y'] = 'y_{}'
self._cycle_names['theta'] = 'theta'
self._cycle_names['theta_out'] = 'theta_out'
self._cycle_promotes_in = self._cycle_promotes_out = []
def setup(self):
for i in range(self.num_var):
self.add_input(self._cycle_names['x'].format(i), shape=self.var_shape)
self.add_output(self._cycle_names['y'].format(i), shape=self.var_shape)
self.add_input(self._cycle_names['theta'], val=1.)
self.add_output(self._cycle_names['theta_out'], shape=(1,))
# Setup partials
pd_type = self.metadata['partial_type']
if self.metadata['finite_difference']:
if self.metadata['jacobian_type'] == 'matvec':
raise unittest.SkipTest('not testing FD and matvec')
if pd_type != 'array':
raise unittest.SkipTest('only dense FD supported')
self.declare_partials('*', '*', method='fd')
elif self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
A = np.ones((self.size, self.size))
dA_x = np.ones((self.size, 1))
dtheta = np.array([[1.]])
angle_param = self._cycle_names[self.angle_param]
# if our subjacs are not dense, we must assign values here that
# match their type (data values don't matter, only structure).
# Otherwise, we assume they are dense and we'll get an error later
# when we assign a subjac with a type that doesn't match.
for out_idx in range(num_var):
out_var = self._cycle_names['y'].format(out_idx)
for in_idx in range(num_var):
in_var = self._cycle_names['x'].format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
self.declare_partials(out_var, in_var,
**self._array2kwargs(Aij, pd_type))
self.declare_partials(out_var, angle_param,
**self._array2kwargs(dA_x[array_idx(out_idx, var_size)],
pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['theta'],
**self._array2kwargs(dtheta, pd_type))
else:
# Declare everything
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
A = _compute_A(self.size, theta)
x = self._inputs_to_vector(inputs)
y = A.dot(x)
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
angle_param = self._cycle_names[self.angle_param]
x = self._inputs_to_vector(inputs)
angle = inputs[angle_param]
A = _compute_A(self.size, angle)
dA = _compute_dA(self.size, angle)
var_shape = self.metadata['var_shape']
var_size = np.prod(var_shape)
num_var = self.metadata['num_var']
x_name = self._cycle_names['x']
y_name = self._cycle_names['y']
theta_name = self._cycle_names['theta']
theta_out_name = self._cycle_names['theta_out']
if mode == 'fwd':
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
dx = d_inputs[x_j].flat[:]
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_outputs[y_i] += Aij.dot(dx).reshape(var_shape)
if theta_name in d_inputs and theta_out_name in d_outputs:
dtheta = d_inputs[theta_name]
d_outputs[theta_out_name] += dtheta
if angle_param in d_inputs:
dangle = d_inputs[angle_param]
dy_dangle = (dA.dot(x)) * dangle
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
d_outputs[y_i] += dy_dangle[array_idx(i, var_size)].reshape(var_shape)
elif mode == 'rev':
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
dy_i = d_outputs[y_i].flat[:]
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_inputs[x_j] += Aij.T.dot(dy_i).reshape(var_shape)
if angle_param in d_inputs:
dAij = dA[array_idx(i, var_size), array_idx(j, var_size)]
x_j_vec = inputs[x_j].flat[:]
d_inputs[angle_param] += x_j_vec.T.dot(dAij.T.dot(dy_i))
if theta_out_name in d_outputs and theta_name in d_inputs:
dtheta_out = d_outputs[theta_out_name]
d_inputs[theta_name] += dtheta_out
def make_jacobian_entry(self, A, pd_type):
if pd_type == 'aij':
return self.make_sub_jacobian(A, pd_type)[0]
return self.make_sub_jacobian(A, pd_type)
def make_sub_jacobian(self, A, pd_type):
if pd_type == 'array':
return A
if pd_type == 'sparse':
return sparse.csr_matrix(A)
if pd_type == 'aij':
data = []
rows = []
cols = []
A = np.atleast_2d(A)
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if np.abs(A[i, j]) > 1e-15:
data.append(A[i, j])
rows.append(i)
cols.append(j)
return [np.array(data), np.array(rows), np.array(cols)]
raise ValueError('Unknown partial_type: {}'.format(pd_type))
def _array2kwargs(self, arr, pd_type):
jac = self.make_sub_jacobian(arr, pd_type)
if pd_type == 'aij':
return {'val': jac[0], 'rows': jac[1], 'cols': jac[2]}
else:
return {'val': jac}
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
angle_param = self._cycle_names[self.angle_param]
angle = inputs[angle_param]
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
x = self._inputs_to_vector(inputs)
size = self.size
A = _compute_A(size, angle)
dA = _compute_dA(size, angle)
dA_x = np.atleast_2d(dA.dot(x)).T
pd_type = self.metadata['partial_type']
dtheta = np.array([[1.]])
y_name = self._cycle_names['y']
x_name = self._cycle_names['x']
for out_idx in range(num_var):
out_var = y_name.format(out_idx)
for in_idx in range(num_var):
in_var = x_name.format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
J_y_x = self.make_jacobian_entry(Aij, pd_type)
J_y_angle = self.make_jacobian_entry(dA_x[array_idx(out_idx, var_size)],
pd_type)
partials[out_var, in_var] = J_y_x
partials[out_var, angle_param] = J_y_angle
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(dtheta, pd_type)
class ExplicitFirstComp(ExplicitCycleComp):
def __str__(self):
return 'Explicit Cycle Component - First'
def setup(self):
self.add_input('psi', val=1.)
self.angle_param = 'psi'
self._cycle_names['psi'] = 'psi'
super(ExplicitFirstComp, self).setup()
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
A = _compute_A(self.size, psi)
y = A.dot(np.ones(self.size))
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
class ExplicitLastComp(ExplicitFirstComp):
def __str__(self):
return 'Explicit Cycle Component - Last'
def setup(self):
super(ExplicitLastComp, self).setup()
self.add_output('x_norm2', shape=(1,))
self._n = 1
# Setup partials
pd_type = self.metadata['partial_type']
if self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
x = np.ones(self.var_shape)
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
self.declare_partials('x_norm2', in_var,
**self._array2kwargs(x.flatten(), pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['psi'],
**self._array2kwargs(np.array([1.]), pd_type))
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
k = self.metadata['num_comp']
x = self._inputs_to_vector(inputs)
outputs['x_norm2'] = 0.5*np.dot(x,x)
        # theta_out has half the error, relative to the correct angle, that theta has.
outputs[self._cycle_names['theta_out']] = theta / 2 + (self._n * 2 * np.pi - psi) / (2 * k - 2)
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
pd_type = self.metadata['partial_type']
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
partials['x_norm2', in_var] = self.make_jacobian_entry(inputs[in_var].flat[:],
pd_type)
k = self.metadata['num_comp']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(np.array([.5]), pd_type)
partials[theta_out, self._cycle_names['psi']] = \
self.make_jacobian_entry(np.array([-1/(2*k-2)]), pd_type)
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
if self.metadata['jacobian_type'] == 'matvec':
k = self.metadata['num_comp']
num_var = self.metadata['num_var']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
psi = self._cycle_names['psi']
if mode == 'fwd':
if theta_out in d_outputs:
if theta in d_inputs:
d_outputs[theta_out] += 0.5 * d_inputs[theta]
if psi in d_inputs:
d_outputs[theta_out] += -d_inputs[psi] / (2 * k - 2)
for i in range(num_var):
in_var = self._cycle_names['x'].format(i)
if in_var in d_inputs and 'x_norm2' in d_outputs:
d_outputs['x_norm2'] += np.dot(inputs[in_var].flat, d_inputs[in_var].flat)
elif mode == 'rev':
if 'x_norm2' in d_outputs:
dxnorm = d_outputs['x_norm2']
for i in range(num_var):
x_i_name = self._cycle_names['x'].format(i)
if x_i_name in d_inputs:
d_inputs[x_i_name] += inputs[x_i_name] * dxnorm
if theta_out in d_outputs:
dtheta_out = d_outputs[theta_out]
if theta in d_inputs:
d_inputs[theta] += .5*dtheta_out
if psi in d_inputs:
d_inputs[psi] += -dtheta_out/(2*k-2)
| openmdao/test_suite/components/cycle_comps.py | 17,390 | Components for use in `CycleGroup`. For details, see `CycleGroup`.
Try/Except pattern is much faster than if key in ... if the key is present (which it will be outside of the first invocation). Setup partials if our subjacs are not dense, we must assign values here that match their type (data values don't matter, only structure). Otherwise, we assume they are dense and we'll get an error later when we assign a subjac with a type that doesn't match. Declare everything Setup partials theta_out has 1/2 the error as theta does to the correct angle. | 552 | en | 0.922667 |
import tensorflow as tf
from TransformerNet.layers import Encoder, Decoder
def Decoder_test(*args, **kwargs):
inputs = tf.random.uniform((64, 62), dtype=tf.int64, minval=0, maxval=200) # (batch_size, input_seq_len)
enc_output = Encoder(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, input_vocab_size=8500,
maximum_position_encoding=10000)(inputs, False, None)
target = tf.random.uniform((64, 26), dtype=tf.int64, minval=0, maxval=200) # (batch_size, target_seq_len)
sample_decoder = Decoder(*args, **kwargs)
output, attn = sample_decoder(target,
enc_output=enc_output,
training=False,
look_ahead_mask=None,
padding_mask=None)
print(output.shape) # (batch_size, target_seq_len, d_model)
print(attn['decoder_layer2_attention2'].shape) # (batch_size, target_seq_len, input_seq_len)
if __name__ == "__main__":
Decoder_test(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, target_vocab_size=8000,
maximum_position_encoding=5000)
| TransformerNet/layers/Decoder_test.py | 1,218 | (batch_size, input_seq_len) (batch_size, target_seq_len) (batch_size, target_seq_len, d_model) (batch_size, target_seq_len, input_seq_len) | 138 | en | 0.191667 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
oo_option lookup plugin for openshift-ansible
Usage:
- debug:
msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
This returns, by order of priority:
* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
* if it exists, the environment variable named `<key>`
* if none of the above conditions are met, empty string is returned
'''
import os
# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
try:
# ansible-2.0
from ansible.plugins.lookup import LookupBase
except ImportError:
# ansible-1.9.x
class LookupBase(object):
def __init__(self, basedir=None, runner=None, **kwargs):
self.runner = runner
self.basedir = self.runner.basedir
def get_basedir(self, variables):
return self.basedir
# Reason: disable too-few-public-methods because the `run` method is the only
# one required by the Ansible API
# Status: permanently disabled
# pylint: disable=too-few-public-methods
class LookupModule(LookupBase):
''' oo_option lookup plugin main class '''
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def __init__(self, basedir=None, **kwargs):
''' Constructor '''
self.basedir = basedir
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def run(self, terms, variables, **kwargs):
''' Main execution path '''
ret = []
for term in terms:
option_name = term.split()[0]
cli_key = 'cli_' + option_name
if 'vars' in variables and cli_key in variables['vars']:
ret.append(variables['vars'][cli_key])
elif option_name in os.environ:
ret.append(os.environ[option_name])
else:
ret.append('')
return ret
| lookup_plugins/oo_option.py | 2,604 | oo_option lookup plugin main class
Constructor
Main execution path
oo_option lookup plugin for openshift-ansible
Usage:
- debug:
msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
This returns, by order of priority:
* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
* if it exists, the envirnoment variable named `<key>`
* if none of the above conditions are met, empty string is returned
!/usr/bin/env python2 -*- coding: utf-8 -*- vim: expandtab:tabstop=4:shiftwidth=4 pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring ansible-2.0 ansible-1.9.x Reason: disable too-few-public-methods because the `run` method is the only one required by the Ansible API Status: permanently disabled pylint: disable=too-few-public-methods Reason: disable unused-argument because Ansible is calling us with many parameters we are not interested in. The lookup plugins of Ansible have this kwargs “catch-all” parameter which is not used Status: permanently disabled unless Ansible API evolves pylint: disable=unused-argument Reason: disable unused-argument because Ansible is calling us with many parameters we are not interested in. The lookup plugins of Ansible have this kwargs “catch-all” parameter which is not used Status: permanently disabled unless Ansible API evolves pylint: disable=unused-argument | 1,517 | en | 0.64799 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import os
import itertools
from datetime import datetime
from dateutil.relativedelta import relativedelta
import subprocess
from ..train_utils import TSCVSplitter
class ParameterSweeper:
"""
The function of this class is currently replaced by HyperDrive.
But let's keep it to preserve the work already done, and also
in case we need more flexibility than what HyperDrive provides.
"""
def __init__(self, config):
self.work_directory = config["WorkDirectory"]
data_config = config["DataParams"]
self.data_path = data_config["DataPath"]
if "DataFile" in data_config:
data_file = data_config["DataFile"]
self.data_full_path = os.path.join(self.work_directory, self.data_path, data_file)
else:
self.data_full_path = os.path.join(self.work_directory, self.data_path)
parameters_config = config["Parameters"]
self.parameter_name_list = [n for n, _ in parameters_config.items()]
parameter_value_list = [p for _, p in parameters_config.items()]
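        # Cartesian product of the per-parameter value lists, e.g. {'a': [1, 2], 'b': [3]}
        # yields the combinations (1, 3) and (2, 3).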
self.parameter_combinations = list(itertools.product(*parameter_value_list))
features_config = config["Features"]
self.feature_selection_mode = features_config["FeatureSelectionMode"]
if self.feature_selection_mode == "Default":
# In default mode, simply iterate through each feature set in
# FeatureList
self.feature_list = features_config["FeatureList"]
else:
# Placeholder for more advanced feature selection strategy
pass
def sweep_parameters_script(self, script_config, cv_setting_file, params_setting_file):
script_command = script_config["ScriptCommand"]
script = os.path.join(self.work_directory, script_config["Script"])
task_list = []
parameter_sets = {}
count = 0
for f in self.feature_list:
for p in self.parameter_combinations:
count += 1
task = " ".join(
[
script_command,
script,
"-d",
self.data_full_path,
"-p",
params_setting_file,
"-c",
cv_setting_file,
"-s",
str(count),
]
)
task_list.append(task)
parameter_dict = {}
for n, v in zip(self.parameter_name_list, p):
parameter_dict[n] = v
parameter_sets[count] = {
"feature_set": f,
"features": self.feature_list[f],
"parameters": parameter_dict,
}
with open(params_setting_file, "w") as fp:
json.dump(parameter_sets, fp, indent=True)
# Run tasks in parallel
processes = []
for t in task_list:
process = subprocess.Popen(t, shell=True)
processes.append(process)
# Collect statuses
output = [p.wait() for p in processes]
print(output)
def sweep_parameters(self):
# placeholder for parameter sweeping in python
pass
def sweep_parameters_batch_ai(self):
# placeholder for parameter sweeping using batch ai
pass
def main(config_file):
with open(config_file) as f:
config = json.load(f)
datetime_format = config["DatetimeFormat"]
work_directory = config["WorkDirectory"]
cv_setting_file = os.path.join(work_directory, "cv_settings.json")
# parameter_setting_file = os.path.join(work_directory,
# 'parameter_settings.json')
cv = TSCVSplitter(config)
# This part adjusts the cv settings due to the specific problem setup
# of GEFCom2017. Different forecasting setups may require different
# adjustments. Most setups should not require any adjustment.
for k, v in cv.train_validation_split.items():
round_dict = {}
# Training data ends on 12/31, used to forecast Feb. and Mar.
train_end = datetime.strptime(v["train_range"][1], datetime_format)
# Jan. validation range
validation_start_1 = datetime.strptime(v["validation_range"][0], datetime_format)
validation_end_1 = validation_start_1 + relativedelta(months=1, hours=-1)
# Training data ends on 11/30, used to forecast Jan. and Feb.
train_end_prev = datetime.strftime(train_end + relativedelta(months=-1), datetime_format)
# Training data ends on 01/31, used to forecast Mar. and Apr.
train_end_next = datetime.strftime(train_end + relativedelta(months=1), datetime_format)
# Feb. validation range
validation_start_2 = validation_start_1 + relativedelta(months=1)
validation_end_2 = validation_start_2 + relativedelta(months=1, hours=-1)
# Mar. validation range
validation_start_3 = validation_start_1 + relativedelta(months=2)
validation_end_3 = validation_start_3 + relativedelta(months=1, hours=-1)
# Apr. validation range
validation_start_4 = validation_start_1 + relativedelta(months=3)
validation_end_4 = validation_start_4 + relativedelta(months=1, hours=-1)
validation_start_1 = datetime.strftime(validation_start_1, datetime_format)
validation_end_1 = datetime.strftime(validation_end_1, datetime_format)
validation_start_2 = datetime.strftime(validation_start_2, datetime_format)
validation_end_2 = datetime.strftime(validation_end_2, datetime_format)
validation_start_3 = datetime.strftime(validation_start_3, datetime_format)
validation_end_3 = datetime.strftime(validation_end_3, datetime_format)
validation_start_4 = datetime.strftime(validation_start_4, datetime_format)
validation_end_4 = datetime.strftime(validation_end_4, datetime_format)
round_dict[1] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_1, validation_end_1],
}
round_dict[2] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[3] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[4] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[5] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[6] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_4, validation_end_4],
}
cv.train_validation_split[k] = round_dict
with open(cv_setting_file, "w") as fp:
json.dump(cv.train_validation_split, fp, indent=True)
#
# ps = ParameterSweeper(config)
#
# script_config = config['ScriptParams']
# ps.sweep_parameters_script(script_config, cv_setting_file,
# parameter_setting_file)
if __name__ == "__main__":
main("backtest_config.json")
| contrib/tsperf/cross_validation/cross_validation.py | 7,638 | The function of this class is currently replaced by HyperDrive.
But let's keep it to preserve the work already done, and also
in case we need more flexibility than what HyperDrive provides.
Copyright (c) Microsoft Corporation. Licensed under the MIT License. In default mode, simply iterate through each feature set in FeatureList Placeholder for more advanced feature selection strategy Run tasks in parallel Collect statuses placeholder for parameter sweeping in python placeholder for parameter sweeping using batch ai parameter_setting_file = os.path.join(work_directory, 'parameter_settings.json') This part adjusts the cv settings due to the specific problem setup of GEFCom2017. Different forecasting setups may require different adjustments. Most setups should not require any adjustment. Training data ends on 12/31, used to forecast Feb. and Mar. Jan. validation range Training data ends on 11/30, used to forecast Jan. and Feb. Training data ends on 01/31, used to forecast Mar. and Apr. Feb. validation range Mar. validation range Apr. validation range ps = ParameterSweeper(config) script_config = config['ScriptParams'] ps.sweep_parameters_script(script_config, cv_setting_file, parameter_setting_file) | 1,283 | en | 0.7129 |
# coding: utf-8
# In[ ]:
import os
import re
import tarfile
import requests
from tqdm import tqdm
from pugnlp.futil import path_status, find_files
# In[ ]:
# From the nlpia package for downloading data too big for the repo
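# Each entry maps a short name to a (download URL, approximate compressed size in bytes)
# pair; the size lets a downloader skip files that already exist locally.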
BIG_URLS = {
'w2v': (
'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',
1647046227,
),
'slang': (
'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',
117633024,
),
'tweets': (
'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',
311725313,
),
'lsa_tweets': (
'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',
3112841563, # 3112841312,
),
'imdb': (
'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',
3112841563, # 3112841312,
),
}
# In[ ]:
# These functions are part of the nlpia package which can be pip installed and run from there.
def dropbox_basename(url):
filename = os.path.basename(url)
match = re.findall(r'\?dl=[0-9]$', filename)
if match:
return filename[:-len(match[0])]
return filename
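# Example: dropbox_basename(BIG_URLS['w2v'][0]) returns 'GoogleNews-vectors-negative300.bin.gz'.
def no_tqdm(it, *args, **kwargs):
    """Minimal pass-through stand-in for nlpia's no_tqdm progress-bar no-op (assumed to
    simply return the iterable unchanged); download_file below refers to it when verbose=False."""
    return it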
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
"""Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https"""
if filename is None:
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = url[:-1] + '1' # noninteractive download
if verbose:
tqdm_prog = tqdm
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm
r = requests.get(url, stream=True, allow_redirects=True)
size = r.headers.get('Content-Length', None) if size is None else size
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if stat['type'] == 'file' and stat['size'] == size: # TODO: check md5 or get the right size of remote file
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive chunks
f.write(chunk)
r.close()
return file_path
def untar(fname):
if fname.endswith("tar.gz"):
with tarfile.open(fname) as tf:
tf.extractall()
else:
print("Not a tar.gz file: {}".format(fname))
# In[ ]:
# UNCOMMENT these 2 lines if you haven't already downloaded the word2vec model and the imdb dataset
# download_file(BIG_URLS['w2v'][0])
# untar(download_file(BIG_URLS['imdb'][0]))
# In[ ]:
maxlen = 400
batch_size = 32
embedding_dims = 300
epochs = 2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
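# return_sequences=True makes the LSTM emit its output at every timestep (a maxlen x
# num_neurons sequence), which is flattened and fed to a single sigmoid unit that scores
# the review as positive or negative.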
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
import glob
import os
from random import shuffle
def pre_process_data(filepath):
"""
This is dependent on your training data source but we will try to generalize it as best as possible.
"""
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset
# In[ ]:
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
def tokenize_and_vectorize(dataset):
tokenizer = TreebankWordTokenizer()
vectorized_data = []
expected = []
for sample in dataset:
tokens = tokenizer.tokenize(sample[1])
sample_vecs = []
for token in tokens:
try:
sample_vecs.append(word_vectors[token])
except KeyError:
pass # No matching token in the Google w2v vocab
vectorized_data.append(sample_vecs)
return vectorized_data
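# What tokenize_and_vectorize returns (a sketch): one entry per review, each a list of 300-d
# numpy vectors, one vector per token found in the (200,000-word) slice of the Google News
# vocabulary loaded above. Out-of-vocabulary tokens are silently dropped, so a sample can end
# up shorter than its raw token count; pad_trunc below evens the lengths back out.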
# In[ ]:
def collect_expected(dataset):
""" Peel of the target values from the dataset """
expected = []
for sample in dataset:
expected.append(sample[0])
return expected
# In[ ]:
def pad_trunc(data, maxlen):
""" For a given dataset pad with zero vectors or truncate to maxlen """
new_data = []
# Create a vector of 0's the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = sample
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
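# Rough illustration of pad_trunc (shapes only, numbers made up): with maxlen=4 and 300-d word
# vectors, a 2-token sample gains two 300-d zero vectors, a 6-token sample is cut to its first
# 4 tokens, and a 4-token sample passes through unchanged, so every sample becomes a
# maxlen x 300 list ready for the np.reshape calls below.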
# In[ ]:
import numpy as np
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
maxlen = 400
batch_size = 32  # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model1.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights1.h5")
print('Model saved.')
# In[ ]:
from keras.models import model_from_json
with open("lstm_model1.json", "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights('lstm_weights1.h5')
# In[ ]:
sample_1 = "I hate that the dismal weather had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend."
# We pass a dummy value in the first element of the tuple just because our helper expects it from the way we processed the initial data. That value won't ever see the network, so it can be whatever.
vec_list = tokenize_and_vectorize([(1, sample_1)])
# Tokenize returns a list of the data (length 1 here)
test_vec_list = pad_trunc(vec_list, maxlen)
test_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims))
print("Sample's sentiment, 1 - pos, 2 - neg : {}".format(model.predict_classes(test_vec)))
print("Raw output of sigmoid function: {}".format(model.predict(test_vec)))
# In[ ]:
def test_len(data, maxlen):
total_len = truncated = exact = padded = 0
for sample in data:
total_len += len(sample)
if len(sample) > maxlen:
truncated += 1
elif len(sample) < maxlen:
padded += 1
else:
            exact += 1
print('Padded: {}'.format(padded))
print('Equal: {}'.format(exact))
print('Truncated: {}'.format(truncated))
print('Avg length: {}'.format(total_len/len(data)))
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
test_len(vectorized_data, 400)
# In[ ]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
maxlen = 200
batch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model7.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights7.h5")
print('Model saved.')
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
# In[ ]:
def avg_len(data):
    total_len = 0
    for sample in data:
        total_len += len(sample[1])
    return total_len / len(data)
print(avg_len(dataset))
# In[ ]:
def clean_data(data):
""" Shift to lower case, replace unknowns with UNK, and listify """
new_data = []
VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower(): # Just grab the string, not the label
if char in VALID:
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data
listified_data = clean_data(dataset)
# In[ ]:
def char_pad_trunc(data, maxlen):
""" We truncate to maxlen or add in PAD tokens """
new_dataset = []
for sample in data:
if len(sample) > maxlen:
new_data = sample[:maxlen]
elif len(sample) < maxlen:
pads = maxlen - len(sample)
new_data = sample + ['PAD'] * pads
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset
maxlen = 1500
# In[ ]:
def create_dicts(data):
""" Modified from Keras LSTM example"""
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char
# In[ ]:
import numpy as np
def onehot_encode(dataset, char_indices, maxlen):
"""
One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
"""
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for i, sentence in enumerate(dataset):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
return X
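# Shape sketch (sizes illustrative): for 25,000 reviews clipped/padded to maxlen=1500 characters
# and a vocabulary of roughly 50 distinct symbols, onehot_encode returns an array of shape
# (25000, 1500, 50) with exactly one 1 per character position.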
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
listified_data = clean_data(dataset)
maxlen = 1500
common_length_data = char_pad_trunc(listified_data, maxlen)
char_indices, indices_char = create_dicts(common_length_data)
encoded_data = onehot_encode(common_length_data, char_indices, maxlen)
# In[ ]:
split_point = int(len(encoded_data)*.8)
x_train = encoded_data[:split_point]
y_train = expected[:split_point]
x_test = encoded_data[split_point:]
y_test = expected[split_point:]
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, LSTM
num_neurons = 40
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, len(char_indices.keys()))))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
# In[ ]:
batch_size = 32
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("char_lstm_model3.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("char_lstm_weights3.h5")
print('Model saved.')
# In[ ]:
from nltk.corpus import gutenberg
print(gutenberg.fileids())
# In[ ]:
text = ''
for txt in gutenberg.fileids():
if 'shakespeare' in txt:
text += gutenberg.raw(txt).lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# In[ ]:
print(text[:500])
# In[ ]:
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
# In[ ]:
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
# In[ ]:
epochs = 6
batch_size = 128
model_structure = model.to_json()
with open("shakes_lstm_model.json", "w") as json_file:
json_file.write(model_structure)
for i in range(5):
model.fit(X, y,
batch_size=batch_size,
epochs=epochs)
model.save_weights("shakes_lstm_weights_{}.h5".format(i+1))
print('Model saved.')
# In[ ]:
### NOT IN CHAPTER, Just to reproduce output
from keras.models import model_from_json
with open('shakes_lstm_model.json', 'r') as f:
model_json = f.read()
model = model_from_json(model_json)
model.load_weights('shakes_lstm_weights_4.h5')
# In[ ]:
import random
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
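# Intuition for the temperature/diversity argument (an aside, not from the original listing):
# dividing the log-probabilities by a value below 1.0 sharpens the distribution so sampling
# stays close to the argmax, while a value above 1.0 flattens it and produces more surprising
# (and more error-prone) text. The diversity values 0.2 / 0.5 / 1.0 used below walk from
# conservative to adventurous output.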
# In[ ]:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
# In[ ]:
from keras.models import Sequential
from keras.layers import GRU
model = Sequential()
model.add(GRU(num_neurons, return_sequences=True, input_shape=X[0].shape))
# In[ ]:
from keras.models import Sequential
from keras.layers import LSTM
model = Sequential()
num_neurons_2 = 20  # width of the second, stacked LSTM layer; the value here is illustrative
model.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))
model.add(LSTM(num_neurons_2, return_sequences=True))
| nlpia/book/examples/ch09.py | 17,875 | We truncate to maxlen or add in PAD tokens
Shift to lower case, replace unknowns with UNK, and listify
Peel of the target values from the dataset
Modified from Keras LSTM example
Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https
One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
For a given dataset pad with zero vectors or truncate to maxlen
This is dependent on your training data source but we will try to generalize it as best as possible.
coding: utf-8 In[ ]: In[ ]: From the nlpia package for downloading data too big for the repo 3112841312, 3112841312, In[ ]: These functions are part of the nlpia package which can be pip installed and run from there. noninteractive download TODO: check md5 or get the right size of remote file filter out keep-alive chunks In[ ]: UNCOMMENT these 2 lines if you haven't already download the word2vec model and the imdb dataset download_file(BIG_URLS['w2v'][0]) untar(download_file(BIG_URLS['imdb'][0])) In[ ]: In[ ]: In[ ]: No matching token in the Google w2v vocab In[ ]: In[ ]: Create a vector of 0's the length of our word vectors In[ ]: How many samples to show the net before backpropogating the error and updating the weights Length of the token vectors we will create for passing into the Convnet In[ ]: In[ ]: In[ ]: In[ ]: We pass a dummy value in the first element of the tuple just because our helper expects it from the way processed the initial data. That value won't ever see the network, so it can be whatever. Tokenize returns a list of the data (length 1 here) In[ ]: In[ ]: How many samples to show the net before backpropagating the error and updating the weights Length of the token vectors we will create for passing into the Convnet In[ ]: In[ ]: In[ ]: In[ ]: Just grab the string, not the label In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: In[ ]: cut the text in semi-redundant sequences of maxlen characters In[ ]: In[ ]: build the model: a single LSTM In[ ]: In[ ]: NOT IN CHAPTER, Just to reproduce output In[ ]: helper function to sample an index from a probability array In[ ]: In[ ]: In[ ]: | 2,357 | en | 0.713907 |
from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd, rip
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'cityscapes':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'rip':
classes = {'full': 7, 'level1': 2, 'level2': 3, 'level3': 5}
import os
from mypath import Path
data_root = Path.db_root_dir(args.dataset)
root = os.path.join(data_root, 'RipTrainingAllData')
patches, level = args.rip_mode.split('-')
if patches == 'patches':
patches = 'COCOJSONPatches'
elif patches == 'patches_v1':
patches = 'COCOJSONPatches_v1'
else:
patches = 'COCOJSONs'
# patches = 'COCOJSONPatches' if patches == 'patches' else 'COCOJSONs'
        train_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold', 'train_1.json')
        val_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold', 'val_1.json')
train_set = rip.RIPSegmentation(args, split='train', root=root, ann_file=train_ann_file)
val_set = rip.RIPSegmentation(args, split='val', root=root, ann_file=val_ann_file)
num_classes = classes[level]
# NOTE: drop_last=True here to avoid situation when batch_size=1 which causes BatchNorm2d errors
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_classes
else:
raise NotImplementedError
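# Minimal usage sketch (argument names mirror the attributes read above; the values are
# illustrative and 'rip_mode' only matters for the 'rip' dataset):
#   from argparse import Namespace
#   args = Namespace(dataset='rip', rip_mode='patches-level3', batch_size=4, use_sbd=False)
#   train_loader, val_loader, test_loader, n_classes = make_data_loader(args, num_workers=2, pin_memory=True)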
| dataloaders/__init__.py | 3,423 | patches = 'COCOJSONPatches' if patches == 'patches' else 'COCOJSONs' NOTE: drop_last=True here to avoid situation when batch_size=1 which causes BatchNorm2d errors | 163 | en | 0.762145 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding=None, out=None):
"""Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter
"""
if encoding is not None:
errors = 'replace'
if method != 'text' and not isinstance(method, TextSerializer):
errors = 'xmlcharrefreplace'
_encode = lambda string: string.encode(encoding, errors)
else:
_encode = lambda string: string
if out is None:
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk))
def get_serializer(method='xml', **kwargs):
"""Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
"""
if isinstance(method, basestring):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs)
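# Combined usage sketch for get_serializer() and encode() (mirrors the doctests further down):
#   from genshi.input import XML
#   stream = XML('<div><br/></div>')
#   html = encode(get_serializer('html')(stream), method='html', encoding='utf-8')
# which would yield '<div><br></div>' under the HTML serialization rules.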
def _prepare_cache(use_cache=True):
"""Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache.
"""
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[kind, input] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return _emit, _get, cache
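# Tiny illustration of the cache helpers returned above (a sketch):
#   _emit, _get, cache = _prepare_cache()
#   _emit(TEXT, u'a & b', Markup('a &amp; b'))   # stores and returns the serialized form
#   _get((TEXT, u'a & b'))                       # -> Markup('a &amp; b') on later occurrences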
class DocType(object):
"""Defines a number of commonly used DOCTYPE declarations as constants."""
HTML_STRICT = (
'html', '-//W3C//DTD HTML 4.01//EN',
'http://www.w3.org/TR/html4/strict.dtd'
)
HTML_TRANSITIONAL = (
'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
'http://www.w3.org/TR/html4/loose.dtd'
)
HTML_FRAMESET = (
'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
'http://www.w3.org/TR/html4/frameset.dtd'
)
HTML = HTML_STRICT
HTML5 = ('html', None, None)
XHTML_STRICT = (
'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
)
XHTML_TRANSITIONAL = (
'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
)
XHTML_FRAMESET = (
'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
)
XHTML = XHTML_STRICT
XHTML11 = (
'html', '-//W3C//DTD XHTML 1.1//EN',
'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
)
SVG_FULL = (
'svg', '-//W3C//DTD SVG 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
)
SVG_BASIC = (
'svg', '-//W3C//DTD SVG Basic 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
)
SVG_TINY = (
'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
)
SVG = SVG_FULL
@classmethod
def get(cls, name):
"""Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1
"""
return {
'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
'html-transitional': DocType.HTML_TRANSITIONAL,
'html-frameset': DocType.HTML_FRAMESET,
'html5': cls.HTML5,
'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
'xhtml-transitional': cls.XHTML_TRANSITIONAL,
'xhtml-frameset': cls.XHTML_FRAMESET,
'xhtml11': cls.XHTML11,
'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
'svg-basic': cls.SVG_BASIC,
'svg-tiny': cls.SVG_TINY
}.get(name.lower())
class XMLSerializer(object):
"""Produces XML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XMLSerializer()(elem.generate())))
<div><a href="foo"/><br/><hr noshade="True"/></div>
"""
_PRESERVE_SPACE = frozenset()
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, cache=True):
"""Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added
"""
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache
def _prepare_cache(self):
return _prepare_cache(self.cache)[:2]
def __call__(self, stream):
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
buf += [' ', attr, '="', escape(value), '"']
buf.append(kind is EMPTY and '/>' or '>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is XML_DECL and not have_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
"""Produces XHTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XHTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br /><hr noshade="noshade" /></div>
"""
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
_BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
'defer', 'disabled', 'ismap', 'multiple',
'nohref', 'noresize', 'noshade', 'nowrap'])
_PRESERVE_SPACE = frozenset([
QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
])
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, drop_xml_decl=True, cache=True):
super(XHTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
namespace_prefixes = namespace_prefixes or {}
namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.drop_xml_decl = drop_xml_decl
self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
drop_xml_decl = self.drop_xml_decl
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
value = attr
elif attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr == 'xml:space':
continue
buf += [' ', attr, '="', escape(value), '"']
if kind is EMPTY:
if tag in empty_elems:
buf.append(' />')
else:
buf.append('></%s>' % tag)
else:
buf.append('>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is XML_DECL and not have_decl and not drop_xml_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
"""Produces HTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(HTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br><hr noshade></div>
"""
_NOESCAPE_ELEMS = frozenset([
QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
QName('style'), QName('http://www.w3.org/1999/xhtml}style')
])
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
"""Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added
"""
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
noescape_elems = self._NOESCAPE_ELEMS
have_doctype = False
noescape = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, _ in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
output = _get((kind, data))
if output is not None:
yield output
if (kind is START or kind is EMPTY) \
and data[0] in noescape_elems:
noescape = True
elif kind is END:
noescape = False
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
if value:
buf += [' ', attr]
elif ':' in attr:
if attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr != 'xmlns':
buf += [' ', attr, '="', escape(value), '"']
buf.append('>')
if kind is EMPTY:
if tag not in empty_elems:
buf.append('</%s>' % tag)
yield _emit(kind, data, Markup(''.join(buf)))
if tag in noescape_elems:
noescape = True
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
noescape = False
elif kind is TEXT:
if noescape:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
"""Produces plain text from an event stream.
Only text events are included in the output. Unlike the other serializer,
special XML characters are not escaped:
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
>>> print(elem)
<div><a href="foo"><Hello!></a><br/></div>
>>> print(''.join(TextSerializer()(elem.generate())))
<Hello!>
If text events contain literal markup (instances of the `Markup` class),
that markup is by default passed through unchanged:
>>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
>>> print(elem.generate().render(TextSerializer, encoding=None))
<a href="foo">Hello & Bye!</a><br/>
You can use the ``strip_markup`` to change this behavior, so that tags and
entities are stripped from the output (or in the case of entities,
replaced with the equivalent character):
>>> print(elem.generate().render(TextSerializer, strip_markup=True,
... encoding=None))
Hello & Bye!
"""
def __init__(self, strip_markup=False):
"""Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed
"""
self.strip_markup = strip_markup
def __call__(self, stream):
strip_markup = self.strip_markup
for event in stream:
if event[0] is TEXT:
data = event[1]
if strip_markup and type(data) is Markup:
data = data.striptags().stripentities()
yield unicode(data)
class EmptyTagFilter(object):
"""Combines `START` and `STOP` events into `EMPTY` events for elements that
have no contents.
"""
EMPTY = StreamEventKind('EMPTY')
def __call__(self, stream):
prev = (None, None, None)
for ev in stream:
if prev[0] is START:
if ev[0] is END:
prev = EMPTY, prev[1], prev[2]
yield prev
continue
else:
yield prev
if ev[0] is not START:
yield ev
prev = ev
EMPTY = EmptyTagFilter.EMPTY
class NamespaceFlattener(object):
r"""Output stream filter that removes namespace information from the stream,
instead adding namespace attributes and prefixes as needed.
:param prefixes: optional mapping of namespace URIs to prefixes
>>> from genshi.input import XML
>>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
... <two:item/>
... </doc>''')
>>> for kind, data, pos in NamespaceFlattener()(xml):
... print('%s %r' % (kind, data))
START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
TEXT u'\n '
START (u'two:item', Attrs())
END u'two:item'
TEXT u'\n'
END u'doc'
"""
def __init__(self, prefixes=None, cache=True):
self.prefixes = {XML_NAMESPACE.uri: 'xml'}
if prefixes is not None:
self.prefixes.update(prefixes)
self.cache = cache
def __call__(self, stream):
prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
namespaces = {XML_NAMESPACE.uri: ['xml']}
_emit, _get, cache = _prepare_cache(self.cache)
def _push_ns(prefix, uri):
namespaces.setdefault(uri, []).append(prefix)
prefixes.setdefault(prefix, []).append(uri)
cache.clear()
def _pop_ns(prefix):
uris = prefixes.get(prefix)
uri = uris.pop()
if not uris:
del prefixes[prefix]
if uri not in uris or uri != uris[-1]:
uri_prefixes = namespaces[uri]
uri_prefixes.pop()
if not uri_prefixes:
del namespaces[uri]
cache.clear()
return uri
ns_attrs = []
_push_ns_attr = ns_attrs.append
def _make_ns_attr(prefix, uri):
return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri
def _gen_prefix():
val = 0
while 1:
val += 1
yield 'ns%d' % val
_gen_prefix = _gen_prefix().next
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield kind, data, pos
continue
output = _get((kind, data))
if output is not None:
yield kind, output, pos
elif kind is START or kind is EMPTY:
tag, attrs = data
tagname = tag.localname
tagns = tag.namespace
if tagns:
if tagns in namespaces:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
else:
_push_ns_attr(('xmlns', tagns))
_push_ns('', tagns)
new_attrs = []
for attr, value in attrs:
attrname = attr.localname
attrns = attr.namespace
if attrns:
if attrns not in namespaces:
prefix = _gen_prefix()
_push_ns(prefix, attrns)
_push_ns_attr(('xmlns:%s' % prefix, attrns))
else:
prefix = namespaces[attrns][-1]
if prefix:
attrname = '%s:%s' % (prefix, attrname)
new_attrs.append((attrname, value))
data = _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)))
yield kind, data, pos
del ns_attrs[:]
elif kind is END:
tagname = data.localname
tagns = data.namespace
if tagns:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
yield kind, _emit(kind, data, tagname), pos
elif kind is START_NS:
prefix, uri = data
if uri not in namespaces:
prefix = prefixes.get(uri, [prefix])[-1]
_push_ns_attr(_make_ns_attr(prefix, uri))
_push_ns(prefix, uri)
elif kind is END_NS:
if data in prefixes:
uri = _pop_ns(data)
if ns_attrs:
attr = _make_ns_attr(data, uri)
if attr in ns_attrs:
ns_attrs.remove(attr)
else:
yield kind, data, pos
class WhitespaceFilter(object):
"""A filter that removes extraneous ignorable white space from the
stream.
"""
def __init__(self, preserve=None, noescape=None):
"""Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents).
"""
if preserve is None:
preserve = []
self.preserve = frozenset(preserve)
if noescape is None:
noescape = []
self.noescape = frozenset(noescape)
def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
collapse_lines=re.compile('\n{2,}').sub):
mjoin = Markup('').join
preserve_elems = self.preserve
preserve = 0
noescape_elems = self.noescape
noescape = False
textbuf = []
push_text = textbuf.append
pop_text = textbuf.pop
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
if noescape:
data = Markup(data)
push_text(data)
else:
if textbuf:
if len(textbuf) > 1:
text = mjoin(textbuf, escape_quotes=False)
del textbuf[:]
else:
text = escape(pop_text(), quotes=False)
if not preserve:
text = collapse_lines('\n', trim_trailing_space('', text))
yield TEXT, Markup(text), pos
if kind is START:
tag, attrs = data
if preserve or (tag in preserve_elems or
attrs.get(space) == 'preserve'):
preserve += 1
if not noescape and tag in noescape_elems:
noescape = True
elif kind is END:
noescape = False
if preserve:
preserve -= 1
elif kind is START_CDATA:
noescape = True
elif kind is END_CDATA:
noescape = False
if kind:
yield kind, data, pos
class DocTypeInserter(object):
"""A filter that inserts the DOCTYPE declaration in the correct location,
after the XML declaration.
"""
def __init__(self, doctype):
"""Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object.
"""
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))
def __call__(self, stream):
doctype_inserted = False
for kind, data, pos in stream:
if not doctype_inserted:
doctype_inserted = True
if kind is XML_DECL:
yield (kind, data, pos)
yield self.doctype_event
continue
yield self.doctype_event
yield (kind, data, pos)
if not doctype_inserted:
yield self.doctype_event
| Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | 31,267 | Defines a number of commonly used DOCTYPE declarations as constants.
A filter that inserts the DOCTYPE declaration in the correct location,
after the XML declaration.
Combines `START` and `STOP` events into `EMPTY` events for elements that
have no contents.
Produces HTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(HTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br><hr noshade></div>
Output stream filter that removes namespace information from the stream,
instead adding namespace attributes and prefixes as needed.
:param prefixes: optional mapping of namespace URIs to prefixes
>>> from genshi.input import XML
>>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
... <two:item/>
... </doc>''')
>>> for kind, data, pos in NamespaceFlattener()(xml):
... print('%s %r' % (kind, data))
START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
TEXT u'\n '
START (u'two:item', Attrs())
END u'two:item'
TEXT u'\n'
END u'doc'
Produces plain text from an event stream.
Only text events are included in the output. Unlike the other serializer,
special XML characters are not escaped:
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
>>> print(elem)
<div><a href="foo"><Hello!></a><br/></div>
>>> print(''.join(TextSerializer()(elem.generate())))
<Hello!>
If text events contain literal markup (instances of the `Markup` class),
that markup is by default passed through unchanged:
>>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
>>> print(elem.generate().render(TextSerializer, encoding=None))
<a href="foo">Hello & Bye!</a><br/>
You can use the ``strip_markup`` to change this behavior, so that tags and
entities are stripped from the output (or in the case of entities,
replaced with the equivalent character):
>>> print(elem.generate().render(TextSerializer, strip_markup=True,
... encoding=None))
Hello & Bye!
A filter that removes extraneous ignorable white space from the
stream.
Produces XHTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XHTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br /><hr noshade="noshade" /></div>
Produces XML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XMLSerializer()(elem.generate())))
<div><a href="foo"/><br/><hr noshade="True"/></div>
Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added
Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added
Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed
Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents).
Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object.
Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache.
Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter
Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1
Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
This module provides different kinds of serialization methods for XML event
streams.
-*- coding: utf-8 -*- Copyright (C) 2006-2009 Edgewall Software All rights reserved. This software is licensed as described in the file COPYING, which you should have received as part of this distribution. The terms are also available at http://genshi.edgewall.org/wiki/License. This software consists of voluntary contributions made by many individuals. For the exact contribution history, see the revision history and logs, available at http://genshi.edgewall.org/log/. | 7,469 | en | 0.659052 |
# -*- coding: utf-8 -*-
# Begin CVS Header
# $Source: /Volumes/Home/Users/shoops/cvs/copasi_dev/copasi/bindings/python/unittests/Test_CMoiety.py,v $
# $Revision: 1.11 $
# $Name: $
# $Author: shoops $
# $Date: 2010/07/16 18:55:59 $
# End CVS Header
# Copyright (C) 2010 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CMoiety(unittest.TestCase):
def setUp(self):
self.datamodel=COPASI.CCopasiRootContainer.addDatamodel()
self.model=self.datamodel.getModel()
self.model.createCompartment("comp1",1.0)
self.model.createCompartment("comp2",2.0)
m1=self.model.createMetabolite("A","comp1")
m2=self.model.createMetabolite("B","comp1")
m3=self.model.createMetabolite("C","comp1")
m4=self.model.createMetabolite("D","comp1")
m5=self.model.createMetabolite("E","comp2")
m6=self.model.createMetabolite("F","comp2")
m7=self.model.createMetabolite("G","comp2")
r=self.model.createReaction("react1")
r.addSubstrate(m1.getKey())
r.addProduct(m2.getKey())
r=self.model.createReaction("react2")
r.addSubstrate(m3.getKey())
r.addProduct(m5.getKey())
r=self.model.createReaction("react3")
r.addSubstrate(m6.getKey())
r.addProduct(m2.getKey())
self.model.compileIfNecessary()
self.moiety=self.model.getMoiety(0)
def test_getDescription(self):
desc=self.moiety.getDescription(self.model)
self.assert_(type(desc)==StringType)
def test_dependentNumber(self):
v=self.moiety.dependentNumber()
self.assert_(type(v)==FloatType)
def test_getNumber(self):
v=self.moiety.getNumber()
self.assert_(type(v)==FloatType)
def test_getKey(self):
key=self.moiety.getKey()
self.assert_(type(key)==StringType)
def test_getDependentNumber(self):
v=self.moiety.getDependentNumber()
self.assert_(type(v)==FloatType)
def suite():
tests=[
'test_getDescription'
           ,'test_dependentNumber'
,'test_getNumber'
,'test_getKey'
,'test_getDependentNumber'
]
return unittest.TestSuite(map(Test_CMoiety,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| copasi/bindings/python/unittests/Test_CMoiety.py | 2,530 | -*- coding: utf-8 -*- Begin CVS Header $Source: /Volumes/Home/Users/shoops/cvs/copasi_dev/copasi/bindings/python/unittests/Test_CMoiety.py,v $ $Revision: 1.11 $ $Name: $ $Author: shoops $ $Date: 2010/07/16 18:55:59 $ End CVS Header Copyright (C) 2010 by Pedro Mendes, Virginia Tech Intellectual Properties, Inc., University of Heidelberg, and The University of Manchester. All rights reserved. Copyright (C) 2008 by Pedro Mendes, Virginia Tech Intellectual Properties, Inc., EML Research, gGmbH, University of Heidelberg, and The University of Manchester. All rights reserved. | 601 | en | 0.670641 |
# build_features.py
# This module holds utility classes and functions that create and manipulate input features
# This module also holds the various input transformers
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
"""
    Columns that are correlated with the target column
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
    k: float
        The absolute-correlation cutoff point; defaults to 0.5.
        Columns whose absolute correlation with the target is at least k are kept
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point
"""
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for col, value in zip(corr_sorted.index, corr_sorted.values) if value >= k and col != target_column]
return columns
class ColumnExtractor(BaseEstimator, TransformerMixin):
"""Columns Extractor based on correlation to the output label"""
def __init__(self, columns):
print(columns)
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.columns]
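# Usage sketch (hypothetical file path and estimator; shows how the two pieces compose):
#   df = pd.read_csv('creditcard.csv')
#   cols = correlation_columns(df, 'Class', k=0.3)
#   from sklearn.pipeline import Pipeline
#   from sklearn.linear_model import LogisticRegression
#   pipe = Pipeline([('select', ColumnExtractor(cols)), ('clf', LogisticRegression())])
#   pipe.fit(df.drop(columns='Class'), df['Class'])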
if __name__ == '__main__':
correlation_columns(pd.read_csv('././data/raw/creditcard.csv'), 'Class', k=0.3) | credit-card-fraud/src/features/build_features.py | 1,572 | Columns Extractor based on correlation to the output label
Columns that are correlated to the target point
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
k: float
The correlation cuttoff point; defaults to -0.5 and 0.5.
The values passed in represents the negative and positive cutofff
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point
build_features.py This module holds utility classes and functions that creates and manipulates input features This module also holds the various input transformers | 680 | en | 0.725502 |
# Generated by Django 3.0 on 2019-12-12 08:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0002_auto_20191210_2330'),
('members', '0001_initial'),
]
operations = [
        migrations.AddField(
            model_name='member',
            name='team',
            field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='team.Team'),
            preserve_default=False,
        ),
]
| backend/members/migrations/0002_member_team.py | 496 | Generated by Django 3.0 on 2019-12-12 08:54 | 43 | en | 0.765422 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
        .. note:: the current implementation of cummin uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
        .. note:: the current implementation of cummax uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
        .. note:: the current implementation of cumsum uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
        .. note:: the current implementation of cumprod uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
        .. note:: unlike pandas', pandas-on-Spark emulates the cumulative product via the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
        Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
        By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
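    # Editorial sketch (not part of the upstream source): the ``exp(sum(log(...)))`` trick
    # noted in the cumprod docstring above can be reproduced with plain PySpark window
    # expressions. The names ``spark``, ``sdf`` and ``w`` below are assumptions made for
    # this illustration only; expect small floating-point rounding in the result.
    #
    #     from pyspark.sql import SparkSession, Window, functions as F
    #
    #     spark = SparkSession.builder.getOrCreate()
    #     sdf = spark.createDataFrame([(1, 2.0), (2, 3.0), (3, 4.0)], ["id", "x"])
    #     w = Window.orderBy("id").rowsBetween(Window.unboundedPreceding, Window.currentRow)
    #     # Running product of positive values: exponentiate the running sum of logs.
    #     sdf.select("id", F.exp(F.sum(F.log("x")).over(w)).alias("cumprod")).show()
    #     # id=1 -> 2.0, id=2 -> 6.0, id=3 -> 24.0 (up to rounding)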
    # TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
        You can use a lambda as well.
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are passed through as PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
                # pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
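    # Editorial sketch (not part of the upstream source): the private ``_set_opts`` call
    # above corresponds to public DataFrameWriter options. A roughly equivalent write of a
    # Spark DataFrame ``sdf`` (hypothetical name and output path) would be:
    #
    #     (sdf.write.mode("overwrite")
    #         .option("sep", ",")
    #         .option("nullValue", "")
    #         .option("header", True)
    #         .option("quote", '"')
    #         .format("csv")
    #         .save("/tmp/example_csv_output"))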
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
        .. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If ‘orient’ is ‘records’ write out line delimited json format.
Will throw ValueError if incorrect ‘orient’ since others are not
            list-like. It should always be True for now.
orient : str, default 'records'
            It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are passed through as PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
        Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
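    # Editorial sketch (not part of the upstream source): because the ``mean`` closure
    # above casts boolean columns to LongType, averaging a boolean Series yields the
    # fraction of True values. Assuming a running pandas-on-Spark session:
    #
    #     import pyspark.pandas as ps
    #
    #     ps.Series([True, False, True, True]).mean()   # 0.75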
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
        .. note:: unlike pandas', pandas-on-Spark emulates the product via the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
        Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
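    # Editorial sketch (not part of the upstream source): since ``exp(sum(log(...)))`` is
    # only defined for positive values, the ``prod`` closure above tracks zeros and signs
    # separately. Expressed with plain PySpark functions over a hypothetical column "x":
    #
    #     from pyspark.sql import functions as F
    #
    #     num_zeros = F.sum(F.when(F.col("x") == 0, 1).otherwise(0))
    #     sign = F.when(F.sum(F.when(F.col("x") < 0, 1).otherwise(0)) % 2 == 0, 1).otherwise(-1)
    #     prod_expr = F.when(num_zeros > 0, 0).otherwise(
    #         sign * F.exp(F.sum(F.log(F.abs(F.col("x")))))
    #     )
    #     # e.g. sdf.select(prod_expr.alias("prod")).show()  # sdf is a hypothetical DataFrame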
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
        count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
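    # Editorial sketch (not part of the upstream source): the approximate median above is a
    # direct use of ``percentile_approx`` at the 0.5 quantile; larger ``accuracy`` lowers
    # the relative error (roughly 1.0 / accuracy) at a higher cost. With a hypothetical
    # Spark DataFrame ``sdf`` and numeric column "x":
    #
    #     from pyspark.sql import functions as F
    #
    #     sdf.select(F.percentile_approx("x", 0.5, 10000).alias("approx_median")).show()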
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar(for Series) or Series(for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
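    # Editorial sketch (not part of the upstream source): ``sem`` above is simply the
    # standard deviation divided by the square root of the non-null count, so for the
    # docstring example [1, 2, 3]:
    #
    #     std_samp = 1.0                   # sample standard deviation of [1, 2, 3]
    #     sem = std_samp / (3 ** 0.5)      # ~= 0.5773502691896258, matching psser.sem()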
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
        This must be a boolean scalar value, either True or False. Raise a ValueError if
        the object does not have exactly 1 element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
        If a non-boolean value or multiple values exist, it raises an exception in all
        cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
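# DataFrame.tail is only available in Spark 3.0+, hence the "PySpark >= 3.0" note
# in the docstring above.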
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
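# A minimal usage sketch (hedged; Rolling.sum is one of the provided aggregations):
#   ps.Series([1, 2, 3, 4]).rolling(window=2).sum()  ->  NaN, 3.0, 5.0, 7.0
# With the default min_periods equal to the window size, the first value stays NaN.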
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
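# Usage sketch (hedged): ps.Series([1, 2, 3]).expanding(min_periods=2).sum() yields
# NaN, 3.0, 6.0, since the first window holds fewer than min_periods observations.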
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
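# `columns[:2]` is enough to tell whether there is exactly one column, and the
# head(2) check below is enough to tell whether there is exactly one row.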
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` is only available with pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
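# Validate the passed arguments against pandas' to_markdown signature, then
# invoke it on the collected pandas object.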
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Handle floating point types specially because Spark's count treats NaN as a valid value,
# whereas pandas' count doesn't include NaN.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| python/pyspark/pandas/generic.py | 104,770 | The base class for both DataFrame and Series.
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or that element is not boolean
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If there are non-boolean or multiple values exist, it raises an exception in all
cases as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
.. note:: unlike pandas', pandas-on-Spark's emulates cumulative product by
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If the values is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use lambda as wel
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark's emulates product by ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type column is not included to the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar(for Series) or Series(for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
This kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If ‘orient’ is ‘records’, write out line-delimited JSON format.
Will throw ValueError if ‘orient’ is incorrect, since the others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These are passed through as PySpark's JSON-specific options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with an index of sorted integers.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with an index of sorted strings.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
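As a quick worked check of ``ddof`` using the values above: in column ``a = [1, 2, 3, np.nan]``
the ``np.nan`` is skipped, so N = 3, the mean is 2, and the sum of squared deviations is
``(1 - 2)**2 + (2 - 2)**2 + (3 - 2)**2 = 2``; hence ``var(ddof=1) = 2 / (3 - 1) = 1.0`` and
``var(ddof=0) = 2 / 3 ≈ 0.6667``, matching the outputs shown.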
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. noqa: F401 For running doctests and reference resolution in PyCharm. noqa: F401 (SPARK-34943) noqa: F401 (SPARK-34943) noqa: F401 (SPARK-34943) noqa: F401 (SPARK-34943) noqa: F401 (SPARK-34943) TODO: add 'axis' parameter TODO: add 'axis' parameter TODO: add 'axis' parameter TODO: add 'axis' parameter TODO: use pandas_udf to support negative values and other options later other window except unbounded ones is supported as of Spark 3.0. TODO: Although this has removed pandas >= 1.0.0, but we're keeping this as deprecated since we're using this for `DataFrame.info` internally. We can drop it once our minimal pandas version becomes 1.0.0. type: ignore If path is none, just collect and use pandas's to_csv. 0.23 seems not having 'columns' parameter in Series' to_csv. type: ignore type: ignore type: ignore type: ignore type: ignore If path is none, just collect and use pandas's to_json. type: ignore To make the format consistent and readable by `read_json`, convert it to pandas' and use 'records' orient for now. type: ignore Make sure locals() call is at the top of the function so we don't capture local variables. type: ignore TODO: by argument only support the grouping name and as_index only for now. Documentation should be updated when it's supported. type: List[Union[Tuple, ps.Series]] Disable Arrow to keep row ordering. For Empty Series or DataFrame, returns None. For Empty Series or DataFrame, returns None. TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented. TODO: 'center' and 'axis' parameter should be implemented. 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607 If DataFrame has multiple columns, there is no change. If DataFrame has only a single value, use pandas API directly. The case of Series is simple. If Series has only a single value, just return it as a scalar. Otherwise, there is no change. `to_markdown` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0. Make sure locals() call is at the top of the function so we don't capture local variables. TODO: add 'downcast' when value parameter exists TODO: add 'downcast' when value parameter exists type: ignore type: ignore type: ignore type: ignore Special handle floating point types because Spark's count treats nan as a valid value, whereas pandas count doesn't include nan. | 56,549 | en | 0.536522 |
import importlib
import sys
import argparse
from multi_sample_factory.algorithms.utils.algo_utils import ExperimentStatus
from multi_sample_factory.runner.run_ngc import add_ngc_args
from multi_sample_factory.runner.run_slurm import add_slurm_args
from multi_sample_factory.utils.utils import log
def runner_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir', default='./train_dir', type=str, help='Directory for sub-experiments')
parser.add_argument('--run', default=None, type=str,
help='Name of the python module that describes the run, e.g. multi_sample_factory.runner.runs.doom_battle_hybrid')
parser.add_argument('--runner', default='processes', choices=['processes', 'slurm', 'ngc'])
parser.add_argument('--pause_between', default=10, type=int, help='Pause in seconds between processes')
parser.add_argument('--num_gpus', default=1, type=int, help='How many GPUs to use')
parser.add_argument('--experiments_per_gpu', default=-1, type=int, help='How many experiments can we squeeze on a single GPU (-1 for not altering CUDA_VISIBLE_DEVICES at all)')
parser.add_argument('--max_parallel', default=4, type=int, help='Maximum simultaneous experiments')
parser.add_argument('--experiment_suffix', default='', type=str, help='Append this to the name of the experiment dir')
parser = add_slurm_args(parser)
parser = add_ngc_args(parser)
return parser
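# A minimal usage sketch (the run module name below is hypothetical):
#   python -m multi_sample_factory.runner.run --run=my_runs.grid_search --runner=processes \
#       --num_gpus=1 --experiments_per_gpu=2 --max_parallel=4 --experiment_suffix=v1
# The module named by --run is imported in main() and must expose a RUN_DESCRIPTION object.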
def parse_args():
args = runner_argparser().parse_args(sys.argv[1:])
return args
def main():
args = parse_args()
try:
# assuming we're given the full name of the module
run_module = importlib.import_module(f'{args.run}')
except ImportError:
try:
run_module = importlib.import_module(f'multi_sample_factory.runner.runs.{args.run}')
except ImportError:
log.error('Could not import the run module')
return ExperimentStatus.FAILURE
run_description = run_module.RUN_DESCRIPTION
run_description.experiment_suffix = args.experiment_suffix
if args.runner == 'processes':
from multi_sample_factory.runner.run_processes import run
run(run_description, args)
elif args.runner == 'slurm':
from multi_sample_factory.runner.run_slurm import run_slurm
run_slurm(run_description, args)
elif args.runner == 'ngc':
from multi_sample_factory.runner.run_ngc import run_ngc
run_ngc(run_description, args)
return ExperimentStatus.SUCCESS
if __name__ == '__main__':
sys.exit(main())
| multi_sample_factory/runner/run.py | 2,676 | assuming we're given the full name of the module | 48 | en | 0.735049 |
"""
Showcases *LLAB(l:c)* colour appearance model computations.
"""
import numpy as np
import colour
from colour.appearance.llab import CAM_ReferenceSpecification_LLAB
from colour.utilities import message_box
message_box('"LLAB(l:c)" Colour Appearance Model Computations')
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_0 = np.array([95.05, 100.00, 108.88])
Y_b = 20.0
L = 318.31
surround = colour.VIEWING_CONDITIONS_LLAB["ref_average_4_minus"]
message_box(
f'Converting to the "LLAB(l:c)" colour appearance model specification '
f"using given parameters:\n\n"
f"\tXYZ: {XYZ}\n"
f"\tXYZ_0: {XYZ_0}\n"
f"\tY_b: {Y_b}\n"
f"\tL: {L}\n"
f"\tsurround: {surround}"
)
specification = colour.XYZ_to_LLAB(XYZ, XYZ_0, Y_b, L, surround)
print(specification)
print("\n")
message_box(
'Broadcasting the current output "LLAB(l:c)" colour appearance '
"model specification to the reference specification.\n"
"The intent of this reference specification is to provide names "
'as closest as possible to the "Mark D. Fairchild" reference.\n'
"The current output specification is meant to be consistent with "
"the other colour appearance model specification by using same "
"argument names for consistency wherever possible."
)
print(CAM_ReferenceSpecification_LLAB(*specification.values))
| colour/examples/appearance/examples_llab.py | 1,331 | Showcases *LLAB(l:c)* colour appearance model computations. | 59 | en | 0.718263 |
import random
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_NO_OP = actions.FUNCTIONS.no_op.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id
_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id
_HARVEST_GATHER = actions.FUNCTIONS.Harvest_Gather_screen.id
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
_PLAYER_ID = features.SCREEN_FEATURES.player_id.index
_PLAYER_SELF = 1
_PLAYER_HOSTILE = 4
_ARMY_SUPPLY = 5
_TERRAN_COMMANDCENTER = 18
_TERRAN_SCV = 45
_TERRAN_SUPPLY_DEPOT = 19
_TERRAN_BARRACKS = 21
_NEUTRAL_MINERAL_FIELD = 341
_NOT_QUEUED = [0]
_QUEUED = [1]
_SELECT_ALL = [2]
DATA_FILE = 'sparse_agent_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# Stolen from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
state_action = self.q_table.loc[observation, :]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.loc[s_, :].max()
else:
q_target = r # next state is terminal
# update
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(
pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
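# The table above implements tabular epsilon-greedy Q-learning keyed by the string form
# of a state, using the standard update
#   Q(s, a) <- Q(s, a) + lr * (r + gamma * max_a' Q(s', a') - Q(s, a))
# A minimal usage sketch (variable names are illustrative):
#   qt = QLearningTable(actions=list(range(len(smart_actions))))
#   action = qt.choose_action(str(current_state))
#   qt.learn(str(previous_state), previous_action, reward, str(current_state))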
class SparseAgent(base_agent.BaseAgent):
def __init__(self):
super(SparseAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def step(self, obs):
super(SparseAgent, self).step(obs)
if obs.last():
reward = obs.reward
self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')
self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')
self.previous_action = None
self.previous_state = None
self.move_number = 0
return actions.FunctionCall(_NO_OP, [])
unit_type = obs.observation['screen'][_UNIT_TYPE]
if obs.first():
player_y, player_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
self.cc_y, self.cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_y, cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_count = 1 if cc_y.any() else 0
depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()
supply_depot_count = int(round(len(depot_y) / 69))
barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()
barracks_count = int(round(len(barracks_y) / 137))
if self.move_number == 0: # phase 0: read the state, pick an action, select the unit it needs
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = obs.observation['player'][_ARMY_SUPPLY]
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
target = [unit_x[i], unit_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if barracks_y.any():
i = random.randint(0, len(barracks_y) - 1)
target = [barracks_x[i], barracks_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_SELECT_ALL, target])
elif smart_action == ACTION_ATTACK:
if _SELECT_ARMY in obs.observation['available_actions']:
return actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])
elif self.move_number == 1: # phase 1: issue the chosen build/train/attack command
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']:
if self.cc_y.any():
if supply_depot_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), -35, round(self.cc_y.mean()), 0)
elif supply_depot_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), -25, round(self.cc_y.mean()), -25)
return actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and _BUILD_BARRACKS in obs.observation['available_actions']:
if self.cc_y.any():
if barracks_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), -9)
elif barracks_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), 12)
return actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if _TRAIN_MARINE in obs.observation['available_actions']:
return actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])
elif smart_action == ACTION_ATTACK:
do_it = True
if len(obs.observation['single_select']) > 0 and obs.observation['single_select'][0][0] == _TERRAN_SCV:
do_it = False
if len(obs.observation['multi_select']) > 0 and obs.observation['multi_select'][0][0] == _TERRAN_SCV:
do_it = False
if do_it and _ATTACK_MINIMAP in obs.observation["available_actions"]:
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED,
self.transformLocation(int(x) + (x_offset * 8),
int(y) + (y_offset * 8))])
elif self.move_number == 2: # phase 2: for build actions, send the SCV back to harvesting
self.move_number = 0
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if _HARVEST_GATHER in obs.observation['available_actions']:
unit_y, unit_x = (unit_type == _NEUTRAL_MINERAL_FIELD).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
m_x = unit_x[i]
m_y = unit_y[i]
target = [int(m_x), int(m_y)]
return actions.FunctionCall(_HARVEST_GATHER, [_QUEUED, target])
return actions.FunctionCall(_NO_OP, []) | 7. Using Reward for Agent/reward_agent.py | 10,878 | Stolen from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow a list choose best action some actions have the same value choose random action next state is terminal update append new state to q table | 218 | en | 0.766727 |
"""
# Sheets Account
Read a Google Sheet as if it were a realtime source of transactions
for a GL account. Columns are mapped to attributes. The
assumption is that the sheet maps to a single account, and the
rows are the credit/debits to that account.
Can be used as a plugin, which will write new entries (for reference)
to a file, but also maintain a "live" view of the transactions.
We support most of the sane columns on a sheet:
- date
- narration
- payee
- account
- amount
- currency
- tags
- links
- Anything else, if non-empty cell, gets added as a META
Some things to look at are:
- Multi-currency Support
- Lot support?
- Other Directives: Note, Document, Balance?
- Smarter per-sheet caching of local results
I strongly suggest using "Transfer" accounts for all asset movements between
two accounts both of which are tracked via a Sheet. This simplifies the
"Matching" and allows each side to be reconciled independently.
TODO: Default Account when account column is blank?
"""
# stdlib imports
import logging
import decimal
import pprint
import typing
import datetime
import dateparser
import pathlib
import slugify
# Beancount imports
from beancount.core import data
from coolbeans.utils import safe_plugin, get_setting
from coolbeans.tools.sheets import google_connect, safe_open_sheet
from coolbeans.plugins.accountsync import apply_coolbean_settings
import gspread
STRIP_SYMBOLS = '₱$'
DEFAULT_CURRENCY = "USD"
logger = logging.getLogger(__name__)
__plugins__ = ['apply_coolbean_settings', 'remote_entries_plugin']
def clean_slug(slug):
"""Clean a possible Slug string to remove dashes and lower case."""
return slug.replace('-', '').lower()
def coolbean_sheets(entries, context):
"""Given a set of entries, pull out any slugs and add them to the context"""
settings = context.setdefault('coolbean-accounts', {})
# Pull out any 'slug' meta data
for entry in entries:
if isinstance(entry, data.Open):
document = entry.meta.get('document_name', None)
tab = entry.meta.get('document_tab', None)
slug = entry.meta.get('slug', "")
if document and tab and slug:
settings[slug] = {
'account': entry.account,
'document': document,
'tab': tab,
'currencies': entry.currencies
}
else:
if document or tab:
print(f"Skipping {entry.account}: {document}/{tab}/{slug}")
return entries, []
def remote_entries(entries, options_map):
"""
@param entries:
@param options_map:
@return:
"""
errors = []
settings = options_map['coolbeans']
secrets_file = get_setting('google-apis', settings)
connection = google_connect(secrets_file)
new_entries_path = None
new_entries_file = get_setting('new-entries-bean', settings)
if new_entries_file:
new_entries_path = pathlib.Path(new_entries_file)
# Capture the configuration off the Open
remote_accounts = {}
for entry in entries:
if not isinstance(entry, data.Open):
continue
document_name = entry.meta.get('document_name', None)
default_currency = entry.currencies[0] if entry.currencies else DEFAULT_CURRENCY
if document_name:
options = dict(
document_name=document_name,
document_tab=entry.meta.get('document_tab', None),
reverse_amount=entry.meta.get('reverse', False),
default_currency=default_currency,
entry=entry,
entry_file=new_entries_path
)
remote_accounts[entry.account] = options
new_entries = []
for account, options in remote_accounts.items():
try:
new_entries += load_remote_account(
connection=connection,
errors=errors,
account=account,
options=options
)
except Exception as exc:
logger.error(f"while processing {account}", exc_info=exc)
if new_entries and new_entries_path:
from beancount.parser import printer
with new_entries_path.open("w") as stream:
printer.print_entries(new_entries, file=stream)
logger.info(f"Wrote {len(new_entries)} new account(s) to {new_entries_path}.")
return entries+new_entries, errors
remote_entries_plugin = safe_plugin(remote_entries)
ALIASES = {
'narration': ['description', 'notes', 'details', 'memo']
}
def clean_record(record: typing.Dict[str, str]):
"""This is a bit of a hack. But using get_all_records doesn't leave many
options"""
new_record = {}
for k, v in record.items():
k = slugify.slugify(k.lower().strip())
v = str(v)
# Combine multiple narration columns if needed:
for field, names in ALIASES.items():
new_record.setdefault(field, '')
if k in names:
# Add the value to Narration:
new_record[field] += ('. ' if new_record[field] else '') + v
k = None # Clear this Key
break
# Really ugly hack around embedded currency symbols. Needs cleanup
if k == 'amount':
v = v.replace(',', '')
for s in STRIP_SYMBOLS:
v = v.replace(s, '')
if v and not v[0].isdecimal() and not v[0]=='-':
v = v[1:]
# Pull currency?
# Decimal is fussy
try:
v = decimal.Decimal(v)
except decimal.InvalidOperation:
v = 0
if k:
new_record[k] = v
return new_record
def load_remote_account(
connection: gspread.Client,
errors: list,
account: str,
options: typing.Dict[str, str]
):
"""Try to Load Entries from URL into Account.
options include:
- document_name -- the Actual Google Doc name
- document_tab -- the Tab name on the Doc
- default_currency - the entry currency if None is provided
- reverse_amount - if true, assume positive entries are credits
"""
entries = []
document_name = options['document_name']
document_tab = options.get('document_tab', 0) or 0
default_currency = options['default_currency']
reverse_amount = options.get('reverse_amount', False)
if not document_name:
return []
m = -1 if reverse_amount else 1
logger.info(f"Attempting to download entries for {account} from {document_name}.{document_tab}")
workbook = connection.open(document_name)
sheet = None
try:
document_tab = int(document_tab)
sheet = workbook.get_worksheet(document_tab)
except ValueError:
pass
if sheet is None:
sheet = workbook.worksheet(document_tab)
records = sheet.get_all_records()
import re
row = 0
# logger.info(f"Found {len(records)} entries.")
for record in records:
row += 1
record = clean_record(record)
if 'date' not in record or not record['date']:
continue
if 'amount' not in record or not record['amount']:
continue
#if 'account' not in record or not record['account'].strip():
# continue
narration = record.pop('narration', None)
payee = record.pop('payee', None)
tagstr = record.pop('tags', '')
tags = set(re.split(r'\W+', tagstr)) if tagstr else set()
date = dateparser.parse(record.pop('date'))
if date:
date = datetime.date(year=date.year, month=date.month, day=date.day)
linkstr = record.pop('links', '')
links = set(re.split(r'\W+', linkstr)) if linkstr else set()
meta = {
'filename': str(options['entry_file']),
'lineno': 0,
'document-sheet-row': f"{document_name}/{document_tab}/{row+1}"
}
amount = decimal.Decimal(record.pop('amount')) * m
currency = record.pop('currency', default_currency)
entry_account = record.pop('account')
for k, v in record.items():
if v:
meta[k] = v
try:
if not entry_account:
errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
continue
entry = data.Transaction(
date=date,
narration=narration,
payee=payee,
tags=tags,
meta=meta,
links=links,
flag='*',
postings=[
data.Posting(
account=account,
units=data.Amount(amount, currency),
cost=None,
price=None,
flag='*',
meta={}
),
data.Posting(
account=entry_account,
units=data.Amount(-amount, currency),
cost=None,
price=None,
flag='*',
meta={}
)
]
)
entries.append(entry)
except Exception as exc:
logger.error(f"Error while parsing {record}", exc_info=exc)
errors.append(str(exc))
logger.info(f"Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}")
return entries
| src/coolbeans/plugins/sheetsaccount.py | 9,782 | This is a bit of a hack. But using get_all_records doesn't leave many
options
Clean a possible Slug string to remove dashes and lower case.
Given a set of entries, pull out any slugs and add them to the context
Try to Load Entries from URL into Account.
options include:
- document_name -- the Actual Google Doc name
- document_tab -- the Tab name on the Doc
- default_currency - the entry currency if None is provided
- reverse_amount - if true, assume positive entries are credits
@param entries:
@param options_map:
@return:
# Sheets Account
Read a Google Sheet as if it were are realtime source of transactions
for a GL account. Columns are mapped to attributes. The
assumption is that the sheet maps to a single account, and the
rows are the credit/debits to that account.
Can be used as a plugin, which will write new entries (for reference)
to a file, but also maintain a "live" view of the transactions.
We support most of the sane columns on a sheet:
- date
- narration
- payee
- account
- amount
- currency
- tags
- links
- Anything else, if non-empty cell, gets added as a META
Some things to look at are:
- Multi-currency Support
- Lot support?
- Other Directives: Note, Document, Balance?
- Smarter per-sheet caching of local results
I strongly suggest using "Transfer" accounts for all asset movements between
two accounts both of which are tracked via a Sheet. This simplifies the
"Matching" and allows each side to be reconciled independently.
TODO: Default Account when account column is blank?
stdlib imports Beancount imports Pull out any 'slug' meta data Capture the configuration off the Open Combine multiple narration columns if needed: Add the value to Narration: Clear this Key Really Ugly hack around embeded currency symbols. Needs Cleanup Pull currency? Decimal is fussy logger.info(f"Found {len(records)} entries.")if 'account' not in record or not record['account'].strip(): continue | 1,998 | en | 0.838002 |
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.resources.properties.OnlyOne import OnlyOne # pylint: disable=E0401
class TestPropertyOnlyOne(BaseRuleTestCase):
"""Test OnlyOne Property Configuration"""
def setUp(self):
"""Setup"""
super(TestPropertyOnlyOne, self).setUp()
self.collection.register(OnlyOne())
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative(
'test/fixtures/templates/bad/resources/properties/onlyone.yaml', 5)
| test/unit/rules/resources/properties/test_onlyone.py | 748 | Test OnlyOne Property Configuration
Setup
Test failure
Test Positive
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
pylint: disable=E0401 | 195 | en | 0.62839 |
import unittest
from unittest.mock import patch, call, Mock, MagicMock, mock_open
from botocore.exceptions import ClientError
from ground_truth.src import ground_truth
from common import _utils
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'fake-task',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
class GroundTruthTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
parser = ground_truth.create_parser()
cls.parser = parser
def test_create_parser(self):
self.assertIsNotNone(self.parser)
def test_main(self):
# Mock out all of utils except parser
ground_truth._utils = MagicMock()
ground_truth._utils.add_default_client_arguments = _utils.add_default_client_arguments
# Set some static returns
ground_truth._utils.get_labeling_job_outputs.return_value = ('s3://fake-bucket/output', 'arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
with patch('builtins.open', mock_open()) as file_open:
ground_truth.main(required_args)
# Check if correct requests were created and triggered
ground_truth._utils.create_labeling_job.assert_called()
ground_truth._utils.wait_for_labeling_job.assert_called()
ground_truth._utils.get_labeling_job_outputs.assert_called()
# Check the file outputs
file_open.assert_has_calls([
call('/tmp/output_manifest_location.txt', 'w'),
call('/tmp/active_learning_model_arn.txt', 'w')
], any_order=True)
file_open().write.assert_has_calls([
call('s3://fake-bucket/output'),
call('arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
], any_order=False)
def test_ground_truth(self):
mock_client = MagicMock()
mock_args = self.parser.parse_args(required_args)
response = _utils.create_labeling_job(mock_client, vars(mock_args))
mock_client.create_labeling_job.assert_called_once_with(
HumanTaskConfig={'WorkteamArn': None, 'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': '', 'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job', 'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': ''}},
InputConfig={'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}}},
LabelAttributeName='test_job', LabelingJobName='test_job',
OutputConfig={'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
RoleArn='arn:aws:iam::123456789012:user/Development/product_1234/*', Tags=[]
)
self.assertEqual(response, 'test_job')
def test_sagemaker_exception_in_ground_truth(self):
mock_client = MagicMock()
mock_exception = ClientError({"Error": {"Message": "SageMaker broke"}}, "ground_truth")
mock_client.create_labeling_job.side_effect = mock_exception
mock_args = self.parser.parse_args(required_args)
with self.assertRaises(Exception):
_utils.create_labeling_job(mock_client, vars(mock_args))
def test_wait_for_labeling_job_creation(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Completed"},
{"LabelingJobStatus": "Should not be called"}
]
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
def test_wait_for_labeling_job_failure(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Failed"},
{"LabelingJobStatus": "Should not be called"}
]
with self.assertRaises(Exception):
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
def test_get_labeling_job_output_from_job(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.return_value = {"LabelingJobOutput": {
"OutputDatasetS3Uri": "s3://path/",
"FinalActiveLearningModelArn": "fake-arn"
}}
output_manifest, active_learning_model_arn = _utils.get_labeling_job_outputs(mock_client, 'labeling-job', True)
self.assertEqual(output_manifest, 's3://path/')
self.assertEqual(active_learning_model_arn, 'fake-arn')
def test_pass_most_args(self):
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'image classification',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
arguments = required_args + ['--label_attribute_name', 'fake-attribute',
'--max_human_labeled_objects', '10',
'--max_percent_objects', '50',
'--enable_auto_labeling', 'True',
'--initial_model_arn', 'fake-model-arn',
'--task_availibility', '30',
'--max_concurrent_tasks', '10',
'--task_keywords', 'fake-keyword',
'--worker_type', 'public',
'--no_adult_content', 'True',
'--no_ppi', 'True',
'--tags', '{"fake_key": "fake_value"}'
]
response = _utils.create_labeling_job_request(vars(self.parser.parse_args(arguments)))
print(response)
self.assertEqual(response, {'LabelingJobName': 'test_job',
'LabelAttributeName': 'fake-attribute',
'InputConfig': {'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}},
'DataAttributes': {'ContentClassifiers': ['FreeOfAdultContent', 'FreeOfPersonallyIdentifiableInformation']}},
'OutputConfig': {'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
'RoleArn': 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'StoppingConditions': {'MaxHumanLabeledObjectCount': 10, 'MaxPercentageOfInputDatasetLabeled': 50},
'LabelingJobAlgorithmsConfig': {'LabelingJobAlgorithmSpecificationArn': 'arn:aws:sagemaker:us-west-2:027400017018:labeling-job-algorithm-specification/image-classification',
'InitialActiveLearningModelArn': 'fake-model-arn',
'LabelingJobResourceConfig': {'VolumeKmsKeyId': ''}},
'HumanTaskConfig': {'WorkteamArn': 'arn:aws:sagemaker:us-west-2:394669845002:workteam/public-crowd/default',
'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass',
'TaskKeywords': ['fake-keyword'],
'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job',
'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'TaskAvailabilityLifetimeInSeconds': 30,
'MaxConcurrentTaskCount': 10,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass'},
'PublicWorkforceTaskPrice': {'AmountInUsd': {'Dollars': 0, 'Cents': 0, 'TenthFractionsOfACent': 0}}},
'Tags': [{'Key': 'fake_key', 'Value': 'fake_value'}]}
)
| components/aws/sagemaker/tests/unit_tests/tests/test_ground_truth.py | 9,211 | Mock out all of utils except parser Set some static returns Check if correct requests were created and triggered Check the file outputs | 135 | en | 0.896853 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from functools import partial
import itertools
import operator
import re
from unittest import SkipTest
import textwrap
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.random as npr
import jax
from jax._src import api
from jax import core
from jax import lax
from jax import random
from jax import test_util as jtu
from jax import tree_util
from jax._src.util import unzip2
from jax.lib import xla_bridge
from jax.interpreters import xla
import jax.numpy as jnp # scan tests use numpy
import jax.scipy as jsp
from jax.config import config
config.parse_flags_with_absl()
# Some tests are useful for testing both lax.cond and lax.switch. This function
# provides a lax.cond-compatible interface to a two-branch lax.switch. Several
# tests in this file are parameterized such that they either call into lax.cond
# or into this function.
def cond_via_switch(pred, true_fun, false_fun, op, *args):
if len(args) > 0:
assert len(args) == 1
true_op, _true_fun, false_op, _false_fun = true_fun, false_fun, op, args[0]
op = (false_op, true_op)
false_fun = lambda op: _false_fun(op[0])
true_fun = lambda op: _true_fun(op[1])
index = lax.convert_element_type(pred, np.int32)
return lax.switch(index, [false_fun, true_fun], op)
COND_IMPLS = [
(lax.cond, 'cond'),
(cond_via_switch, 'switch'),
]
SCAN_IMPLS = [
(lax.scan, 'unroll1'),
(partial(lax.scan, unroll=2), 'unroll2'),
]
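# Illustrative use of the parameterization above (not an actual test case in this file):
# a test body written against the two-branch cond signature, e.g.
#   out = cond_fn(x < 0, lambda x: -x, lambda x: x, x)
# runs unchanged whether cond_fn is lax.cond or cond_via_switch, since the wrapper lowers
# the boolean predicate to an int32 index into [false_fun, true_fun] for lax.switch.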
def while_loop_reference(cond, body, carry):
while cond(carry):
carry = body(carry)
return carry
def scan_reference(f, init, xs):
carry = init
ys = []
for x in xs:
(carry, y) = f(carry, x)
ys.append(lax.reshape(y, (1,) + np.shape(y)))
ys = lax.concatenate(ys, 0)
return carry, ys
def high_precision_dot(a, b):
return lax.dot(a, b, precision=lax.Precision.HIGHEST)
def posify(matrix):
return high_precision_dot(matrix, matrix.T.conj())
class LaxControlFlowTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
jax._src.lax.control_flow._initial_style_open_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxprs_with_common_consts.cache_clear()
def testWhileWithTuple(self):
limit = 10
def loop_cond(state):
pos, _ = state
return lax.lt(pos, limit)
def loop_body(state):
pos, count = state
return (lax.add(pos, 1), lax.add(count, 1))
def loop(init):
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
self.assertEqual(loop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(3), limit - 3)
def testWhileWithManyArgs(self):
nargs = 256
def loop_cond(state):
return lax.lt(state[0], 2)
def loop_body(state):
return tuple(lax.add(s, 1) for s in state)
_ = lax.while_loop(loop_cond, loop_body, (0,) * nargs)
def testNestedWhile(self):
def outer_loop(num): # pylint: disable=missing-docstring
def cond_fun(state):
num, i, _ = state
return lax.lt(i, num)
def body_fun(state):
num, i, count = state
return (num, lax.add(i, 1), inner_loop(i, count))
init_val = (num, 0, 0)
_, i, count = lax.while_loop(cond_fun, body_fun, init_val)
return (i, count)
def inner_loop(i, count): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, count = state
return (i, lax.add(j, 1), lax.add(count, 1))
init_val = (i, 0, count)
_, _, count = lax.while_loop(cond_fun, body_fun, init_val)
return count
cloop = api.jit(outer_loop)
self.assertEqual(outer_loop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(2), (2, 3))
self.assertEqual(cloop(4), (4, 10))
def testWhileWithClosure(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
return (lax.add(pos, 1), lax.add(count, inc))
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileWithClosureJit(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))
return api.jit(f)(pos, inc)
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileTypeErrors(self):
"""Test typing error messages for while."""
tuple_treedef = tree_util.tree_structure((1., 1.))
leaf_treedef = tree_util.tree_structure(0.)
with self.assertRaisesRegex(TypeError,
re.escape(f"cond_fun must return a boolean scalar, but got pytree {tuple_treedef}.")):
lax.while_loop(lambda c: (1., 1.), lambda c: c, 0.)
with self.assertRaisesRegex(TypeError,
re.escape("cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].")):
lax.while_loop(lambda c: np.float32(1.), lambda c: c, np.float32(0.))
with self.assertRaisesRegex(TypeError,
re.escape("body_fun output and input must have same type structure, "
f"got {tuple_treedef} and {leaf_treedef}.")):
lax.while_loop(lambda c: True, lambda c: (1., 1.), 0.)
with self.assertRaisesWithLiteralMatch(TypeError,
("body_fun output and input must have identical types, got\n"
"ShapedArray(bool[], weak_type=True)\n"
"and\n"
"ShapedArray(float32[]).")):
lax.while_loop(lambda c: True, lambda c: True, np.float32(0.))
def testNestedWhileWithDynamicUpdateSlice(self):
num = 5
def update_entry(arr, val, i, j):
val = lax.reshape(val, [1, 1])
return lax.dynamic_update_slice(arr, val, (i, j))
def outer_loop(arr): # pylint: disable=missing-docstring
def cond_fun(state):
i, num, _, _ = state
return lax.lt(i, num)
def body_fun(state):
i, num, arr, out = state
return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))
out = np.zeros(arr.shape, dtype=arr.dtype)
init_val = (0, num, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
def inner_loop(i, arr, out): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, arr, out = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)
out = update_entry(out, arr_i_j, i, j)
return (i, lax.add(j, 1), arr, out)
init_val = (i, 0, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
cloop = api.jit(outer_loop)
arr = npr.RandomState(0).randn(5, 5)
self.assertAllClose(outer_loop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
def testLoopWithConjunctionCondition(self):
def sum_first_n(arr, num): # pylint: disable=missing-docstring
def cond_fun(state):
arr, num, i, _ = state
return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))
def body_fun(state):
arr, num, i, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, num, lax.add(i, 1), lax.add(total, arr_i))
init_val = (arr, num, 0, 0.)
_, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testWhileLoopBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopAxisIndexBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < lax.axis_index('i'), lambda x: x + 2, x)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopCondConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < y, lambda x: x + 2, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, np.array([2, 3]))
expected = np.array([2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopBodyConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < 3, lambda x: x + y, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, jnp.array([2, 3]))
expected = np.array([4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopTupleBatched(self):
def cond_fun(loop_carry):
x, y = loop_carry
return x + y < 5
def body_fun(loop_carry):
x, y = loop_carry
x = x + 1
return x, y
def fun(x, y):
return lax.while_loop(cond_fun, body_fun, (x, y))
ans = api.vmap(fun)(np.array([0, 0]), np.array([1, 2]))
expected = (np.array([4, 3]), np.array([1, 2]))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_issue_3204(self):
# Error during XLA code generation for vmap of nested loops
def test(a, b):
val = 0
i = 0
j = 0
condfun_1 = lambda inp: inp[1] < a + 1
condfun_2 = lambda inp: inp[2] < b + 1
def bodyfun_1(inp):
val, i, j = inp
j = 0
def bodyfun_2(inp):
val, i, j = inp
val += i + j
j += 1
return (val, i, j)
result = lax.while_loop(condfun_2, bodyfun_2, (val, i, j))
val = result[0]
i += 1
return (val, i, j)
result = lax.while_loop(condfun_1, bodyfun_1, (val, i, j))
return result[0]
arr = np.arange(5)
vmap_test = api.vmap(test, (0, 0))
vmap_test(arr, arr)
def testForiLoopErrors(self):
"""Test typing error messages for while."""
with self.assertRaisesRegex(
TypeError, "arguments to fori_loop must have equal types"):
lax.fori_loop(np.int16(0), jnp.int32(10), (lambda i, c: c), jnp.float32(7))
def testForiLoopBatched(self):
def body_fun(i, loop_carry):
x, y = loop_carry
x = x + 1
y = y + 2
return x, y
def fun(x):
return lax.fori_loop(0, 10, body_fun, (x, 0))
ans = api.vmap(fun)(np.array([0, 1]))
expected = (np.array([10, 11]), np.array([20, 20]))
self.assertAllClose(ans, expected, check_dtypes=False)
def testForiLoopBatchedIssue1190(self):
cond_fun = lambda carry: carry[0] < 4
body_fun = lambda carry: (carry[0] + 1, carry[1] + 1)
f = lambda x: lax.while_loop(cond_fun, body_fun, (0, x))
jaxpr = api.make_jaxpr(api.vmap(f))(jnp.arange(3))
eqn = jaxpr.jaxpr.eqns[0]
self.assertIs(eqn.primitive, lax.while_p)
self.assertEqual(eqn.params['cond_jaxpr'].in_avals[0].shape, ())
def testForiLoopBasic(self):
def body_fun(i, tot):
return lax.add(tot, i)
def count(num):
return lax.fori_loop(0, num, body_fun, 0)
self.assertEqual(count(2), 1)
self.assertEqual(count(3), 3)
self.assertEqual(count(4), 6)
for args_maker in [lambda: [2], lambda: [3], lambda: [4]]:
self._CompileAndCheck(count, args_maker)
def testForiLoopClosure(self):
def count(num):
def body_fun(i, tot):
return lax.add(num, lax.add(tot, i))
return lax.fori_loop(0, num, body_fun, 0)
cfun = api.jit(count)
self.assertEqual(count(2), 1 + 2**2)
self.assertEqual(count(2), cfun(2))
self.assertEqual(count(3), 3 + 3**2)
self.assertEqual(count(3), cfun(3))
self.assertEqual(count(4), 6 + 4**2)
self.assertEqual(count(4), cfun(4))
def testForiLoopTupleState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i))
init_val = (arr, 0.)
_, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,
init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopDictState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state['arr'], state['total']
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return {'arr': arr, 'total': lax.add(total, arr_i)}
init_val = {'arr': arr, 'total': 0.}
out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return out_val['total']
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopEmptyTupleInState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total, _ = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i), ())
init_val = (arr, 0., ())
_, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return tot
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testCond(self):
def fun(x):
if x < 3:
return (x, x)
else:
y = lax.mul(2, x)
return y, lax.mul(2, y)
@api.jit
def cfun(x):
def false_fun(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
return lax.cond(lax.lt(x, 3), lambda x: (x, x), false_fun, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(0), (0, 0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(1), (1, 1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(2), (2, 2))
self.assertEqual(fun(3), cfun(3))
self.assertEqual(fun(3), (6, 12))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(fun(4), (8, 16))
def testSwitch(self):
def branch(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun(x):
if x <= 0:
return branches[0](x)
elif x == 1:
return branches[1](x)
else:
return branches[2](x)
def cfun(x):
return lax.switch(x, branches, x)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
cfun = api.jit(cfun)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
def testSwitchResidualsMerge(self):
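    # Residuals saved for the backward pass should be merged across switch
    # branches: a branch whose residual overlaps an existing one must not grow
    # the forward cond's outputs, while a genuinely new residual grows them by one.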
def get_conds(fun):
jaxpr = api.make_jaxpr(api.grad(fun))(0., 0)
return [eqn for eqn in jaxpr.jaxpr.eqns if eqn.primitive.name == 'cond']
def branch_invars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.invars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
def branch_outvars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.outvars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
branches1 = [
lambda x: jnp.sin(x),
lambda x: jnp.cos(x)] # branch residuals overlap, should be reused
branches2 = branches1 + [
lambda x: jnp.sinh(x)] # another overlapping residual, expect reuse
branches3 = branches2 + [
lambda x: jnp.sin(x) + jnp.cos(x)] # requires one more residual slot
def fun1(x, i):
return lax.switch(i + 1, branches1, x)
def fun2(x, i):
return lax.switch(i + 1, branches2, x)
def fun3(x, i):
return lax.switch(i + 1, branches3, x)
fwd1, bwd1 = get_conds(fun1)
fwd2, bwd2 = get_conds(fun2)
fwd3, bwd3 = get_conds(fun3)
fwd1_num_out = branch_outvars_len(fwd1)
fwd2_num_out = branch_outvars_len(fwd2)
fwd3_num_out = branch_outvars_len(fwd3)
assert fwd1_num_out == fwd2_num_out
assert fwd3_num_out == fwd2_num_out + 1
bwd1_num_in = branch_invars_len(bwd1)
bwd2_num_in = branch_invars_len(bwd2)
bwd3_num_in = branch_invars_len(bwd3)
assert bwd1_num_in == bwd2_num_in
assert bwd3_num_in == bwd2_num_in + 1
def testOneBranchSwitch(self):
branch = lambda x: -x
f = lambda i, x: lax.switch(i, [branch], x)
x = 7.
self.assertEqual(f(-1, x), branch(x))
self.assertEqual(f(0, x), branch(x))
self.assertEqual(f(1, x), branch(x))
cf = api.jit(f)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
cf = api.jit(f, static_argnums=0)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
def testIssue1379(self):
def fun(pred):
return lax.cond(pred, lambda x: (True, x), lambda x: (False, x), pred)
@api.jit
def cfun(pred):
return fun(pred)
    self.assertEqual(fun(0), cfun(0))
    self.assertEqual(cfun(0), (False, 0))
    self.assertEqual(fun(0.), cfun(0.))
    self.assertEqual(cfun(0.), (False, 0.))
    self.assertEqual(fun(1), cfun(1))
    self.assertEqual(cfun(1), (True, 1))
    self.assertEqual(fun(1.), cfun(1.))
    self.assertEqual(cfun(1.), (True, 1.))
# test that proper errors are raised for wrong types
for pred in ["abc", [], [1,2]]:
for f in [fun, cfun]:
self.assertRaises(TypeError, f, pred)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testNestedCond(self, cond):
def fun(x):
if x < 2:
return lax.mul(2, x)
else:
if x < 5:
return lax.mul(3, x)
else:
return lax.mul(4, x)
@api.jit
def cfun(x):
return cond(
lax.lt(x, 2),
lambda x: lax.mul(2, x),
lambda x: cond(lax.lt(x, 5),
x, lambda x: lax.mul(3, x),
4, lambda y: lax.mul(y, x)),
x)
self.assertEqual(cfun(1), 2)
self.assertEqual(cfun(3), 9)
self.assertEqual(cfun(6), 24)
self.assertEqual(cfun(1), fun(1))
self.assertEqual(cfun(3), fun(3))
self.assertEqual(cfun(6), fun(6))
def testCondTypeErrors(self):
"""Test typing error messages for cond."""
with self.assertRaisesRegex(TypeError,
re.escape("Pred type must be either boolean or number, got <function")):
lax.cond(lambda x: True, lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got foo of type <class 'str'>")):
lax.cond("foo", lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got (1.0, 1.0) of type <class 'tuple'>")):
lax.cond((1., 1.), lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("true_fun and false_fun output must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.cond(True, lambda top: 2., lambda fop: (3., 3.), 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
true_fun and false_fun output must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.cond(True,
lambda top: jnp.array([1.], jnp.float32),
lambda fop: jnp.float32(1.),
1.)
def testSwitchErrors(self):
"""Test typing error messages for switch."""
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got <function")):
lax.switch(lambda x: True, [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got foo.")):
lax.switch("foo", [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Branch index must be scalar, got (1.0, 1.0) of shape (2,).")):
lax.switch((1., 1.), [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(ValueError,
re.escape("Empty branch sequence")):
lax.switch(0, [], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("branch 0 and 1 outputs must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.switch(1, [lambda _: 2., lambda _: (3., 3.)], 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
branch 0 and 1 outputs must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.switch(1, [lambda _: jnp.array([1.], jnp.float32),
lambda _: jnp.float32(1.)],
1.)
def testCondOneBranchConstant(self):
def fun(x):
if x < 3:
return 5.
else:
return x
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3), lambda x: 5, lambda x: x, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), 5)
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), 4)
def testCondOneBranchConstantTuple(self):
def fun(x):
if x < 3:
return (1., 2., 3.)
else:
return (x, 2., 4.)
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3),
lambda x: (1, 2., 3.),
lambda x: (x, 2., 4.),
x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), (1, 2., 3.))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), (4, 2., 4.))
def testCondBatched(self):
def fun(x, y, z):
pred = lax.lt(x, 3)
true_fun = lambda y: y
false_fun = lambda z: lax.neg(z)
return lax.cond(pred, y, true_fun, z, false_fun)
# these cases stay as cond
x = jnp.array(2)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(4)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None)))(x, y, z)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
# these cases become select
x = jnp.array([2, 4])
ans = api.vmap(fun, (0, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None)))(x, y, z)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
ans = api.vmap(fun)(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testSwitchBatched(self):
def fun(index, x, y, z):
branches = [lambda xyz: xyz[0],
lambda xyz: lax.neg(xyz[1]),
lambda xyz: lax.sign(xyz[2])]
return lax.switch(index, branches, (x, y, z))
# these cases stay as cond
x = jnp.array(0)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
w = jnp.array(9)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(1)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None, None)))(x, y, z, w)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
# these cases become select
x = jnp.array([0, 1])
ans = api.vmap(fun, (0, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None, None)))(x, y, z, w)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
w = jnp.array([9, 9])
ans = api.vmap(fun)(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z, w)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testCondJVP(self):
def fun_ref(x):
if x < 3:
return (x, x)
else:
y = 2 * x
return y, 2 * y
def fun(x):
def false_fun(x):
y = 2 * x
return y, 2 * y
return lax.cond(x < 3, lambda x: (x, x), false_fun, x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testSwitchJVP(self):
def branch(x):
y = 2 * x
return y, 2 * y
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def fun(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJVP2(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testCondGrad(self):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
x = 2.14
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testCondGradVmapNan(self):
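    # At x == 0 the sqrt branch is not taken; it must not contaminate the
    # gradient with nan under vmap + grad of the cond.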
eps = 1e-3
def safe1(x):
return lax.cond(x < eps, lambda _: eps, lambda _: jnp.sqrt(x), ())
out = api.grad(lambda x: api.vmap(safe1)(x).sum())(np.zeros(10))
self.assertFalse(np.isnan(out).any())
def testSwitchGrad(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testSwitchGradWithWeakTypeMismatch(self): # issue #4696, PR #4896
dtype = jnp.ones(1).dtype
dtype = jnp.float32 if dtype == jnp.float32 else jnp.float64
branches = [
lambda x: x, # This preserves the weak type of x.
lambda x: x + dtype(1), # This strips the weak type of x.
]
def f_ref(x):
i = x.astype(jnp.int32)
return branches[i](x)
def f(x):
return lax.switch(x.astype(jnp.int32), branches, x)
for x in [0., 1.]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.sin(x)
return z.sum()
def _f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.sin(x),
x)
f = lambda x: api.jit(_f)(x).sum()
x = 2.14 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"],
rtol={jnp.float32: 1e-2, jnp.float64: 2e-3})
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad3(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad4(self, cond):
def fun_ref(x, y):
if x < 3:
return 2. * jnp.sin(y)
else:
return 2. * jnp.cos(x)
def fun(x, y):
return cond(
x < 3,
(), lambda _: 2. * jnp.sin(y),
x, lambda x: 2. * x)
y = 5.8
x = 3.14
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
def testCondLinearize(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, 3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 4.)
self.assertAllClose(y, jnp.sin(4.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(4.) * 2., check_dtypes=False)
def testSwitchLinearize(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
# branch 0
y, f_lin = api.linearize(f, -1.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 0.)
self.assertAllClose(y, 0., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
# branch 1
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, jnp.sin(1.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(1.) * 2., check_dtypes=False)
# branch 2
y, f_lin = api.linearize(f, 2.)
self.assertAllClose(y, -2., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
y, f_lin = api.linearize(f, 3.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondLinearize2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.cos(jnp.sin(x))
return z.sum()
def f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.cos(jnp.sin(x)),
x).sum()
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
x = -2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
f = api.jit(f)
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
def testCondJit(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
def testSwitchJit(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-1., 0., 1., 2., 3.]:
y = api.jit(f)(x)
expected = f(x)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitDisabled(self, cond):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
with api.disable_jit():
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
with api.disable_jit():
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
def f_ref(x):
if x < 2:
return np.array([1., 2.]) * x
else:
return np.array([3., 4.]) * np.sin(x)
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = f(4.)
expected = f_ref(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondVmapGrad(self, cond):
# https://github.com/google/jax/issues/2264
def f_1(x): return x ** 2
def f_2(x): return x ** 3
def f(x): return cond(x > 0, f_1, f_2, x)
def g(x): return jnp.where(x > 0, f_1(x), f_2(x))
x = jnp.linspace(-1, 1, 20)
ans = api.vmap(api.grad(f))(x)
expected = api.vmap(api.grad(g))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testIssue1263(self):
def f(rng, x):
cond = random.bernoulli(rng)
return lax.cond(cond, x, lambda x: x, jnp.abs(x) - 1., lambda x: x)
def body_fn(i, state):
rng, x = state
key, subkey = random.split(rng)
return key, f(subkey, x)
def g(rng, x):
return lax.fori_loop(0, 10, body_fn, (rng, x))
api.vmap(g)(random.split(random.PRNGKey(0), 3), jnp.ones((3, 4)))
def testIssue514(self):
# just check this doesn't crash
lax.cond(True,
(0, 0), lambda x: (x[0], 0),
(1, 1), lambda x: x)
def testIssue649(self):
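    # while_loop whose body overwrites part of the carry with a constant.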
from jax import lax
def body(x):
a, b = x
return (7, b + 1)
def cond(x):
a, b = x
return b < 10
out = lax.while_loop(cond, body, (33, 4))
self.assertEqual(out, (7, 10))
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanImpl(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = scan(f, c, as_)
expected = scan_reference(f, c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanJVP(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
    ans = api.jvp(lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))
expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14, np.float32: 1e-5})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanLinearize(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)
expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanGrad(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.sum(jnp.sin(a)) + jnp.sum(jnp.sin(c)) + jnp.sum(jnp.sin(d))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
    ans = api.grad(lambda c, as_: list(scan(f, c, as_))[0].sum())(c, as_)
expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float32: 2e-5, np.float64: 1e-13})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["rev"],
atol=1e-3, rtol=5e-3)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanRnn(self):
r = npr.RandomState(0)
n_in = 4
n_hid = 2
n_out = 1
length = 3
W_trans = r.randn(n_hid, n_hid + n_in).astype(jnp.float_)
W_out = r.randn(n_out, n_hid + n_in).astype(jnp.float_)
params = W_trans, W_out
inputs = r.randn(length, n_in).astype(jnp.float_)
targets = r.randn(length, n_out).astype(jnp.float_)
def step(params, state, input):
W_trans, W_out = params
stacked = jnp.concatenate([state, input])
output = jnp.tanh(jnp.dot(W_out, stacked))
next_state = jnp.tanh(jnp.dot(W_trans, stacked))
return next_state, output
def rnn(params, inputs):
init_state = jnp.zeros(n_hid)
_, outputs = lax.scan(partial(step, params), init_state, inputs)
return outputs
def loss(params, inputs, targets):
predictions = rnn(params, inputs)
return jnp.sum((predictions - targets)**2)
# evaluation doesn't crash
loss(params, inputs, targets)
# jvp evaluation doesn't crash
api.jvp(lambda params: loss(params, inputs, targets), (params,), (params,))
# jvp numerical check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, modes=["fwd"],
rtol={np.float32: 2e-2, np.float64: 1e-6})
# linearize works
_, expected = api.jvp(loss, (params, inputs, targets),
(params, inputs, targets))
_, linfun = api.linearize(loss, params, inputs, targets)
ans = linfun(params, inputs, targets)
self.assertAllClose(ans, expected, check_dtypes=False)
# gradient evaluation doesn't crash
api.grad(loss)(params, inputs, targets)
# gradient check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, rtol=2e-2)
# we can vmap to batch things
batch_size = 7
batched_inputs = r.randn(batch_size, length, n_in).astype(jnp.float_)
batched_targets = r.randn(batch_size, length, n_out).astype(jnp.float_)
batched_loss = api.vmap(lambda x, y: loss(params, x, y))
losses = batched_loss(batched_inputs, batched_targets)
expected = np.stack(list(map(lambda x, y: loss(params, x, y),
batched_inputs, batched_targets)))
self.assertAllClose(losses, expected, check_dtypes=False, rtol=1e-2)
def testIssue711(self):
# Tests reverse-mode differentiation through a scan for which the scanned
# function also involves reverse-mode differentiation.
# See https://github.com/google/jax/issues/711
def harmonic_bond(conf, params):
return jnp.sum(conf * params)
def minimize_structure(test_params):
energy_fn = partial(harmonic_bond, params=test_params)
def apply_carry(carry, _):
i, x = carry
new_x = x - 0.1 * api.grad(energy_fn)(x)
new_carry = (i+1, new_x)
return new_carry, _
x0 = jnp.array([1., 2., 3.])
carry_final, _ = lax.scan(apply_carry, (0, x0), jnp.zeros((75, 0)))
_, x_final = carry_final
return x_final
initial_params = 0.5
minimize_structure(initial_params) # doesn't crash
def loss(test_params):
x_final = minimize_structure(test_params)
return jnp.sum(jnp.sin(1.0 - x_final))
api.grad(loss)(0.25) # doesn't crash
def testIssue744(self):
Point = collections.namedtuple('Point', ['x', 'y'])
p0 = Point(x=jnp.array(1), y=jnp.array(2))
def plus_one(p, iter_idx):
return Point(p.x+1, p.y+1), iter_idx
self.assertRaisesRegex(
ValueError,
'scan got value with no leading axis to scan over.*',
lambda: lax.scan(plus_one, p0, list(range(5))))
def testScanTypeErrors(self):
"""Test typing error messages for scan."""
a = jnp.arange(5)
# Body output not a tuple
with self.assertRaisesRegex(TypeError,
re.escape("scan body output must be a pair, got ShapedArray(float32[]).")):
lax.scan(lambda c, x: np.float32(0.), 0, a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure((0, 0, 0,))} "
f"and {tree_util.tree_structure((1, (2, 3)))}")):
lax.scan(lambda c, x: ((0, 0, 0), x), (1, (2, 3)), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure(None)}.")):
lax.scan(lambda c, x: (0, x), None, a)
with self.assertRaisesWithLiteralMatch(
TypeError,
"scan carry output and input must have identical types, got\n"
"ShapedArray(int32[])\n"
"and\n"
"ShapedArray(float32[])."):
lax.scan(lambda c, x: (np.int32(0), x), np.float32(1.0), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure((1, 2))}.")):
lax.scan(lambda c, x: (0, x), (1, 2), a)
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def testScanHigherOrderDifferentiation(self, scan):
d = 0.75
def f(c, a):
b = jnp.sin(c * jnp.sum(jnp.cos(d * a)))
c = 0.9 * jnp.cos(d * jnp.sum(jnp.sin(c * a)))
return c, b
as_ = jnp.arange(6.).reshape((3, 2))
c = 1.
jtu.check_grads(lambda c, as_: scan(f, c, as_), (c, as_),
modes=["rev"], order=2, rtol={np.float32: 6e-3})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_in_axes={}_impl={}".format(
jit_scan, jit_f, in_axes, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "in_axes": in_axes,
"scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS
for in_axes in itertools.product([None, 0, 1], [None, 0, 1, 2])
if in_axes != (None, None))
def testScanVmap(self, jit_scan, jit_f, in_axes, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_shape = [5, 3]
c_shape = [4]
c_bdim, as_bdim = in_axes
if c_bdim is not None:
c_shape.insert(c_bdim, 7)
if as_bdim is not None:
as_shape.insert(as_bdim, 7)
as_ = rng.randn(*as_shape)
c = rng.randn(*c_shape)
ans = api.vmap(lambda c, as_: scan(f, c, as_), in_axes)(c, as_)
expected = api.vmap(lambda c, as_: scan_reference(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol=1e-5, atol=1e-5)
def testScanVmapTuples(self):
def f(c, a):
a1, a2 = a
c1, c2 = c
b = jnp.sum(jnp.cos(a1)) * jnp.sum(jnp.tan(c2 * a2))
c = c1 * jnp.sin(jnp.sum(a1 * a2)), c2 * jnp.cos(jnp.sum(a1))
return c, b
in_axes = (0, (1, 2))
r = np.random.RandomState(0)
as_ = (r.randn(3, 7), r.randn(3, 4, 7))
c = (r.randn(7, 2), r.randn(7))
expected_c_out, expected_bs = [], []
for i in range(7):
c_out, bs = lax.scan(f, (c[0][i], c[1][i]), (as_[0][:,i], as_[1][:,:,i]))
expected_c_out.append(c_out)
expected_bs.append(bs)
expected_c_out_0, expected_c_out_1 = unzip2(expected_c_out)
expected_c_out = (jnp.stack(expected_c_out_0), jnp.stack(expected_c_out_1))
expected_bs = jnp.stack(expected_bs)
expected = expected_c_out, expected_bs
ans = api.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
def testScanVmapFixpoint(self):
def f(carry_init):
def scan_body(c, x):
# The carry is a 4-tuple, the last element starts batched,
# and the carry is shifted left at each iteration.
return ((c[1], c[2], c[3], 0.), None)
return lax.scan(scan_body, (0., 1., 2., carry_init), jnp.zeros(2))
carry_init = jnp.array([3., 4., 5.])
carry_out, _ = api.vmap(f)(carry_init)
self.assertAllClose(carry_out[3], jnp.array([0., 0., 0.]), check_dtypes=False)
    self.assertAllClose(carry_out[2], jnp.array([0., 0., 0.]), check_dtypes=False)
    # After two shifts, we get the carry_init
    self.assertAllClose(carry_out[1], carry_init, check_dtypes=False)
    self.assertAllClose(carry_out[0], jnp.array([2., 2., 2.]), check_dtypes=False)
def testIssue757(self):
# code from https://github.com/google/jax/issues/757
def fn(a):
return jnp.cos(a)
def loop(val):
iterations = 10
def apply_carry(x, i):
return api.grad(fn, argnums=(0,))(x)[0], i
final_val, _ = lax.scan(
apply_carry,
val,
jnp.arange(iterations)
)
return final_val
arg = 0.5
api.jit(api.jacfwd(loop, argnums=(0,)))(arg) # doesn't crash
def testIssue804(self):
num_devices = xla_bridge.device_count()
f = partial(lax.scan, lambda c, x: (c + lax.psum(x, "i") , c), 0.)
api.pmap(f, axis_name="i")(jnp.ones((num_devices, 4))) # doesn't crash
def testMap(self):
f = lambda x: x ** 2
xs = jnp.arange(10)
expected = xs ** 2
actual = lax.map(f, xs)
self.assertAllClose(actual, expected)
def testMapEmpty(self):
# https://github.com/google/jax/issues/2412
ans = lax.map(lambda x: x * x, jnp.array([]))
expected = jnp.array([])
self.assertAllClose(ans, expected)
def testCaching(self):
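    # The cond and body should only be traced on the first while_loop call;
    # the second identical call must hit the cache without re-executing Python.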
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testCaching2(self):
# This second caching test shows a different kind of caching that we haven't
# implemented (but could!), namely that Python functions that are distinct
# objects but are equivalent functions trigger cache hits. This kind of
# caching could be salient when using lambda functions with control flow:
#
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
#
# To get a cache hit on the second line we'd need to form a jaxpr and
# compare them for equality (including the literals on identity). We could
# implement that by adding a __hash__/__eq__ to core.Jaxpr and
# core.ClosedJaxpr (see #1221).
raise SkipTest("not implemented")
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testWhileCondConstant(self):
out = lax.while_loop(lambda _: False, lambda _: (), ()) # doesn't crash
self.assertEqual(out, ())
@parameterized.named_parameters(
{"testcase_name": "_jit_loop={}_jit_body={}_jit_cond={}".format(
jit_loop, jit_body, jit_cond),
"jit_loop": jit_loop, "jit_body": jit_body, "jit_cond": jit_cond}
for jit_loop in [False, True]
for jit_body in [False, True]
for jit_cond in [False, True])
def testWhileJVP(self, jit_loop=True, jit_body=False, jit_cond=True):
cond = lambda x: x[0, 2] <= 8
body = lambda x: x * x
if jit_cond:
cond = api.jit(cond)
if jit_body:
body = api.jit(body)
loop = partial(lax.while_loop, cond, body)
if jit_loop:
loop = api.jit(loop)
loop_ref = partial(while_loop_reference, cond, body)
x = jnp.arange(9.).reshape((3, 3))
ans = api.jvp(loop, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop, (x,), order=2, modes=["fwd"])
def testWhileJVPViaForiLoop(self):
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * 2, x)
self.assertAllClose(f(2.), 16., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (16., 8.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * (i + 1), x)
self.assertAllClose(f(2.), 12., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (12., 6.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
def testWhileJVPWithGrowingNonzeroTangents(self):
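    # The set of carry components with nonzero tangents grows as the loop runs
    # (x seeds y, which then seeds z), so the JVP rule cannot assume it is fixed
    # up front.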
rng = np.random.RandomState(0)
def cond(state):
i, x, y, z = state
return i < 2
def body(state):
i, x, y, z = state
y = x * x
z = y * y
return i + 1, x, y, z
y, z = rng.randn(2), rng.randn(2)
def loop(loop_impl, x):
return loop_impl(cond, body, (0, x, y, z))[1]
loop_lax = partial(loop, lax.while_loop)
loop_ref = partial(loop, while_loop_reference)
x = rng.randn(2)
ans = api.jvp(loop_lax, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop_lax, (x,), order=2, modes=["fwd"])
@parameterized.named_parameters(
dict(testcase_name="_loop={}".format(loop), loop=loop)
for loop in ["while", "fori", "fori_inside_cond", "fori_inside_scan"])
def testWhileGradError(self, loop: str = "fori_inside_scan"):
# Raise error for vjp for loops
if loop == "while":
func = lambda x: lax.while_loop(lambda i: i < 5., lambda i: i + 1., x)
elif loop == "fori":
func = lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x)
elif loop == "fori_inside_jit":
func = api.jit(lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x))
elif loop == "fori_inside_cond":
func = lambda x: lax.cond(True, x,
lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x),
1., lambda x: x)
elif loop == "fori_inside_scan":
func = lambda x: lax.scan(lambda c, x: (lax.fori_loop(x, x + 2., lambda i, c1: c1 * c, x),
None),
x, np.ones(2))[0]
else:
assert False
with self.assertRaisesRegex(ValueError, "Reverse-mode differentiation does not work for lax.while_loop"):
api.grad(func)(1.)
api.linearize(func, 1.) # Linearization works
def testIssue1316(self):
def f(carry, _):
c, key = carry
key, _ = random.split(key)
return (c, key), ()
key = random.PRNGKey(0)
api.grad(lambda c: lax.scan(f, (c, key), np.ones(3))[0][0])(0.) # doesn't crash
def testIssue1361(self):
@api.jit
def jit_run_scan(x):
def fun(carry, _):
x, _ = carry
return (2 * x, 0.), None
(x, _), _ = lax.scan(fun, (x, 0.), jnp.arange(3))
return x
api.grad(lambda x: jit_run_scan(x))(0.) # doesn't crash
def test_custom_root_scalar(self):
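    # custom_root with a scalar objective: bisection for the forward solve and
    # a one-dimensional linear solve for the tangents.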
def scalar_solve(f, y):
return y / f(1.0)
def binary_search(func, x0, low=0.0, high=100.0):
del x0 # unused
def cond(state):
low, high = state
midpoint = 0.5 * (low + high)
return (low < midpoint) & (midpoint < high)
def body(state):
low, high = state
midpoint = 0.5 * (low + high)
update_upper = func(midpoint) > 0
low = jnp.where(update_upper, low, midpoint)
high = jnp.where(update_upper, midpoint, high)
return (low, high)
solution, _ = lax.while_loop(cond, body, (low, high))
return solution
def sqrt_cubed(x, tangent_solve=scalar_solve):
f = lambda y: y ** 2 - x ** 3
return lax.custom_root(f, 0.0, binary_search, tangent_solve)
value, grad = api.value_and_grad(sqrt_cubed)(5.0)
self.assertAllClose(value, 5 ** 1.5, check_dtypes=False, rtol=1e-6)
self.assertAllClose(grad, api.grad(pow)(5.0, 1.5), check_dtypes=False,
rtol=1e-7)
jtu.check_grads(sqrt_cubed, (5.0,), order=2,
rtol={jnp.float32: 1e-2, jnp.float64: 1e-3})
inputs = jnp.array([4.0, 5.0])
results = api.vmap(sqrt_cubed)(inputs)
self.assertAllClose(results, inputs ** 1.5, check_dtypes=False)
results = api.jit(sqrt_cubed)(5.0)
self.assertAllClose(results, 5.0 ** 1.5, check_dtypes=False,
rtol={np.float64:1e-7})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_root_vector_with_solve_closure(self):
def vector_solve(f, y):
return jnp.linalg.solve(api.jacobian(f)(y), y)
def linear_solve(a, b):
f = lambda y: high_precision_dot(a, y) - b
x0 = jnp.zeros_like(b)
solution = jnp.linalg.solve(a, b)
oracle = lambda func, x0: solution
return lax.custom_root(f, x0, oracle, vector_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(linear_solve, (a, b), order=2,
atol={np.float32: 1e-2, np.float64: 1e-11})
actual = api.jit(linear_solve)(a, b)
expected = jnp.linalg.solve(a, b)
self.assertAllClose(expected, actual)
def test_custom_root_with_custom_linear_solve(self):
def linear_solve(a, b):
f = lambda x: high_precision_dot(a, x) - b
factors = jsp.linalg.cho_factor(a)
cho_solve = lambda f, b: jsp.linalg.cho_solve(factors, b)
def pos_def_solve(g, b):
return lax.custom_linear_solve(g, b, cho_solve, symmetric=True)
return lax.custom_root(f, b, cho_solve, pos_def_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
actual = linear_solve(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
actual = api.jit(linear_solve)(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
jtu.check_grads(lambda x, y: linear_solve(high_precision_dot(x, x.T), y),
(a, b), order=2, rtol={jnp.float32: 1e-2})
def test_custom_root_errors(self):
with self.assertRaisesRegex(TypeError, re.escape("f() output pytree")):
lax.custom_root(lambda x: (x, x), 0.0, lambda f, x: x, lambda f, x: x)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_root(lambda x: x, 0.0, lambda f, x: (x, x), lambda f, x: x)
def dummy_root_usage(x):
f = lambda y: x - y
return lax.custom_root(f, 0.0, lambda f, x: x, lambda f, x: (x, x))
with self.assertRaisesRegex(
TypeError, re.escape("tangent_solve() output pytree")):
api.jvp(dummy_root_usage, (0.0,), (0.0,))
@parameterized.named_parameters(
{"testcase_name": "nonsymmetric", "symmetric": False},
{"testcase_name": "symmetric", "symmetric": True},
)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve(self, symmetric):
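    # Both the forward and transpose solves invert an explicit Jacobian of the
    # matvec; symmetric=True tells custom_linear_solve the operator equals its
    # own transpose.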
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(
matvec, b, explicit_jacobian_solve, explicit_jacobian_solve,
symmetric=symmetric)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
if symmetric:
a = a + a.T
b = rng.randn(3)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
expected = jnp.linalg.solve(a, b)
actual = api.jit(linear_solve)(a, b)
self.assertAllClose(expected, actual)
c = rng.randn(3, 2)
expected = jnp.linalg.solve(a, c)
actual = api.vmap(linear_solve, (None, 1), 1)(a, c)
self.assertAllClose(expected, actual)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_zeros(self):
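    # Differentiate with respect to one argument at a time so the other
    # tangent is a symbolic zero.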
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, explicit_jacobian_solve,
explicit_jacobian_solve)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
jtu.check_grads(lambda x: linear_solve(x, b), (a,), order=2,
rtol={np.float32: 5e-3})
jtu.check_grads(lambda x: linear_solve(a, x), (b,), order=2,
rtol={np.float32: 5e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_iterative(self):
def richardson_iteration(matvec, b, omega=0.1, tolerance=1e-6):
# Equivalent to vanilla gradient descent:
# https://en.wikipedia.org/wiki/Modified_Richardson_iteration
def cond(x):
return jnp.linalg.norm(matvec(x) - b) > tolerance
def body(x):
return x + omega * (b - matvec(x))
return lax.while_loop(cond, body, b)
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, richardson_iteration,
richardson_iteration)
def build_and_solve(a, b):
# intentionally non-linear in a and b
matvec = partial(high_precision_dot, jnp.exp(a))
return matrix_free_solve(matvec, jnp.cos(b))
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(jnp.exp(a), jnp.cos(b))
actual = build_and_solve(a, b)
self.assertAllClose(expected, actual, atol=1e-5)
jtu.check_grads(build_and_solve, (a, b), atol=1e-5, order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
    # vmap across an added leading batch dimension of size 1
jtu.check_grads(
api.vmap(build_and_solve), (a[None, :, :], b[None, :]),
atol=1e-5,
order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
def test_custom_linear_solve_cholesky(self):
def positive_definite_solve(a, b):
factors = jsp.linalg.cho_factor(a)
def solve(matvec, x):
return jsp.linalg.cho_solve(factors, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, symmetric=True)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(np.asarray(posify(a)), b)
actual = positive_definite_solve(posify(a), b)
self.assertAllClose(expected, actual)
actual = api.jit(positive_definite_solve)(posify(a), b)
self.assertAllClose(expected, actual)
# numerical gradients are only well defined if ``a`` is guaranteed to be
# positive definite.
jtu.check_grads(
lambda x, y: positive_definite_solve(posify(x), y),
(a, b), order=2, rtol=1e-2)
def test_custom_linear_solve_complex(self):
def solve(a, b):
def solve(matvec, x):
return jsp.linalg.solve(a, x)
def tr_solve(matvec, x):
return jsp.linalg.solve(a.T, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, tr_solve)
rng = np.random.RandomState(0)
a = 0.5 * rng.randn(2, 2) + 0.5j * rng.randn(2, 2)
b = 0.5 * rng.randn(2) + 0.5j * rng.randn(2)
jtu.check_grads(solve, (a, b), order=2, rtol=1e-2)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_lu(self):
def linear_solve(a, b):
a_factors = jsp.linalg.lu_factor(a)
at_factors = jsp.linalg.lu_factor(a.T)
def solve(matvec, x):
return jsp.linalg.lu_solve(a_factors, x)
def transpose_solve(vecmat, x):
return jsp.linalg.lu_solve(at_factors, x)
return lax.custom_linear_solve(
partial(high_precision_dot, a), b, solve, transpose_solve)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
expected = jnp.linalg.solve(a, b)
actual = linear_solve(a, b)
self.assertAllClose(expected, actual)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
# regression test for https://github.com/google/jax/issues/1536
jtu.check_grads(api.jit(linear_solve), (a, b), order=2,
rtol={np.float32: 2e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_without_transpose_solve(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def loss(a, b):
matvec = partial(high_precision_dot, a)
x = lax.custom_linear_solve(matvec, b, explicit_jacobian_solve)
return jnp.sum(x)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(loss, (a, b), order=2, modes=['fwd'],
atol={np.float32: 2e-3, np.float64: 1e-11})
jtu.check_grads(api.vmap(loss), (a[None,:,:], b[None,:]), order=2,
modes=['fwd'], atol={np.float32: 2e-3, np.float64: 1e-11})
with self.assertRaisesRegex(TypeError, "transpose_solve required"):
api.grad(loss)(a, b)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_pytree(self):
"""Test custom linear solve with inputs and outputs that are pytrees."""
def unrolled_matvec(mat, x):
"""Apply a Python list of lists of scalars to a list of scalars."""
result = []
for i in range(len(mat)):
v = 0
for j in range(len(x)):
if mat[i][j] is not None:
v += mat[i][j] * x[j]
result.append(v)
return result
def unrolled_substitution_solve(matvec, b, lower_tri):
"""Solve a triangular unrolled system with fwd/back substitution."""
zero = jnp.zeros(())
one = jnp.ones(())
x = [zero for _ in b]
ordering = range(len(b)) if lower_tri else range(len(b) - 1, -1, -1)
for i in ordering:
residual = b[i] - matvec(x)[i]
diagonal = matvec([one if i == j else zero for j in range(len(b))])[i]
x[i] = residual / diagonal
return x
def custom_unrolled_lower_tri_solve(mat, b):
return lax.custom_linear_solve(
partial(unrolled_matvec, mat), b,
partial(unrolled_substitution_solve, lower_tri=True),
partial(unrolled_substitution_solve, lower_tri=False))
mat = [[1.0, None, None, None, None, None, None],
[1.0, 1.0, None, None, None, None, None],
[None, 1.0, 1.0, None, None, None, None],
[None, None, 1.0, 1.0, None, None, None],
[None, None, None, 1.0, 1.0, None, None],
[None, None, None, None, None, 2.0, None],
[None, None, None, None, None, 4.0, 3.0]]
rng = np.random.RandomState(0)
b = list(rng.randn(7))
# Non-batched
jtu.check_grads(custom_unrolled_lower_tri_solve, (mat, b), order=2,
rtol={jnp.float32: 2e-2})
# Batch one element of b (which, because of unrolling, should only affect
# the first block of outputs)
b_bat = list(b)
b_bat[3] = rng.randn(3)
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(None, [None, None, None, 0, None, None, None]),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b_bat),
order=2,
rtol={jnp.float32: 1e-2})
# Batch one element of mat (again only affecting first block)
mat[2][1] = rng.randn(3)
mat_axis_tree = [
[0 if i == 2 and j == 1 else None for j in range(7)] for i in range(7)
]
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(mat_axis_tree, None),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b),
order=2)
def test_custom_linear_solve_errors(self):
solve = lambda f, x: x
with self.assertRaisesRegex(TypeError, re.escape("matvec() output pytree")):
lax.custom_linear_solve(lambda x: [x], 1.0, solve, solve)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: [x], solve)
with self.assertRaisesRegex(
TypeError, re.escape("transpose_solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, solve, lambda f, x: [x])
with self.assertRaisesRegex(ValueError, re.escape("solve() output shapes")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: jnp.ones(2), solve)
def bad_matvec_usage(a):
return lax.custom_linear_solve(
lambda x: a * jnp.ones(2), 1.0, solve, solve)
with self.assertRaisesRegex(ValueError, re.escape("matvec() output shapes")):
api.jvp(bad_matvec_usage, (1.0,), (1.0,))
def testIssue810(self):
def loss(A):
def step(x, i):
return jnp.matmul(A, x), None
init_x = jnp.zeros(A.shape[-1:])
last_x, _ = lax.scan(step, init_x, jnp.arange(10))
return jnp.sum(last_x)
A = jnp.zeros((3, 3))
# The second DUS was unnecessarily replicating A across time.
# We check XLA because _scan_impl is "underneath" the jaxpr language.
s = str(api.xla_computation(api.grad(loss))(A).as_hlo_text())
assert s.count("dynamic-update-slice(") < 2
def testScanLengthArg(self):
def arange(n):
return lax.scan(lambda c, _: (c + 1, c), 0, None, length=n)[1]
ans = arange(10)
expected = np.arange(10)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap(self):
# code from jsnoek@
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x) # noqa: F821
ans = f_loop(jnp.ones(api.device_count()))
del body, f_loop
def body2(i, x):
result = jnp.broadcast_to(jnp.sin(x).sum(), x.shape)
return result + x
g_loop = lambda x: lax.fori_loop(0, 3, body2, x)
expected = g_loop(jnp.ones(api.device_count()))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap_error_message(self):
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x)
too_big = 2 * api.device_count()
self.assertRaisesRegex(
ValueError,
re.escape(
"compiling a primitive computation `while` that requires {} "
"replicas, but only {} XLA devices are available on backend {}."
.format(too_big, api.device_count(), jtu.device_under_test())),
lambda: f_loop(jnp.ones(too_big)))
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def test_scan_reverse(self, scan):
def cumsum(x, reverse):
return scan(lambda c, x: (c + x, c + x), 0, x, reverse=reverse)[1]
x = np.array([3, 1, 4, 1, 5, 9])
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
def test_scan_unroll(self):
d = jnp.ones(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan = lambda c, xs: lax.scan(f, c, xs)
scan_unrolled = lambda c, xs: lax.scan(f, c, xs, unroll=2)
# jaxprs should be the same size
self.assertEqual(
len(str(api.make_jaxpr(scan)(c, xs))),
len(str(api.make_jaxpr(scan_unrolled)(c, xs))))
# but HLO should grow due to unrolling
self.assertLess(
len(str(api.xla_computation(scan)(c, xs).as_hlo_text())),
len(str(api.xla_computation(scan_unrolled)(c, xs).as_hlo_text())))
def test_disable_jit_cond_with_vmap(self):
# https://github.com/google/jax/issues/3093
def fn(t):
return lax.cond(t > 0, 0, lambda x: 0, 0, lambda x: 1)
fn = api.vmap(fn)
with api.disable_jit():
_ = fn(jnp.array([1])) # doesn't crash
def test_disable_jit_while_loop_with_vmap(self):
# https://github.com/google/jax/issues/2823
def trivial_while(y):
return lax.while_loop(lambda x: x < 10.0, lambda x: x + 1.0, y)
with api.disable_jit():
api.vmap(trivial_while)(jnp.array([3.0,4.0])) # doesn't crash
def test_vmaps_of_while_loop(self):
# https://github.com/google/jax/issues/3164
def f(x, n): return lax.fori_loop(0, n, lambda _, x: x + 1, x)
x, n = jnp.arange(3), jnp.arange(4)
api.vmap(api.vmap(f, (None, 0)), (0, None))(x, n) # doesn't crash
@parameterized.named_parameters(
{"testcase_name": f"_{shape}_axis={axis}",
"shape": shape, "axis": axis}
for shape in [
[0], [1], [2], [3], [5], [10], [1000],
[2, 3], [7, 5], [5, 6, 7]
]
for axis in range(-len(shape), len(shape) - 1))
def testAssociativeScanUnstructured(self, shape, axis):
data = np.arange(np.prod(shape)).reshape(shape) + 7
expected = np.cumsum(data, axis=axis)
result = lax.associative_scan(operator.add, data, axis=axis)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanUnstructured1000Reverse(self):
data = np.arange(1000) + 32
expected = np.cumsum(data[::-1])[::-1]
result = lax.associative_scan(operator.add, data, reverse=True)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanStructured3(self):
pair = collections.namedtuple('pair', ('first', 'second'))
data = pair(first=np.array([0., 1., 2.]),
second=np.array([0., 10., 20.]))
def fn(a, b):
return pair(first=a.first + b.first,
second=a.second + b.second)
result = lax.associative_scan(fn, elems=data)
self.assertAllClose(result.first, np.array([0., 1., 3.]),
check_dtypes=False)
self.assertAllClose(result.second, np.array([0., 10., 30.]),
check_dtypes=False)
def test_scan_typecheck_param(self):
d = jnp.ones(2)
def f(c, a):
b = jnp.cos(jnp.sum(a) + jnp.sum(c) + jnp.sum(d))
c = jnp.sin(c * b)
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan_fun = lambda c, xs: lax.scan(f, c, xs)
def new_jaxpr():
jaxpr = api.make_jaxpr(scan_fun)(c, xs).jaxpr
scan = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'scan')
return jaxpr, scan
jaxpr, eqn = new_jaxpr()
eqn.params['reverse'] = 4
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param reverse of type int, bool required: 4'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['num_consts'] = -3
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param num_consts of type int, '
'non-negative int required: -3'),
lambda: core.check_jaxpr(jaxpr))
def test_cond_typecheck_param(self):
def new_jaxpr():
jaxpr = api.make_jaxpr(
lambda x: lax.switch(0, [jnp.sin, jnp.cos], x))(1.).jaxpr
cond = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cond')
return jaxpr, cond
jaxpr, eqn = new_jaxpr()
eqn.params['branches'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param branches of type tuple, '
'tuple of ClosedJaxpr required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param linear of type tuple, '
'tuple of bool required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = 'multi\nline'
self.assertRaisesRegex(
core.JaxprTypeError,
r'invalid cond param linear of type str, '
r'tuple of bool required:\nmulti\nline',
lambda: core.check_jaxpr(jaxpr))
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_scan_init_weak_type(self, dtype):
def func(carry, x):
return carry + x, x
init_weak = 0 # Python scalars are weakly-typed.
x = jnp.ones(5, dtype=dtype)
carry, result = lax.scan(func, init_weak, x)
self.assertEqual(carry, x.sum())
self.assertArraysEqual(result, x)
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_while_loop_init_weak_type(self, dtype):
# This tests whether lax.while_loop can properly handle weakly-typed
# initial values.
def cond_fun(val):
return val < 2
def body_fun(val):
return val + increment
increment = jnp.array(1, dtype=dtype)
init_weak = 0 # Python scalars are weakly-typed.
result = lax.while_loop(cond_fun, body_fun, init_weak)
self.assertArraysEqual(result, jnp.full_like(increment, 2))
def test_scan_vjp_forwards_extensive_residuals(self):
# https://github.com/google/jax/issues/4510
def cumprod(x):
s = jnp.ones((2, 32), jnp.float32)
return lax.scan(lambda s, x: (x*s, s), s, x)
rng = np.random.RandomState(1234)
x = jnp.asarray(rng.randn(32, 2, 32).astype('float32'))
_, vjp_fun = api.vjp(cumprod, x)
# Need to spelunk into vjp_fun. This is fragile, and if it causes problems
# just skip this test.
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIs(ext_res, x)
x = rng.randn(32, 2, 32).astype('float32') # numpy.ndarray, not DeviceArray
_, vjp_fun = api.vjp(cumprod, x)
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIsInstance(ext_res, xla.DeviceArray)
def test_scan_vmap_collectives(self):
def scan_f(state, x):
s = lax.psum(state, 'i') * x
return state, s
def scan(state, xs):
return lax.scan(scan_f, state, xs)
scan_v = api.vmap(scan, in_axes=0, out_axes=0, axis_name='i')
self.assertAllClose(
scan_v(jnp.ones([1]), jnp.arange(5).reshape((1, 5))),
(jnp.array([1.]), jnp.array([[0., 1., 2., 3., 4.]])))
def test_xla_cpu_gpu_loop_cond_bug(self):
# https://github.com/google/jax/issues/5900
def deriv(f):
return lambda x, *args: jax.linearize(lambda x: f(x, *args), x)[1](1.0)
def _while_loop(cond_fun, body_fun, init_val, max_iter):
def _iter(val):
next_val = body_fun(val)
next_cond = True
return next_val, next_cond
def _fun(tup, _):
val, cond = tup
return jax.lax.cond(cond, _iter, lambda x: (x, False), val), _
init = (init_val, cond_fun(init_val))
return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
def my_pow(x, y):
def body_fun(val):
return val * x
def cond_fun(val):
return True
return _while_loop(cond_fun, body_fun, 1.0, y)
self.assertAllClose(deriv(my_pow)(3.0, 1), 1.0, check_dtypes=False)
def test_unexpected_tracer_error(self):
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by while_loop"):
lst = []
def side_effecting_body(val):
lst.append(val)
return val+1
lax.while_loop(lambda x: x < 2, side_effecting_body, 1)
lst[0] += 1
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by scan"):
lst = []
def side_effecting_scan(carry, val):
lst.append(val)
return carry, val+1
lax.scan(side_effecting_scan, None, jnp.ones((2, 2)))
lst[0] += 1
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| tests/lax_control_flow_test.py | 87,629 | Test typing error messages for cond.
Test typing error messages for while.
Test typing error messages for scan.
Test typing error messages for switch.
Test typing error messages for while.
Test custom linear solve with inputs and outputs that are pytrees.
Apply a Python list of lists of scalars to a list of scalars.
Solve a triangular unrolled system with fwd/back substitution.
Copyright 2018 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. scan tests use numpy Some tests are useful for testing both lax.cond and lax.switch. This function provides a lax.cond-compatible interface to a two-branch lax.switch. Several tests in this file are parameterized such that they either call into lax.cond or into this function. pylint: disable=missing-docstring pylint: disable=missing-docstring pylint: disable=missing-docstring pylint: disable=missing-docstring pylint: disable=missing-docstring Error during XLA code generation for vmap of nested loops branch residuals overlap, should be reused another overlapping residual, expect reuse requires one more residual slot test that proper errors are raised for wrong types these cases stay as cond these cases become select these cases stay as cond these cases become select issue 4696, PR 4896 This preserves the weak type of x. This strips the weak type of x. branch 0 branch 1 branch 2 https://github.com/google/jax/issues/2264 just check this doesn't crash evaluation doesn't crash jvp evaluation doesn't crash jvp numerical check passes linearize works gradient evaluation doesn't crash gradient check passes we can vmap to batch things Tests reverse-mode differentiation through a scan for which the scanned function also involves reverse-mode differentiation. See https://github.com/google/jax/issues/711 doesn't crash doesn't crash Body output not a tuple The carry is a 4-tuple, the last element starts batched, and the carry is shifted left at each iteration. After two shifts, we get the carry_init code from https://github.com/google/jax/issues/757 doesn't crash doesn't crash https://github.com/google/jax/issues/2412 This second caching test shows a different kind of caching that we haven't implemented (but could!), namely that Python functions that are distinct objects but are equivalent functions trigger cache hits. This kind of caching could be salient when using lambda functions with control flow: lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0) lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0) To get a cache hit on the second line we'd need to form a jaxpr and compare them for equality (including the literals on identity). We could implement that by adding a __hash__/__eq__ to core.Jaxpr and core.ClosedJaxpr (see 1221). doesn't crash Raise error for vjp for loops Linearization works doesn't crash doesn't crash unused Equivalent to vanilla gradient descent: https://en.wikipedia.org/wiki/Modified_Richardson_iteration intentionally non-linear in a and b vmap across an empty dimension numerical gradients are only well defined if ``a`` is guaranteed to be positive definite. regression test for https://github.com/google/jax/issues/1536 Non-batched Batch one element of b (which, because of unrolling, should only affect the first block of outputs) Batch one element of mat (again only affecting first block) The second DUS was unnecessarily replicating A across time. We check XLA because _scan_impl is "underneath" the jaxpr language. 
code from jsnoek@ noqa: F821 jaxprs should be the same size but HLO should grow due to unrolling https://github.com/google/jax/issues/3093 doesn't crash https://github.com/google/jax/issues/2823 doesn't crash https://github.com/google/jax/issues/3164 doesn't crash Python scalars are weakly-typed. This tests whether lax.while_loop can properly handle weakly-typed initial values. Python scalars are weakly-typed. https://github.com/google/jax/issues/4510 Need to spelunk into vjp_fun. This is fragile, and if it causes problems just skip this test. numpy.ndarray, not DeviceArray https://github.com/google/jax/issues/5900 | 4,534 | en | 0.873079 |
# from flask import Flask, Blueprint
# from flask_sqlalchemy import SQLAlchemy
# from flask_login import LoginManager
# import os
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
from numpy import array
import re
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Models
class Usuario(db.Model):
nick = db.Column(db.String(20), primary_key=True)
Nombre_de_usuario = db.Column(db.String(50))
password = db.Column(db.String(50))
e_mail = db.Column(db.String(50), unique=True, nullable=False)
descripcion = db.Column(db.String(1000))
link = db.Column(db.String(200))
foto_de_perfil = db.Column(db.String(400))
class Sigue(db.Model):
#id = db.Column(db.Integer, primary_key=True )
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
mensaje = db.Column(db.String(1000))
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
id = db.Column(Integer,primary_key=True)
#id = db.Sequence('id', start=1, increment=1)
descripcion = db.Column(db.String(1000))
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
pdf = db.Column(db.String(400))
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
link = db.Column(db.String(200),nullable=False)
titulo = db.Column(db.String(200),nullable=False)
autor = db.Column(db.String(200),nullable=False)
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Tematica(db.Model):
tema = db.Column(db.String(50), primary_key=True )
class Notificaciones(db.Model):
id = db.Column(db.Integer, primary_key=True )
fecha = db.Column(db.Date)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Prefiere(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Trata_pub_del_tema(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Gusta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Comenta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
comentario = db.Column(db.String(1000))
class Guarda(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Trata(db.Model):
id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
class Genera(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
#token = request.args.get('token') #http://127.0.0.1:5000/route?token=djsnvidnoffofn
#data = request.get_json()
        token = request.headers.get('token') # use .get() so a missing header falls through to the error below
#token = data['token']
if not token:
return jsonify({'error': 'Token no existe'}), 403
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = Usuario.query.filter_by(nick=data['nick']).first()
current_user = data['nick']
except:
return jsonify({'error': 'Token no valido'}), 403
return f(current_user,*args, **kwargs)
return decorated
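# Illustrative usage (not part of the original app): a client obtains a token
# from /register or /login and sends it back in the "token" header, e.g.
#   requests.get('http://127.0.0.1:5000/protected', headers={'token': token})
# Requests without a valid token receive the 403 JSON errors defined above.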
@app.route('/unprotected')
def unprotected():
return jsonify({'message': 'Puede entrar tol mundo'})
@app.route('/protected')
@token_required
def protected(current_user):
print(current_user)
return jsonify({'message': 'Puedes entrar si puedes'})
# Routes for registration and login
@app.route('/register', methods=['POST'])
def add_data():
data= request.get_json()
#nick = request.form.get("nick")
#password = request.form.get("password")
#e_mail = request.form.get("e_mail")
user = Usuario.query.filter_by(e_mail=data['e_mail']).first()
nick = Usuario.query.filter_by(nick=data['nick']).first()
    if user: # if this returns something, the e-mail already exists
        return jsonify({'error': 'Existe correo'}) # JSON error: e-mail already in use
if nick:
return jsonify({'error': 'Existe nick'})
#if (check_email(e_mail) == True and check_password(data['password']) == True ):
register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil="platon.jpg")
db.session.add(register)
db.session.commit()
token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
    # auth = request.authorization  # use this instead if authenticating with HTTP auth
    data = request.get_json()
    if '@' in data['nickOcorreo']:
        user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
    else:
        user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
    if not user:
        return jsonify({'error': 'No existe ese usuario'}) # error: no such user
    if not check_password_hash(user.password, data['password']):
        return jsonify({'error': 'Mal contraseña'}) # error: wrong password
    # Encode the user's nick (not the raw nick-or-email) so token_required can look the user up.
    token = jwt.encode({'nick' : user.nick, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
    return jsonify({'token' : token.decode('UTF-8')})
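# Illustrative request body for /login (matching the fields read above):
#   {"nickOcorreo": "someuser", "password": "secret"}  ->  {"token": "..."}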
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
result = db.session.execute(s)
seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
temas = db.session.execute(tema)
vector = []
for row in temas:
vector += row
for row in result:
fila = {
"nick": current_user,
"nombre_de_usuario":row[0],
"descripcion":row[1],
"link":row[2],
"foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
"nsiguiendo": seguidos,
"nseguidores": seguidores,
"nposts": nposts,
"tematicas": vector
#"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])
}
return fila
@app.route('/display/<filename>')
def foto(filename):
return redirect(url_for('static', filename='fotosPerfil/' + filename),code = 301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
data= request.get_json()
user = Usuario.query.filter_by(nick=current_user).first()
user.Nombre_de_usuario = data['nombre_de_usuario']
print(data['nombre_de_usuario'])
print(data['descripcion'])
print(data['link'])
print(data['tematicas'])
user.descripcion = data['descripcion']
user.link = data['link']
tematicas = data['tematicas']
for temas in tematicas:
tema = Prefiere.query.filter_by(tema=temas).first()
if not tema:
tema = Prefiere(Usuario_Nicka=current_user, tema = temas)
db.session.add(tema)
#db.session.commit()
#cambia_foto
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
user = Usuario.query.filter_by(nick=current_user).first()
    if 'nueva_foto' in request.files: # a new profile photo was uploaded
file = request.files['nueva_foto']
print(request.files['nueva_foto'])
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
user.foto_de_perfil = filename
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
data= request.get_json()
    publicacion = Publicacion(descripcion=data['descripcion'],Usuario_Nicka=current_user) # grab the generated id after commit
db.session.add(publicacion)
db.session.commit()
tematicas = data['tematicas']
for temas in tematicas:
temita = Tematica.query.filter_by(tema=temas).first()
if temita:
nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)
db.session.add(nuevo)
db.session.commit()
if (data['tipo']=="1"): # articulo
print("xd")
guardarPDF(request.files['pdf'], publicacion.id)
elif(data['tipo']=="2"): # recomendacion
recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)
db.session.add(recomendacion)
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
def guardarPDF(pdf,_id):
    # Attach an uploaded PDF to the Propia row of this publication, creating the row if needed.
    propia = Propia.query.filter_by(id=_id).first()
    if propia is None:
        propia = Propia(id=_id)
    if pdf is not None:
        file = pdf
        filename = secure_filename(file.filename)
        file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
        propia.pdf = filename
        db.session.add(propia)
        db.session.commit()
@app.route('/getPostsPropios', methods=['GET'])
@token_required
def getPostsPropios(current_user):
    # Return the authenticated user's own publications, newest first.
    a = select([Usuario.Nombre_de_usuario]).where(Usuario.nick == current_user)
    nombre_de_usuario = db.session.execute(a).scalar()
    #s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and Publicacion.id<=data['id'])).order_by(Publicacion.id)
    s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    results = db.session.execute(s)
    posts = []
    for r in results:
        propia = Propia.query.filter_by(id=r.id).first()
        Gustas = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id).count()
        Comentarios = db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id).count()
        Guardados = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id).count()
        posts.append({
            "id": r.id,
            "nick": current_user,
            "descripcion": r.descripcion,
            "timestamp": r.timestamp,
            "pdf": 'http://51.255.50.207:5000/display2/' + propia.pdf if propia and propia.pdf else None,
            "nlikes": Gustas,
            "ncomentarios": Comentarios,
            "nguardados": Guardados,
            "usuario": nombre_de_usuario
        })
    return jsonify(posts)
@app.route('/display2/<filename>')
def pdf(filename):
return redirect(url_for('static', filename='pdf/' + filename),code = 301)
@app.route('/getPostsRecomendados', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
#data= request.get_json()
a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
resultb = db.session.execute(a)
Nombre_de_usuario = ""
for b in resultb:
Nombre_de_usuario=b.Nombre_de_usuario
    #s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and Publicacion.id<=data['id'])).order_by(Publicacion.id)
s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(s)
# for record in results:
# print("\n", record)
    # Accumulate the fields of each publication into parallel lists.
    vector0 = []
    vector1 = []
    vector2 = []
    for r in results:
        print(str(r.id))
        vector0.append(r.id)
        vector1.append(str(r.descripcion))
        vector2.append(str(r.timestamp))
# for r in results:
# for b in resultb:
# a = select([Recomendacion.id, Recomendacion.link,Recomendacion.titulo,Recomendacion.autor]).where((Recomendacion.id == r.id))
# resulta = db.session.execute(a)
# for a in resultaa:
# Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()
# Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()
# Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()
print(vector0)
fila = {
"id": vector0,
#"link": a.link,
#"titulo": a.titulo,
#"autor": a.autor,
"nick": current_user,
"descripcion": vector1,
"timestamp": vector2,
#"nlikes": Gustas,
#"ncomentarios": Comentarios,
#"nguardados": Guardados,
"usuario": Nombre_de_usuario
}
return fila
def check_email(email):
    regex = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
    if(re.search(regex,email)):
        return True
    else:
        return False
# Passwords must be between 8 and 32 characters, with at least one digit,
# one lowercase letter, one uppercase letter and one special character.
def check_password(password):
    regex = r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|]).{8,32}$'
    if(re.search(regex,password)):
        return True
    else:
        return False
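# For instance (illustrative only): check_password('Abcdef1!') returns True,
# while check_password('abc') returns False (too short, and missing a digit,
# an uppercase letter and a special character).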
if __name__ == '__main__':
app.run(debug=True)
| .vscode-server/data/User/History/-1f47d17c/IWlp.py | 16,514 | from flask import Flask, Blueprint from flask_sqlalchemy import SQLAlchemy from flask_login import LoginManager import os Modelsid = db.Column(db.Integer, primary_key=True )Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())id = db.Sequence('id', start=1, increment=1)Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())token = request.args.get('token') http://127.0.0.1:5000/route?token=djsnvidnoffofndata = request.get_json()token = data['token'] Ruta para el loginnick = request.form.get("nick")password = request.form.get("password")e_mail = request.form.get("e_mail") si esto devuelve algo entonces el email existejson diciendo error existe emailif (check_email(e_mail) == True and check_password(data['password']) == True ): auth = request.authorization new ESTO SI LO HACES CON AUTHerror mal usererror mala contraseña"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])db.session.commit()cambia_fotodata['cambia_foto']:coger id articulo recomendacions = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)data= request.get_json()s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id) for record in results: print("\n", record) for r in results: for b in resultb: a = select([Recomendacion.id, Recomendacion.link,Recomendacion.titulo,Recomendacion.autor]).where((Recomendacion.id == r.id)) resulta = db.session.execute(a) for a in resultaa: Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count() Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count() Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()"link": a.link,"titulo": a.titulo,"autor": a.autor,"nlikes": Gustas,"ncomentarios": Comentarios,"nguardados": Guardados, Contraseñas de entre 8 y 32 carácteres. | 2,292 | es | 0.158281 |
"""A setuptools based setup module.
"""
# Always prefer setuptools over distutils
from setuptools import setup
setup(
name='ooinstall',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version="3.0.0",
description="Ansible wrapper for OpenShift Enterprise 3 installation.",
# The project's main homepage.
url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",
# Author details
author="openshift@redhat.com",
author_email="OpenShift",
# Choose your license
license="Apache 2.0",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
# What does your project relate to?
keywords='oo-install setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['ooinstall'],
package_dir={'ooinstall': 'src/ooinstall'},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['click', 'PyYAML'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
},
tests_require=['nose'],
test_suite='nose.collector',
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'oo-install=ooinstall.cli_installer:cli',
],
},
)
| oct/ansible/openshift-ansible/utils/setup.py | 2,656 | A setuptools based setup module.
Always prefer setuptools over distutils Versions should comply with PEP440. For a discussion on single-sourcing the version across setup.py and the project code, see https://packaging.python.org/en/latest/single_source_version.html The project's main homepage. Author details Choose your license See https://pypi.python.org/pypi?%3Aaction=list_classifiers What does your project relate to? You can just specify the packages manually here if your project is simple. Or you can use find_packages().packages=find_packages(exclude=['contrib', 'docs', 'tests*']), List run-time dependencies here. These will be installed by pip when your project is installed. For an analysis of "install_requires" vs pip's requirements files see: https://packaging.python.org/en/latest/requirements.html List additional groups of dependencies here (e.g. development dependencies). You can install these using the following syntax, for example: $ pip install -e .[dev,test]extras_require={ 'dev': ['check-manifest'], 'test': ['coverage'],}, If there are data files included in your packages that need to be installed, specify them here. If using Python 2.6 or less, then these have to be included in MANIFEST.in as well. To provide executable scripts, use entry points in preference to the "scripts" keyword. Entry points provide cross-platform support and allow pip to create the appropriate form of executable for the target platform. | 1,458 | en | 0.808607 |
import pytest
from . import specparser
def test_load() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
[enum]
user_level = ["beginner", "intermediate", "advanced"]
[directive._parent]
content_type = "block"
options.foo = ["path", "uri"]
[directive.child]
inherit = "_parent"
argument_type = "user_level"
deprecated = true
[role._parent]
help = "test-role"
type = "text"
[role.child]
inherit = "_parent"
[rstobject._parent]
help = "test-rstobject"
[rstobject.child]
inherit = "_parent"
"""
)
assert spec.meta.version == 0
assert spec.enum["user_level"] == ["beginner", "intermediate", "advanced"]
assert spec.directive["child"] == specparser.Directive(
inherit="_parent",
example=None,
help=None,
content_type="block",
argument_type="user_level",
required_context=None,
deprecated=True,
domain=None,
options={"foo": [specparser.PrimitiveType.path, specparser.PrimitiveType.uri]},
name="child",
)
# Test these in the opposite order of the definition to ensure that each "type" of definition
# has a separate inheritance namespace
assert spec.rstobject["child"].help == "test-rstobject"
assert spec.role["child"].help == "test-role"
assert spec.role["child"].type == specparser.PrimitiveRoleType.text
validator = spec.get_validator(
[specparser.PrimitiveType.nonnegative_integer, "user_level"]
)
assert validator("10") == 10
assert validator("intermediate") == "intermediate"
with pytest.raises(ValueError):
validator("-10")
with pytest.raises(ValueError):
validator("foo")
def test_inheritance_cycle() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive.parent]
inherit = "child"
[directive.child]
inherit = "parent"
"""
)
def test_missing_parent() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive._parent]
content_type = "block"
[directive.child]
inherit = "parent"
"""
)
def test_bad_type() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
"""
)
with pytest.raises(ValueError):
spec.get_validator("gjriojwe")
def test_bad_version() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = -1"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 1"""
)
def test_bad_link() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/kotlin/latest/"}"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/%s/kotlin/latest/%s"}"""
)
| .myenv/Lib/site-packages/snooty/test_specparser.py | 3,280 | Test these in the opposite order of the definition to ensure that each "type" of definition has a separate inheritance namespace | 128 | en | 0.88391 |
# dagutil.py - dag utilities for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullrev
from i18n import _
class basedag(object):
'''generic interface for DAGs
terms:
"ix" (short for index) identifies a nodes internally,
"id" identifies one externally.
All params are ixs unless explicitly suffixed otherwise.
Pluralized params are lists or sets.
'''
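    # For example (illustrative note, not in the original module): in the
    # revlog-backed implementations below, an "ix" is the integer revision
    # number and an "id" is the node hash stored in the revlog index.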
def __init__(self):
self._inverse = None
def nodeset(self):
'''set of all node idxs'''
raise NotImplementedError()
def heads(self):
'''list of head ixs'''
raise NotImplementedError()
def parents(self, ix):
'''list of parents ixs of ix'''
raise NotImplementedError()
def inverse(self):
'''inverse DAG, where parents becomes children, etc.'''
raise NotImplementedError()
def ancestorset(self, starts, stops=None):
'''
set of all ancestors of starts (incl), but stop walk at stops (excl)
'''
raise NotImplementedError()
def descendantset(self, starts, stops=None):
'''
set of all descendants of starts (incl), but stop walk at stops (excl)
'''
return self.inverse().ancestorset(starts, stops)
def headsetofconnecteds(self, ixs):
'''
subset of connected list of ixs so that no node has a descendant in it
By "connected list" we mean that if an ancestor and a descendant are in
the list, then so is at least one path connecting them.
'''
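        # For example (illustrative): with parent links 1 <- 2 <- 3, passing
        # ixs = {1, 2, 3} returns {3}, since 1 and 2 each have a descendant
        # in the set.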
raise NotImplementedError()
def externalize(self, ix):
'''return a list of (or set if given a set) of node ids'''
return self._externalize(ix)
def externalizeall(self, ixs):
'''return a list of (or set if given a set) of node ids'''
ids = self._externalizeall(ixs)
if isinstance(ixs, set):
return set(ids)
return list(ids)
def internalize(self, id):
'''return a list of (or set if given a set) of node ixs'''
return self._internalize(id)
def internalizeall(self, ids, filterunknown=False):
'''return a list of (or set if given a set) of node ids'''
ixs = self._internalizeall(ids, filterunknown)
if isinstance(ids, set):
return set(ixs)
return list(ixs)
class genericdag(basedag):
'''generic implementations for DAGs'''
def ancestorset(self, starts, stops=None):
stops = stops and set(stops) or set()
seen = set()
pending = list(starts)
while pending:
n = pending.pop()
if n not in seen and n not in stops:
seen.add(n)
pending.extend(self.parents(n))
return seen
def headsetofconnecteds(self, ixs):
hds = set(ixs)
if not hds:
return hds
for n in ixs:
for p in self.parents(n):
hds.discard(p)
assert hds
return hds
class revlogbaseddag(basedag):
'''generic dag interface to a revlog'''
def __init__(self, revlog, nodeset):
basedag.__init__(self)
self._revlog = revlog
self._heads = None
self._nodeset = nodeset
def nodeset(self):
return self._nodeset
def heads(self):
if self._heads is None:
self._heads = self._getheads()
return self._heads
def _externalize(self, ix):
return self._revlog.index[ix][7]
def _externalizeall(self, ixs):
idx = self._revlog.index
return [idx[i][7] for i in ixs]
def _internalize(self, id):
ix = self._revlog.rev(id)
if ix == nullrev:
raise LookupError(id, self._revlog.indexfile, _('nullid'))
return ix
def _internalizeall(self, ids, filterunknown):
rl = self._revlog
if filterunknown:
return [r for r in map(rl.nodemap.get, ids)
if r is not None and r != nullrev]
return map(self._internalize, ids)
class revlogdag(revlogbaseddag):
'''dag interface to a revlog'''
def __init__(self, revlog):
revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog))))
def _getheads(self):
return [r for r in self._revlog.headrevs() if r != nullrev]
def parents(self, ix):
rlog = self._revlog
idx = rlog.index
revdata = idx[ix]
prev = revdata[5]
if prev != nullrev:
prev2 = revdata[6]
if prev2 == nullrev:
return [prev]
return [prev, prev2]
prev2 = revdata[6]
if prev2 != nullrev:
return [prev2]
return []
def inverse(self):
if self._inverse is None:
self._inverse = inverserevlogdag(self)
return self._inverse
def ancestorset(self, starts, stops=None):
rlog = self._revlog
idx = rlog.index
stops = stops and set(stops) or set()
seen = set()
pending = list(starts)
while pending:
rev = pending.pop()
if rev not in seen and rev not in stops:
seen.add(rev)
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
pending.append(prev)
return seen
def headsetofconnecteds(self, ixs):
if not ixs:
return set()
rlog = self._revlog
idx = rlog.index
headrevs = set(ixs)
for rev in ixs:
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
headrevs.discard(prev)
assert headrevs
return headrevs
def linearize(self, ixs):
'''linearize and topologically sort a list of revisions
The linearization process tries to create long runs of revs where
a child rev comes immediately after its first parent. This is done by
visiting the heads of the given revs in inverse topological order,
and for each visited rev, visiting its second parent, then its first
parent, then adding the rev itself to the output list.
'''
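        # For example (illustrative): with two heads 1 and 2 that both have
        # parent 0, linearize({0, 1, 2}) yields [0, 1, 2] -- rev 1 is emitted
        # immediately after its first parent 0, and head 2 follows.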
sorted = []
visit = list(self.headsetofconnecteds(ixs))
visit.sort(reverse=True)
finished = set()
while visit:
cur = visit.pop()
if cur < 0:
cur = -cur - 1
if cur not in finished:
sorted.append(cur)
finished.add(cur)
else:
visit.append(-cur - 1)
visit += [p for p in self.parents(cur)
if p in ixs and p not in finished]
assert len(sorted) == len(ixs)
return sorted
class inverserevlogdag(revlogbaseddag, genericdag):
'''inverse of an existing revlog dag; see revlogdag.inverse()'''
def __init__(self, orig):
revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
self._orig = orig
self._children = {}
self._roots = []
self._walkfrom = len(self._revlog) - 1
def _walkto(self, walkto):
rev = self._walkfrom
cs = self._children
roots = self._roots
idx = self._revlog.index
while rev >= walkto:
data = idx[rev]
isroot = True
for prev in [data[5], data[6]]: # parent revs
if prev != nullrev:
cs.setdefault(prev, []).append(rev)
isroot = False
if isroot:
roots.append(rev)
rev -= 1
self._walkfrom = rev
def _getheads(self):
self._walkto(nullrev)
return self._roots
def parents(self, ix):
if ix is None:
return []
if ix <= self._walkfrom:
self._walkto(ix)
return self._children.get(ix, [])
def inverse(self):
return self._orig
| LocalMercurial/mercurial/dagutil.py | 8,249 | generic interface for DAGs
terms:
"ix" (short for index) identifies a nodes internally,
"id" identifies one externally.
All params are ixs unless explicitly suffixed otherwise.
Pluralized params are lists or sets.
generic implementations for DAGs
inverse of an existing revlog dag; see revlogdag.inverse()
generic dag interface to a revlog
dag interface to a revlog
set of all ancestors of starts (incl), but stop walk at stops (excl)
set of all descendants of starts (incl), but stop walk at stops (excl)
return a list of (or set if given a set) of node ids
return a list of (or set if given a set) of node ids
list of head ixs
subset of connected list of ixs so that no node has a descendant in it
By "connected list" we mean that if an ancestor and a descendant are in
the list, then so is at least one path connecting them.
return a list of (or set if given a set) of node ixs
return a list of (or set if given a set) of node ids
inverse DAG, where parents becomes children, etc.
linearize and topologically sort a list of revisions
The linearization process tries to create long runs of revs where
a child rev comes immediately after its first parent. This is done by
visiting the heads of the given revs in inverse topological order,
and for each visited rev, visiting its second parent, then its first
parent, then adding the rev itself to the output list.
set of all node idxs
list of parents ixs of ix
dagutil.py - dag utilities for mercurial Copyright 2010 Benoit Boissinot <bboissin@gmail.com> and Peter Arrenbrecht <peter@arrenbrecht.ch> This software may be used and distributed according to the terms of the GNU General Public License version 2 or any later version. parent revs | 1,698 | en | 0.824613 |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe_app.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe_app:ingredient-list')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicIngredientsAPITests(TestCase):
"""Test endpoints that don't require authentication."""
def setUp(self):
self.client = APIClient()
def test_login_required_to_view_ingredients(self):
"""Test that authentication is needed to view the ingredients."""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsAPITests(TestCase):
"""Test endpoints that require authentication."""
def setUp(self):
self.client = APIClient()
self.user = create_user(
fname='Test',
lname='User',
email='test@gmail.com',
password='testpass'
)
self.client.force_authenticate(user=self.user)
def test_retrieve_ingredients_is_successful(self):
"""Test retrieve ingredients"""
Ingredient.objects.create(user=self.user, name='Carrot')
Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_retrieved_ingredients_limited_to_user(self):
"""Tests that only the user's ingredients are retrieved"""
user2 = create_user(
fname='Test2',
lname='User2',
email='test2@gmail.com',
password='test2pass'
)
Ingredient.objects.create(user=user2, name='Carrot')
ingredient = Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_is_successful(self):
"""Test that creating a new ingredient is successful."""
payload = {
'name': 'Lemon'
}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_with_invalid_details_invalid(self):
"""Test that ingredients is not created with invalid details"""
payload = {
'name': ''
}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| app/recipe_app/tests/test_ingredients_api.py | 3,090 | Test endpoints that require authentication.
Test endpoints that don't require authentication.
Test that creating a new ingredient is successful.
Test that ingredients is not created with invalid details
Test that authentication is needed to view the ingredients.
Test retrieve ingredients
Tests that only the user's ingredients are retrieved | 341 | en | 0.895907 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lbvserver_appfwpolicy_binding(base_resource) :
"""Binding class showing the appfwpolicy that can be bound to lbvserver."""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._sc = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
"""Priority."""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority.
:param priority:
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE."""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
:param gotopriorityexpression:
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
"""Name of the policy bound to the LB vserver."""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the policy bound to the LB vserver.
:param policyname:
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def bindpoint(self) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE."""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
:param bindpoint:
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def labeltype(self) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel."""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
:param labeltype:
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label invoked."""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the label invoked.
:param labelname:
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke policies bound to a virtual server or policy label."""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke policies bound to a virtual server or policy label.
:param invoke:
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def sc(self) :
"""Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF."""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_appfwpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_appfwpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = lbvserver_appfwpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_appfwpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
"""Use this API to fetch lbvserver_appfwpolicy_binding resources.
:param service:
:param name:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
"""Use this API to fetch filtered set of lbvserver_appfwpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
"""Use this API to count lbvserver_appfwpolicy_binding resources configued on NetScaler.
:param service:
:param name:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
"""Use this API to count the filtered set of lbvserver_appfwpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
""" """
ON = "ON"
OFF = "OFF"
class Bindpoint:
""" """
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
""" """
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_appfwpolicy_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.lbvserver_appfwpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_appfwpolicy_binding = [lbvserver_appfwpolicy_binding() for _ in range(length)]
| nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py | 13,671 | Binding class showing the appfwpolicy that can be bound to lbvserver.
converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
Returns the value of object identifier argument
:param client:
:param resource:
The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
:param bindpoint:
Use this API to count lbvserver_appfwpolicy_binding resources configued on NetScaler.
:param service:
:param name:
Use this API to count the filtered set of lbvserver_appfwpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
:param client:
:param resource:
Use this API to fetch lbvserver_appfwpolicy_binding resources.
:param service:
:param name:
Use this API to fetch filtered set of lbvserver_appfwpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
:param gotopriorityexpression:
Invoke policies bound to a virtual server or policy label.
Invoke policies bound to a virtual server or policy label.
:param invoke:
Name of the label invoked.
Name of the label invoked.
:param labelname:
The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
:param labeltype:
Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
:param name:
Name of the policy bound to the LB vserver.
Name of the policy bound to the LB vserver.
:param policyname:
Priority.
Priority.
:param priority:
Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.
Copyright (c) 2008-2015 Citrix Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License") you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 3,575 | en | 0.728292 |
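The count and count_filtered helpers shown above both take a nitro service session plus the load-balancing vserver name. A minimal hedged sketch of calling them follows; the "service" object (an authenticated nitro_service session) and the vserver name are placeholders, not values taken from this file.
# Hedged usage sketch for the count helpers above.
# "service" is assumed to be an authenticated nitro_service session created elsewhere,
# and "lb_vip1" is a placeholder vserver name.
total_bindings = lbvserver_appfwpolicy_binding.count(service, "lb_vip1")
request_bound = lbvserver_appfwpolicy_binding.count_filtered(service, "lb_vip1", "bindpoint:REQUEST")
print(total_bindings, request_bound)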
#
# Autogenerated by Frugal Compiler (3.4.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from . import f_BasePinger
from .ttypes import *
class Iface(f_BasePinger.Iface):
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
pass
class Client(f_BasePinger.Client, Iface):
def __init__(self, provider, middleware=None):
"""
Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Client, self).__init__(provider, middleware=middleware)
middleware += provider.get_middleware()
self._methods.update({
'ping': Method(self._ping, middleware),
})
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
return await self._methods['ping']([ctx])
async def _ping(self, ctx):
memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
oprot = self._protocol_factory.get_protocol(memory_buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.CALL, 0)
args = ping_args()
args.write(oprot)
oprot.writeMessageEnd()
response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
iprot = self._protocol_factory.get_protocol(response_transport)
iprot.read_response_headers(ctx)
_, mtype, _ = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
class Processor(f_BasePinger.Processor):
def __init__(self, handler, middleware=None):
"""
Create a new Processor.
Args:
handler: Iface
"""
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Processor, self).__init__(handler, middleware=middleware)
self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
class _ping(FProcessorFunction):
def __init__(self, handler, lock):
super(_ping, self).__init__(handler, lock)
async def process(self, ctx, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
ret = self._handler([ctx])
if inspect.iscoroutine(ret):
ret = await ret
except TApplicationException as ex:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", exception=ex)
return
except Exception as e:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
raise
async with self._lock:
try:
oprot.write_response_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.REPLY, 0)
result.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
except TTransportException as e:
# catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
raise _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
else:
raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
if exception is not None:
x = exception
else:
x = TApplicationException(type=ex_code, message=message)
oprot.write_response_headers(ctx)
oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
x.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
return x
class ping_args(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| test/expected/python.asyncio/service_extension_same_file/f_Pinger.py | 7,173 | Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
Create a new Processor.
Args:
handler: Iface
Autogenerated by Frugal Compiler (3.4.2) DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written | 459 | en | 0.862852 |
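As the constructor notes above describe, a Client wraps an FServiceProvider carrying a transport and a protocol factory. The sketch below shows roughly how the generated client might be driven; the provider construction is assumed to happen elsewhere, and the FContext import path is an assumption rather than something this generated file defines.
# Hedged sketch: "provider" stands for an FServiceProvider built elsewhere
# (transport + protocol factory); only Client and ping come from the file above.
import asyncio
from frugal.context import FContext  # assumed import location

async def ping_once(provider):
    client = Client(provider)      # middleware defaults to an empty list
    await client.ping(FContext())  # ping takes an FContext and returns nothing

# asyncio.run(ping_once(provider)) once a provider has been constructed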
from __future__ import annotations
import argparse
import atexit
import itertools
import shlex
import shutil
import signal
import subprocess
import sys
import traceback
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from pyoomph import ast, ast2ir, ast_transformer, c_output, ir, parser
python_code_dir = Path(__file__).absolute().parent
project_root = python_code_dir.parent
class CompilationUnit:
ast: List[ast.ToplevelDeclaration]
def __init__(self, source_path: Path, session: c_output.Session):
self.source_path = source_path
self.session = session
def _handle_error(self) -> None:
traceback.print_exc()
print(f"\nThis happened while compiling {self.source_path}", file=sys.stderr)
sys.exit(1)
def create_untyped_ast(self) -> None:
try:
source_code = self.source_path.read_text(encoding="utf-8")
self.ast = ast_transformer.transform_file(
parser.parse_file(
source_code, self.source_path, project_root / "stdlib"
)
)
except Exception:
self._handle_error()
def create_c_code(self, exports: List[ir.Symbol]) -> None:
try:
the_ir = ast2ir.convert_program(self.ast, self.source_path, exports)
self.session.create_c_code(the_ir, self.source_path)
except Exception:
self._handle_error()
def get_c_compiler_command(c_paths: List[Path], exepath: Path) -> Tuple[List[str], str]:
compile_info = {}
with (project_root / "obj" / "compile_info.txt").open() as file:
for line in file:
key, value = line.rstrip("\n").split("=", maxsplit=1)
compile_info[key] = value
before_files = (
[compile_info["cc"]]
+ shlex.split(compile_info["cflags"])
+ [str(path) for path in project_root.glob("obj/*.o")]
)
after_files = (
["-o", str(exepath)]
+ shlex.split(compile_info["ldflags"])
+ ["-I", str(project_root)]
)
return (
before_files + [str(path) for path in c_paths] + after_files,
" ".join(
[shlex.quote(arg) for arg in before_files]
+ [f"<{len(c_paths)} files>"]
+ [shlex.quote(arg) for arg in after_files]
),
)
def run(command: List[str], verbose: bool, human_readable: Optional[str] = None) -> int:
if verbose:
if human_readable is None:
human_readable = " ".join(map(shlex.quote, command))
print("Running:", human_readable, file=sys.stderr)
return subprocess.run(command).returncode
def get_compilation_dir(parent_dir: Path, name_hint: str) -> Path:
for i in itertools.count():
path = parent_dir / (name_hint + str(i))
path.mkdir(parents=True, exist_ok=True)
try:
(path / "compiling").touch(exist_ok=False)
except FileExistsError:
# Another instance of oomph compiler running in parallel
continue
else:
atexit.register((path / "compiling").unlink)
return path
assert False # make mypy feel good
def compute_dependency_graph(
session: c_output.Session,
infile: Path,
verbose: bool,
) -> Dict[CompilationUnit, List[Path]]:
dependency_graph: Dict[CompilationUnit, List[Path]] = {}
queue = [infile]
while queue:
# Pop the next source file to parse
source_path = queue.pop()
if source_path in (unit.source_path for unit in dependency_graph.keys()):
continue
if verbose:
print("Parsing", source_path)
# Create a compilation unit out of it and parse it into an untyped ast
candidate_unit = CompilationUnit(source_path, session)
candidate_unit.create_untyped_ast()
# Calculate its dependencies and add them to the dependencies dictionary,
# including builtins if necessary, and add those dependencies to the queue
current_dependencies = [
top_declaration.path
for top_declaration in candidate_unit.ast
if isinstance(top_declaration, ast.Import)
]
if source_path != project_root / "builtins.oomph":
current_dependencies.append(project_root / "builtins.oomph")
dependency_graph[candidate_unit] = current_dependencies
queue.extend(current_dependencies)
return dependency_graph
def compute_compilation_order(
verbose: bool,
dependency_graph: Dict[CompilationUnit, List[Path]],
) -> List[CompilationUnit]:
compilation_order: List[CompilationUnit] = []
while len(compilation_order) < len(dependency_graph):
candidate_unit = next(
u for u in dependency_graph.keys() if u not in compilation_order
)
breadcrumbs = [candidate_unit]
while True:
uncompiled_dependencies = [
u
for u in dependency_graph.keys()
if u not in compilation_order
and u.source_path in dependency_graph[candidate_unit]
]
if not uncompiled_dependencies:
break
candidate_unit = uncompiled_dependencies[0]
if candidate_unit in breadcrumbs:
message = (
" --> ".join(d.source_path.name for d in breadcrumbs)
+ " --> "
+ candidate_unit.source_path.name
)
raise RuntimeError("cyclic imports: " + message)
breadcrumbs.append(candidate_unit)
compilation_order.append(candidate_unit)
return compilation_order
def main() -> None:
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("infile", type=Path)
arg_parser.add_argument("-o", "--outfile", type=Path)
arg_parser.add_argument("--valgrind", default="")
arg_parser.add_argument("-v", "--verbose", action="store_true")
compiler_args, program_args = arg_parser.parse_known_args()
try:
cache_dir = compiler_args.infile.parent / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
except OSError:
cache_dir = Path.cwd() / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
# Create a compiler session
session = c_output.Session(
get_compilation_dir(cache_dir, compiler_args.infile.stem + "_compilation")
)
# Calculate the dependency graph
dependency_graph = compute_dependency_graph(
session, compiler_args.infile.absolute(), compiler_args.verbose
)
# Calculate in which order we need to compile our units
compilation_order = compute_compilation_order(
compiler_args.verbose, dependency_graph
)
# Compile in the calculated order
for unit in compilation_order:
if compiler_args.verbose:
print("Creating C code:", unit.source_path)
unit.create_c_code(session.symbols)
# Write out everything and compile it
c_paths = session.write_everything(project_root / "builtins.oomph")
exe_path = session.compilation_dir / compiler_args.infile.stem
command, human_readable_command = get_c_compiler_command(c_paths, exe_path)
result = run(command, compiler_args.verbose, human_readable_command)
if result != 0:
sys.exit(result)
# If we have an outfile path, move the resulting executable to it and bail
if compiler_args.outfile is not None:
assert not compiler_args.outfile.is_dir() # shutil.move is weird for dirs
shutil.move(str(exe_path), str(compiler_args.outfile))
if compiler_args.verbose:
print("Moved executable to", compiler_args.outfile)
return
# Otherwise, run it directly
command = shlex.split(compiler_args.valgrind) + [str(exe_path)] + program_args
result = run(command, compiler_args.verbose)
if result < 0: # killed by signal
message = f"Program killed by signal {abs(result)}"
try:
message += f" ({signal.Signals(abs(result)).name})"
except ValueError: # e.g. SIGRTMIN + 1
pass
print(message, file=sys.stderr)
elif result > 0:
print(f"Program exited with status {result}", file=sys.stderr)
sys.exit(result)
main()
| pyoomph/__main__.py | 8,281 | Another instance of oomph compiler running in parallel make mypy feel good Pop the next source file to parse Create a compilation unit out of it and parse it into an untyped ast Calculate its dependencies and add them to the dependencies dictionary, including builtins if necessary, and add those dependencies to the queue Create a compiler session Calculate the dependency graph Calculate in which order we need to compile our units Compile in the calculated order Write out everything and compile it If we have an outfile path, move the resulting executable to it and bail shutil.move is weird for dirs Otherwise, run it directly killed by signal e.g. SIGRTMIN + 1 | 666 | en | 0.867526 |
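Taken together, compute_dependency_graph and compute_compilation_order amount to a topological sort with cycle detection over source files. Below is a small standalone sketch of that ordering idea, using plain path strings instead of CompilationUnit objects; the file names are hypothetical and not part of the compiler.
# Standalone sketch of the ordering idea in compute_compilation_order,
# with plain strings standing in for CompilationUnit objects (hypothetical data).
def order(deps: dict) -> list:
    done = []
    while len(done) < len(deps):
        unit = next(u for u in deps if u not in done)
        trail = [unit]
        while True:
            pending = [u for u in deps if u not in done and u in deps[unit]]
            if not pending:
                break
            unit = pending[0]
            if unit in trail:
                raise RuntimeError("cyclic imports: " + " --> ".join(trail + [unit]))
            trail.append(unit)
        done.append(unit)
    return done

print(order({"main.oomph": ["util.oomph", "builtins.oomph"],
             "util.oomph": ["builtins.oomph"],
             "builtins.oomph": []}))
# ['builtins.oomph', 'util.oomph', 'main.oomph']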
import logging
from typing import List, Union
from cactus.consensus.block_record import BlockRecord
from cactus.consensus.blockchain_interface import BlockchainInterface
from cactus.consensus.constants import ConsensusConstants
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.types.full_block import FullBlock
from cactus.types.header_block import HeaderBlock
from cactus.types.unfinished_block import UnfinishedBlock
from cactus.types.unfinished_header_block import UnfinishedHeaderBlock
from cactus.util.ints import uint64
log = logging.getLogger(__name__)
def final_eos_is_already_included(
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
sub_slot_iters: uint64,
) -> bool:
"""
Args:
header_block: An overflow block, with potentially missing information about the new sub slot
blocks: all blocks that have been included before header_block
sub_slot_iters: sub_slot_iters at the header_block
Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
slot was not included yet, and therefore it is the responsibility of this block to include it
"""
if len(header_block.finished_sub_slots) > 0:
# We already have an included empty sub slot, which means the prev block is 2 sub slots behind.
return False
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
# We also check if curr is close to header_block, which means it's in the same sub slot
seen_overflow_block = curr.overflow and (header_block.total_iters - curr.total_iters < sub_slot_iters // 2)
while not curr.first_in_sub_slot and not curr.height == 0:
if curr.overflow and header_block.total_iters - curr.total_iters < sub_slot_iters // 2:
seen_overflow_block = True
curr = blocks.block_record(curr.prev_hash)
if curr.first_in_sub_slot and seen_overflow_block:
# We have seen another overflow block in this slot (same as header_block), therefore there are no
# missing sub slots
return True
# We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in
# the future
return False
def get_block_challenge(
constants: ConsensusConstants,
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
genesis_block: bool,
overflow: bool,
skip_overflow_last_ss_validation: bool,
):
if len(header_block.finished_sub_slots) > 0:
if overflow:
# New sub-slot with overflow block
if skip_overflow_last_ss_validation:
# In this case, we are missing the final sub-slot bundle (it's not finished yet), however
# There is a whole empty slot before this block is infused
challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
challenge = header_block.finished_sub_slots[
-1
].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
# No overflow, new slot with a new challenge
challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
if genesis_block:
challenge = constants.GENESIS_CHALLENGE
else:
if overflow:
if skip_overflow_last_ss_validation:
# Overflow infusion without the new slot, so get the last challenge
challenges_to_look_for = 1
else:
# Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False,
# Which means no sub slots are omitted
challenges_to_look_for = 2
else:
challenges_to_look_for = 1
reversed_challenge_hashes: List[bytes32] = []
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
while len(reversed_challenge_hashes) < challenges_to_look_for:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
if curr.height == 0:
assert curr.finished_challenge_slot_hashes is not None
assert len(curr.finished_challenge_slot_hashes) > 0
break
curr = blocks.block_record(curr.prev_hash)
challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
return challenge
| cactus/consensus/get_block_challenge.py | 4,794 | Args:
header_block: An overflow block, with potentially missing information about the new sub slot
blocks: all blocks that have been included before header_block
sub_slot_iters: sub_slot_iters at the header_block
Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
slot was not included yet, and therefore it is the responsibility of this block to include it
We already have an included empty sub slot, which means the prev block is 2 sub slots behind. We also check if curr is close to header_block, which means it's in the same sub slot We have seen another overflow block in this slot (same as header_block), therefore there are no missing sub slots We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in the future New sub-slot with overflow block In this case, we are missing the final sub-slot bundle (it's not finished yet), however There is a whole empty slot before this block is infused No overflow, new slot with a new challenge Overflow infusion without the new slot, so get the last challenge Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False, Which means no sub slots are omitted | 1,261 | en | 0.943783 |
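The "curr is close to header_block" test described above is just an iteration-count comparison against half a sub slot. A tiny numeric illustration with made-up values:
# Made-up numbers, only to illustrate the proximity test used in
# final_eos_is_already_included; none of these values come from a real chain.
sub_slot_iters = 1024
header_total_iters = 5000
curr_total_iters = 4700
in_same_sub_slot = (header_total_iters - curr_total_iters) < sub_slot_iters // 2
print(in_same_sub_slot)  # 300 < 512 -> True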
# -*- coding: utf-8 -*-
from functools import lru_cache
import requests
from requests.packages.urllib3.util.retry import Retry
# https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
DEFAULT_RETRIES = 5
DEFAULT_BACKOFF_FACTOR = 0.1
DEFAULT_STATUS_FORCELIST = [500, 502, 503, 504]
@lru_cache(maxsize=None)
def get_session(name, concurrency=50):
session = requests.Session()
retry = Retry(
total=DEFAULT_RETRIES,
backoff_factor=DEFAULT_BACKOFF_FACTOR,
status_forcelist=DEFAULT_STATUS_FORCELIST,
)
# Default HTTPAdapter uses 10 connections. Mount custom adapter to increase
# that limit. Connections are established as needed, so using a large value
# should not negatively impact performance.
http_adapter = requests.adapters.HTTPAdapter(
pool_connections=concurrency, pool_maxsize=concurrency, max_retries=retry
)
session.mount("https://", http_adapter)
session.mount("http://", http_adapter)
return session
| mozci/util/req.py | 1,031 | -*- coding: utf-8 -*- https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.htmlmodule-urllib3.util.retry Default HTTPAdapter uses 10 connections. Mount custom adapter to increase that limit. Connections are established as needed, so using a large value should not negatively impact performance. | 305 | en | 0.754774 |
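A short usage sketch of get_session follows; the session name and URL are placeholders. Because of lru_cache, repeated calls with the same name reuse one Session and its mounted retrying adapters.
# Usage sketch; the session name and URL are placeholders.
session = get_session("treeherder")
assert get_session("treeherder") is session  # cached per name via lru_cache
resp = session.get("https://example.org/api/ping")  # retried on 500/502/503/504
resp.raise_for_status()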
from werkzeug.wrappers import Request
from flask import Flask, redirect, url_for, request, flash
from flask_sqlalchemy import SQLAlchemy
import os
import requests
import random
from contact_form import ContactForm
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.gitlab import make_gitlab_blueprint, gitlab
from discord_webhook import DiscordWebhook
import flask
from os import path
from flask_dance.consumer import oauth_authorized
app = Flask(__name__, template_folder="templates", static_folder='static')
# Various environment variables
app.secret_key = os.environ.get("FLASK_SECRET")
discord_url = os.environ.get("WEBHOOK")
FLASK_HOST = os.environ.get("FLASK_HOST")
app.config["GITHUB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITHUB_CLIENT_ID")
app.config["GITHUB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITHUB_SECRET")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Github blueprint
github_bp = make_github_blueprint()
github_bp.redirect_url = FLASK_HOST+"/docs"
app.register_blueprint(github_bp, url_prefix="/login")
app.config["GITLAB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITLAB_ID")
app.config["GITLAB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITLAB_SECRET")
gitlab_bp = make_gitlab_blueprint()
app.register_blueprint(gitlab_bp, url_prefix="/login")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Database model & connection
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
db = SQLAlchemy(app)
git_token = os.environ.get("GITHUB_TOKEN")
print(git_token)
@oauth_authorized.connect
def redirect_to_docs(blueprint, token):
blueprint.token = token
user = []
git_hash = []
resp = github.get("/user")
user = User.query.filter_by(username=resp.json()['login']).first()
if not user:
user = User(username=resp.json()['login'],
github_hash=str(random.getrandbits(128)))
db.session.add(user)
db.session.commit()
DiscordWebhook(url=discord_url, content=f"New user: {resp.json()['login']}. Check out profile at https://github.com/{resp.json()['login']}").execute()
git_hash = user.github_hash
return redirect(f"/docs?username={resp.json()['login']}&token={git_hash}")
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
github_hash = db.Column(db.String(80), unique=True, nullable=True)
# gitlab_hash = db.Column(db.String(80), unique=True, nullable=True)
def __repr__(self):
return '<User %r>' % self.username
if path.exists("db.sqlite") == True:
print("Database exists")
else:
print("Creating database")
db.create_all()
# Routing and repository parsing
@app.route("/signup")
def signup():
resp = github.get("/user")
if not github.authorized:
return redirect(url_for("github.login"))
print(resp)
assert resp.ok
user = User.query.filter_by(username=resp.json()['login']).first()
username = resp.json()['login']
github_hash = user.github_hash
return redirect(f"/docs?username={username}&token={github_hash}")
def parseGithubRepos(repos):
parsedRepos = []
displayForks = request.args.get('forks')
for repo in repos:
parsedRepo = {
'name': repo['full_name'],
'description': repo['description'],
'issues': repo['open_issues'],
'owner': repo['owner']['login'],
'stars': repo['stargazers_count'],
'forks': repo['forks_count'],
'url': repo['html_url'],
'size': repo['size'],
'language': repo['language']
}
if parsedRepo['description'] == None:
parsedRepo['description'] = "No description provided"
if displayForks == 'hidden':
if repo['fork'] == False:
parsedRepos.append(parsedRepo)
else:
parsedRepos.append(parsedRepo)
# if repo['fork'] == False: parsedRepos.append(parsedRepo)
parsedRepos.sort(key=lambda repo: repo["stars"], reverse=True)
return parsedRepos
@app.route("/widget/<username>")
def thing(username):
token = request.args.get('token')
db.session.commit()
user = User.query.filter_by(username=username).first()
resp = {}
theme = request.args.get('theme')
if theme != 'dark': theme = 'light'
if user == None:
return "User not found"
else:
repos = []
if user.github_hash == token:
page = 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page=1", auth=("Uzay-G", git_token)).json()
while resp != []:
print(resp, "\n\n\n")
repos += parseGithubRepos(resp)
page += 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page={page}", auth=("Uzay-G", git_token)).json()
if type(resp) is dict:
return f'ERROR: {resp["message"]}'
return flask.render_template('widget.html', repos=repos, theme=theme)
else:
return "You do not have a valid api token"
@app.route("/")
def serveMain():
form = ContactForm()
return flask.render_template('index.html', form=form)
@app.route("/docs")
def docs():
form = ContactForm()
return flask.render_template('docs.html', username=request.args.get('username'), token=request.args.get("token"), hostname=FLASK_HOST, form=form)
@app.route("/contact", methods=['POST'])
def contact():
form = ContactForm()
if form.validate_on_submit():
flash('Your message was received')
DiscordWebhook(url=discord_url, content=f"Contact @hackathon: name: {form.name.data}, email: {form.email.data}, message: {form.message.data}").execute()
else:
flash('Your message was not transferred correctly.')
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
# @app.route("/signup_gitlab")
# def signup_gitlab():
# resp = gitlab.get("/user")
# if not gitlab.authorized:
# return redirect(url_for("gitlab.login"))
# print(resp)
# assert resp.ok
# user = User.query.filter_by(username=resp.json()['login']).first()
# username = resp.json()['login']
# gitlab_hash = user.gitlab_hash
# return redirect(f"/docs?username={username}&token={gitlab_hash}")
# def getGitlabRepoLanguage(repo):
# resp = requests.get(f"https://gitlab.com/api/v4/projects/{repo['id']}/languages").json()
# return next(iter(resp))
# def parseGitlabRepos(repos):
# parsedRepos = []
# for repo in repos:
# parsedRepo = {}
# parsedRepo['name'] = repo['name']
# if repo['description'] == None:
# parsedRepo['description'] = "No description provided"
# else:
# parsedRepo['description'] = repo['description']
# try:
# parsedRepo['issues'] = repo['open_issues_count']
# except:
# parsedRepo['issues'] = 0
# parsedRepo['owner'] = repo['namespace']['name']
# parsedRepo['stars'] = repo['star_count']
# parsedRepo['forks'] = repo['forks_count']
# parsedRepo['url'] = repo['web_url']
# try:
# parsedRepo['size'] = repo['statistics']['repository_size'],
# except:
# parsedRepo['size'] = None
# parsedRepo['language'] = getGitlabRepoLanguage(repo)
# parsedRepos.append(parsedRepo)
# return parsedRepos
| app.py | 7,828 | Various environmental variables Github blueprint Database model & connection gitlab_hash = db.Column(db.String(80), unique=True, nullable=True) Routing and repository parsing if repo['fork'] == False: parsedRepos.append(parsedRepo) @app.route("/signup_gitlab") def signup_gitlab(): resp = gitlab.get("/user") if not gitlab.authorized: return redirect(url_for("gitlab.login")) print(resp) assert resp.ok user = User.query.filter_by(username=resp.json()['login']).first() username = resp.json()['login'] gitlab_hash = user.gitlab_hash return redirect(f"/docs?username={username}&token={gitlab_hash}") def getGitlabRepoLanguage(repo): resp = requests.get(f"https://gitlab.com/api/v4/projects/{repo['id']}/languages").json() return next(iter(resp)) def parseGitlabRepos(repos): parsedRepos = [] for repo in repos: parsedRepo = {} parsedRepo['name'] = repo['name'] if repo['description'] == None: parsedRepo['description'] = "No description provided" else: parsedRepo['description'] = repo['description'] try: parsedRepo['issues'] = repo['open_issues_count'] except: parsedRepo['issues'] = 0 parsedRepo['owner'] = repo['namespace']['name'] parsedRepo['stars'] = repo['star_count'] parsedRepo['forks'] = repo['forks_count'] parsedRepo['url'] = repo['web_url'] try: parsedRepo['size'] = repo['statistics']['repository_size'], except: parsedRepo['size'] = None parsedRepo['language'] = getGitlabRepoLanguage(repo) parsedRepos.append(parsedRepo) return parsedRepos | 1,699 | en | 0.18758 |
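Putting the routes together: /signup redirects through GitHub OAuth and hands back a username/token pair, which the /widget route then checks before rendering the repository list. A hedged sketch of fetching the rendered widget from a locally running instance; the host, username and token below are placeholders.
# Hedged sketch of calling the /widget route on a locally running instance.
# The host, username and token are placeholders, not real values.
import requests

params = {
    "token": "<github_hash returned by /signup>",
    "theme": "dark",    # anything other than "dark" falls back to "light"
    "forks": "hidden",  # hides forked repositories, per parseGithubRepos
}
html = requests.get("http://127.0.0.1:5000/widget/some-user", params=params).text
print(html[:200])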
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'secure_auth_rest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| manage.py | 636 | Django's command-line utility for administrative tasks.
!/usr/bin/env python | 77 | en | 0.656913 |
ownerclass = 'AppDelegate'
ownerimport = 'AppDelegate.h'
# Init
result = Window(330, 110, "Tell me your name!")
nameLabel = Label(result, text="Name:")
nameLabel.width = 45
nameField = TextField(result, text="")
helloLabel = Label(result, text="")
button = Button(result, title="Say Hello", action=Action(owner, 'sayHello'))
button.width = 100
# Owner Assignments
owner.nameField = nameField
owner.helloLabel = helloLabel
# Layout
nameLabel.moveTo(Pack.UpperLeft)
nameField.moveNextTo(nameLabel, Pack.Right, Pack.Middle)
nameField.fill(Pack.Right)
helloLabel.moveNextTo(nameLabel, Pack.Below, Pack.Left)
helloLabel.fill(Pack.Right)
button.moveNextTo(helloLabel, Pack.Below, Pack.Right)
nameField.setAnchor(Pack.UpperLeft, growX=True)
helloLabel.setAnchor(Pack.UpperLeft, growX=True)
button.setAnchor(Pack.UpperRight)
| demos/localized/MainWindow.py | 820 | Init Owner Assignments Layout | 29 | en | 0.897204 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary.marker.colorbar"
_path_str = "scatterternary.marker.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| packages/python/plotly/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py | 8,543 | Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
class properties -------------------- color ----- family ------ size ---- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ | 4,490 | en | 0.569698 |
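For orientation, a small hedged example of where this class ends up when building a figure by hand; the data values are arbitrary and plotly.graph_objects is assumed to be importable.
# Hedged example: arbitrary data, just to show where marker.colorbar.tickfont sits.
import plotly.graph_objects as go

fig = go.Figure(
    go.Scatterternary(
        a=[0.2, 0.5, 0.3],
        b=[0.3, 0.2, 0.4],
        c=[0.5, 0.3, 0.3],
        mode="markers",
        marker=dict(
            color=[1, 2, 3],
            showscale=True,
            colorbar=dict(
                tickfont=dict(family="Courier New", size=12, color="gray"),
            ),
        ),
    )
)
# fig.show()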