try:
import sys
# importing whole module
from tkinter import *
from tkinter.ttk import *
from time import strftime
import time
root = Tk()
root.title("A Digital Clock")
def clock():
Strr = strftime("%a/%H:%M:%S:%p ")
design.config(text = Strr)
design.after(100,clock)
design = Label(root, font=('Arial', 70, 'bold'),
               background='Yellow',
               foreground='Black')
# Use a single geometry manager; calling both grid() and pack() on the
# same widget raises a TclError at runtime.
design.pack(anchor='center')
clock()
root.mainloop()
except ModuleNotFoundError as e:
#print("\nModule Not Found: Run 'bash requerments.ssh'\n")
print(e)
except KeyboardInterrupt:
print("\n\nManually Killed The Task. GoodBye\n")
|
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2016 Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#ifndef incl_HPHP_HTONLL_H_
#define incl_HPHP_HTONLL_H_
/*
* Tries to find a suitable implementation of htonll/ntohll if it doesn't
* already exist. This could go into portability.h, but seemed specific enough
* to be worth pulling out.
*/
#include <folly/portability/Sockets.h>
#if defined(__FreeBSD__)
# include <sys/endian.h>
#elif defined(__APPLE__)
# include <machine/endian.h>
# include <libkern/OSByteOrder.h>
#elif defined(_MSC_VER)
# include <stdlib.h>
#else
# include <byteswap.h>
#endif
#if __BYTE_ORDER == __LITTLE_ENDIAN
# define htolell(x) (x)
# define letohll(x) (x)
# if !defined(htonll) && !defined(ntohll)
# if defined(__FreeBSD__)
# define htonll(x) bswap64(x)
# define ntohll(x) bswap64(x)
# elif defined(__APPLE__)
# define htonll(x) OSSwapInt64(x)
# define ntohll(x) OSSwapInt64(x)
# elif defined(_MSC_VER)
# define htonll(x) _byteswap_uint64(x)
# define ntohll(x) _byteswap_uint64(x)
# else
# define htonll(x) bswap_64(x)
# define ntohll(x) bswap_64(x)
# endif
# endif
#else
# if defined(__FreeBSD__)
# define htolell(x) bswap64(x)
# define letohll(x) bswap64(x)
# elif defined(__APPLE__)
# define htolell(x) OSSwapInt64(x)
# define letohll(x) OSSwapInt64(x)
# elif defined(_MSC_VER)
# define htolell(x) _byteswap_uint64(x)
# define letohll(x) _byteswap_uint64(x)
# else
# define htolell(x) bswap_64(x)
# define letohll(x) bswap_64(x)
# endif
# if !defined(htonll) && !defined(ntohll)
# define htonll(x) (x)
# define ntohll(x) (x)
# endif
#endif
#endif
|
"""AWS Login utilities"""
import os
import logging
import boto3
from botocore.config import Config
logger = logging.getLogger(__name__)
def get_boto3_client(service: str, profile: str = None, region: str = "us-east-1") -> boto3.Session.client:
"""Get a boto3 client for a given service"""
logging.getLogger('botocore').setLevel(logging.CRITICAL)
session_data = {"region_name": region}
if profile:
session_data["profile_name"] = profile
session = boto3.Session(**session_data)
config = Config(connect_timeout=5, retries={"max_attempts": 10})
if os.environ.get('LOCALSTACK_ENDPOINT_URL'):
client = session.client(service, config=config, endpoint_url=os.environ.get('LOCALSTACK_ENDPOINT_URL'))
else:
client = session.client(service, config=config)
logger.debug(f"{client.meta.endpoint_url} in {client.meta.region_name}: boto3 client login successful")
return client
def get_boto3_resource(service: str, profile: str = None, region: str = "us-east-1") -> boto3.Session.resource:
"""Get a boto3 resource for a given service"""
logging.getLogger('botocore').setLevel(logging.CRITICAL)
session_data = {"region_name": region}
if profile:
session_data["profile_name"] = profile
session = boto3.Session(**session_data)
resource = session.resource(service)
return resource
def get_current_account_id(sts_client: boto3.Session.client) -> str:
"""Get the current account ID"""
response = sts_client.get_caller_identity()
current_account_id = response.get("Account")
return current_account_id
def get_available_regions(service: str):
"""AWS exposes their list of regions as an API. Gather the list."""
regions = boto3.session.Session().get_available_regions(service)
logger.debug("The service %s does not have available regions. Returning us-east-1 as default")
if not regions:
regions = ["us-east-1"]
return regions
def get_target_account_credentials(target_account_role_name: str, target_account_id: str,
role_session_name: str = "HotDogsAreSandwiches", profile: str = None):
"""
Get a boto3 client for a given AWS service
:param profile:
:param role_session_name: AssumeRole session name
:param target_account_role_name: The name of the target account role
:param target_account_id: The target account ID
:return:
"""
default_region = "us-east-1"
session_data = {"region_name": default_region}
if profile:
session_data["profile_name"] = profile
session = boto3.Session(**session_data)
config = Config(connect_timeout=5, retries={"max_attempts": 10})
sts_client = session.client('sts', config=config)
acct_b = sts_client.assume_role(
RoleArn=f"arn:aws:iam::{target_account_id}:role/{target_account_role_name}",
RoleSessionName=role_session_name
)
aws_access_key_id = acct_b['Credentials']['AccessKeyId']
aws_secret_access_key = acct_b['Credentials']['SecretAccessKey']
aws_session_token = acct_b['Credentials']['SessionToken']
return aws_access_key_id, aws_secret_access_key, aws_session_token
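
A brief usage sketch for the helpers above (not part of the module): it assumes the file is importable as `aws_login`, and the profile, role name, and account ID are placeholders.

```python
# Hypothetical usage; "aws_login", the profile, and the role/account values
# are illustrative only.
from aws_login import (get_boto3_client, get_current_account_id,
                       get_target_account_credentials)

sts = get_boto3_client("sts", profile="dev", region="us-east-1")
print(get_current_account_id(sts))

key_id, secret_key, token = get_target_account_credentials(
    target_account_role_name="ExampleRole",   # placeholder role name
    target_account_id="123456789012",         # placeholder account ID
    profile="dev",
)
```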
|
import React from "react"
import Modal from "react-modal"
import Helmet from "react-helmet"
import { SkipNavLink } from "@reach/skip-nav"
import MdClose from "react-icons/lib/md/close"
import { navigate, PageRenderer } from "gatsby"
import presets, { colors } from "../utils/presets"
import Banner from "../components/banner"
import Navigation from "../components/navigation"
import MobileNavigation from "../components/navigation-mobile"
import PageWithSidebar from "../components/page-with-sidebar"
import mousetrap from "mousetrap"
// Import Futura PT typeface
import "../fonts/Webfonts/futurapt_book_macroman/stylesheet.css"
import "../fonts/Webfonts/futurapt_bookitalic_macroman/stylesheet.css"
import "../fonts/Webfonts/futurapt_demi_macroman/stylesheet.css"
import "../fonts/Webfonts/futurapt_demiitalic_macroman/stylesheet.css"
// Other fonts
import "typeface-spectral"
let windowWidth
class DefaultLayout extends React.Component {
constructor() {
super()
this.handleCloseModal = this.handleCloseModal.bind(this)
}
handleCloseModal() {
navigate(this.props.modalBackgroundPath)
}
componentDidMount() {
Modal.setAppElement(`#___gatsby`)
if (this.props.isModal && window.innerWidth > 750) {
mousetrap.bind(`left`, this.props.modalPrevious)
mousetrap.bind(`right`, this.props.modalNext)
mousetrap.bind(`spacebar`, this.props.modalNext)
document.querySelector(`html`).style.overflowY = `hidden`
}
}
componentWillUnmount() {
if (this.props.isModal && window.innerWidth > 750) {
mousetrap.unbind(`left`)
mousetrap.unbind(`right`)
mousetrap.unbind(`spacebar`)
document.querySelector(`html`).style.overflowY = `auto`
}
}
render() {
const isHomepage = this.props.location.pathname === `/`
// SEE: template-docs-markdown for why this.props.isSidebarDisabled is here
const isSidebarDisabled =
this.props.isSidebarDisabled || !this.props.itemList
let isModal = false
if (!windowWidth && typeof window !== `undefined`) {
windowWidth = window.innerWidth
}
if (this.props.isModal && windowWidth > 750) {
isModal = true
}
if (isModal && window.innerWidth > 750) {
return (
<React.Fragment>
<PageRenderer
location={{ pathname: this.props.modalBackgroundPath }}
/>
<Modal
isOpen={true}
style={{
content: {
top: `inherit`,
left: `inherit`,
right: `inherit`,
bottom: `inherit`,
margin: `0 auto`,
width: `750px`,
background: `none`,
border: `none`,
padding: `40px 0`,
overflow: `visible`,
},
overlay: {
position: `absolute`,
top: 0,
left: 0,
right: 0,
bottom: `unset`,
minHeight: `100%`,
minWidth: `100%`,
zIndex: 10,
overflowY: `auto`,
backgroundColor: `rgba(255, 255, 255, 0.95)`,
},
}}
onRequestClose={() => navigate(this.props.modalBackgroundPath)}
contentLabel="Site Details Modal"
>
<div
css={{
backgroundColor: `#ffffff`,
borderRadius: presets.radius,
boxShadow: `0 0 90px -24px ${colors.gatsby}`,
position: `relative`,
}}
>
<button
onClick={this.handleCloseModal}
css={{
background: colors.ui.bright,
border: 0,
borderBottomLeftRadius: presets.radius,
borderTopRightRadius: presets.radius,
color: colors.gatsby,
cursor: `pointer`,
position: `absolute`,
left: `auto`,
right: 0,
height: 40,
width: 40,
"&:hover": {
background: colors.gatsby,
color: `#fff`,
},
}}
>
<MdClose />
</button>
{this.props.children}
{this.props.modalPreviousLink}
{this.props.modalNextLink}
</div>
</Modal>
</React.Fragment>
)
}
return (
<div className={isHomepage ? `is-homepage` : ``}>
<Helmet defaultTitle={`GatsbyJS`} titleTemplate={`%s | GatsbyJS`}>
<meta
name="viewport"
content="width=device-width, initial-scale=1, shrink-to-fit=no, viewport-fit=cover"
/>
<meta name="twitter:site" content="@gatsbyjs" />
<meta property="og:type" content="website" />
<meta property="og:site_name" content="GatsbyJS" />
<meta name="docsearch:version" content="2.0" />
<link
rel="canonical"
href={`https://gatsbyjs.org${this.props.location.pathname}`}
/>
<html lang="en" />
</Helmet>
<SkipNavLink css={styles.skipLink}>Skip to main content</SkipNavLink>
<Banner background={isHomepage ? `#402060` : false}>
These are the docs for v2.
{` `}
<a
href="https://v1.gatsbyjs.org/"
css={{
color: `#fff`,
}}
>
View the v1 docs
<span
css={{
display: `none`,
[presets.Mobile]: {
display: `inline`,
},
}}
>
{` `}
instead
</span>
</a>
.
</Banner>
<Navigation pathname={this.props.location.pathname} />
<div
className={`main-body`}
css={{
paddingTop: presets.bannerHeight,
[presets.Tablet]: {
margin: `0 auto`,
paddingTop: isHomepage
? presets.bannerHeight
: `calc(${presets.bannerHeight} + ${presets.headerHeight})`,
},
paddingLeft: `env(safe-area-inset-left)`,
paddingRight: `env(safe-area-inset-right)`,
}}
>
<PageWithSidebar
disable={isSidebarDisabled}
itemList={this.props.itemList}
location={this.props.location}
enableScrollSync={this.props.enableScrollSync}
renderContent={() => this.props.children}
/>
</div>
<MobileNavigation />
</div>
)
}
}
const styles = {
skipLink: {
border: `0`,
clip: `rect(0 0 0 0)`,
height: 1,
width: 1,
margin: -1,
padding: 0,
overflow: `hidden`,
position: `absolute`,
zIndex: 100,
fontSize: `0.85rem`,
":focus": {
padding: `0.9rem`,
position: `fixed`,
top: 10,
left: 10,
background: `white`,
textDecoration: `none`,
width: `auto`,
height: `auto`,
clip: `auto`,
},
},
}
export default DefaultLayout
|
'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
var prefix = 'fas';
var iconName = 'border-right';
var width = 448;
var height = 512;
var ligatures = [];
var unicode = 'f852';
var svgPathData = 'M240 224h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm96 0h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm-192 0h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm96 192h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm96 0h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm-96-96h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm0-192h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm-96 288h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm96-384h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16V48a16 16 0 0 0-16-16zm96 0h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16V48a16 16 0 0 0-16-16zM48 224H16a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm0 192H16a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm0-96H16a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm0-192H16a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm0-96H16A16 16 0 0 0 0 48v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16V48a16 16 0 0 0-16-16zm96 0h-32a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16V48a16 16 0 0 0-16-16zm288 0h-32a16 16 0 0 0-16 16v416a16 16 0 0 0 16 16h32a16 16 0 0 0 16-16V48a16 16 0 0 0-16-16z';
exports.definition = {
prefix: prefix,
iconName: iconName,
icon: [
width,
height,
ligatures,
unicode,
svgPathData
]};
exports.faBorderRight = exports.definition;
exports.prefix = prefix;
exports.iconName = iconName;
exports.width = width;
exports.height = height;
exports.ligatures = ligatures;
exports.unicode = unicode;
exports.svgPathData = svgPathData;
|
import React, { useState } from 'react';
import { connect } from 'react-redux';
import styled from 'styled-components';
import { signup } from '../actions';
import { PageContainer,
LoginContainer,
LoginHeader,
FormContainer,
InputContainer, Button} from '../styled-components';
import Footer from "./Footer";
const SignUpContainer = styled(LoginContainer)`
display: flex;
flex-flow: row wrap;
align-items: center;
`
const SignUpForm = styled(FormContainer)`
display: flex;
flex-flow: row wrap;
align-items: center;
margin-right: 15px;
max-width: 600px;
/* button{
width: 30%;
height: auto;
}*/
`
const SignUpInputs = styled(InputContainer)`
margin-top: 20px;
display: flex;
flex-flow: column wrap;
input{
width: 300px;
}
`
const SignUp = props => {
const[newUser, setNewUser] = useState({username: '', password: '', email: ''});
const handleChange = e => {
e.preventDefault();
setNewUser({
...newUser,
[e.target.name]: e.target.value,
})
}
const signup = e => {
e.preventDefault();
if(!newUser.username || !newUser.password || !newUser.email){
return alert('Please fill out all the fields to sign up.')
} else {
props.signup(newUser)
.then(res => {
console.log(res);
})
}
}
return(
<PageContainer>
<SignUpContainer>
<LoginHeader>
<h1>Sign Up</h1>
<p>Signing up is optional, and allows you to save your exit strategies. We will never provide anyone else your information.</p>
</LoginHeader>
<SignUpForm>
<form onSubmit={signup}>
<SignUpInputs>
<label>Username</label>
<input
type="text"
placeholder="username"
required="fill this out!"
name="username"
value={newUser.username}
onChange={handleChange}
/>
<label>Password</label>
<input
type="password"
placeholder="password"
required="fill this out!"
name="password"
value={newUser.password}
onChange={handleChange}
/>
<label>Email</label>
<input
type="text"
placeholder="email"
required="fill this out!"
name="email"
value={newUser.email}
onChange={handleChange}
/>
</SignUpInputs>
</form>
<Button className="Login-button" onClick={signup}>Sign Up</Button>
</SignUpForm>
</SignUpContainer>
<Footer/>
</PageContainer>
)
};
const mapStateToProps = state => {
return{
error: state.error,
loggingIn: state.loggingIn
}
}
export default connect(
mapStateToProps,
{ signup }
)(SignUp);
|
var settings = {
username: 'me@there.com',
password: 'n0tMyP455w0rd',
accountName: 'me',
label: 'prod'
}
module.exports = settings;
|
#!/usr/bin/env python3
"""build_pot
Build a PO template (for i18n) and update the .po files to reflect
the last changes.
"""
import os.path
import sys
import glob
import tokenize
from distutils.core import Command
from utils.i18n import pygettext
# from pygettext.main():
class Options:
# constants
GNU = 1
SOLARIS = 2
# defaults
extractall = 0 # FIXME: currently this option has no effect at all.
keywords = []
writelocations = 1
locationstyle = GNU
verbose = 0
width = 78
excludefilename = ""
docstrings = 0
nodocstrings = {}
toexclude = []
class build_pot(Command):
description = "Generate a .po template file (.pot) from python source files"
user_options = [
("msgmerge=", None, "location of the msgmerge program"),
("extract-all", "a", ""),
("default-domain=", "d", ""),
("escape", "E", ""),
("docstrings", "D", ""),
("keyword=", "k", "Comma separated list of keywords"),
("no-default-keywords", "K", ""),
("add-location", "n", ""),
("no-location", None, ""),
("style=", "S", 'POT file style "gnu" or "solaris"'),
("output=", "o", ""),
("output-dir=", "p", ""),
("width=", "w", ""),
("exclude-file=", "x", ""),
("all-linguas=", None, ""),
# ('no-docstrings=', 'X', ''),
]
boolean_options = [
"extract-all",
"escape",
"docstrings",
"no-default-keywords",
"add-location",
"no-location",
"no-docstrings",
]
# constants
GNU = 1
SOLARIS = 2
def initialize_options(self):
self.podir = "po"
self.msgmerge = "msgmerge"
self.options = Options()
# defaults for variable parsing:
self.escape = 0
self.width = 78
self.extract_all = 0 # doesn't do anything yet
self.default_domain = None
self.keyword = None
self.no_default_keywords = 0
self.no_location = 0
self.style = None
self.output = None
self.output_dir = None
self.docstrings = 0
self.exclude_file = None
# self.no_docstrings = None
self.all_linguas = []
def finalize_options(self):
options = self.options
self.name = self.distribution.get_name()
# Build default options for the TokenEater
if self.default_domain:
self.output = self.default_domain + ".pot"
if self.keyword:
options.keywords.extend(self.keyword.split(","))
if self.no_default_keywords:
options.keywords = []
if self.no_location:
options.writelocations = 0
if self.style:
if self.style == "gnu":
options.locationstyle = self.GNU
elif self.style == "solaris":
options.locationstyle = self.SOLARIS
else:
raise SystemExit(f"Invalid value for --style: {self.style}")
if not self.output:
self.output = self.distribution.get_name() + ".pot"
if not self.output_dir:
self.output_dir = self.podir
if self.docstrings:
options.docstrings = 1
options.width = int(self.width)
if self.exclude_file:
try:
fp = open(self.exclude_file)
options.toexclude = fp.readlines()
fp.close()
except OSError:
raise SystemExit(f"Can't read --exclude-file: {self.exclude_file}")
# skip: self.no_docstrings
if self.all_linguas:
self.all_linguas = self.all_linguas.split(",")
# calculate escapes
pygettext.make_escapes(self.escape)
# calculate all keywords
options.keywords.append("_")
if self.output_dir:
self.output = os.path.join(self.output_dir, self.output)
self.packages = self.distribution.packages
# self.all_linguas = self.distribution.get_all_linguas()
# self.all_linguas = self.distribution.options['po']['all_linguas']
def run(self):
create_pot_file(self.packages, self.output, self.options, verbose=self.verbose)
merge_files(
self.all_linguas,
self.msgmerge,
self.output,
self.output_dir,
verbose=self.verbose,
)
def create_pot_file(packages, pot_file, options=Options(), verbose=False):
"""
Create a new .pot file. This is basically a rework of the
main function of pygettext.
"""
source_files = []
for p in packages:
pathlist = p.split(".")
path = os.path.join(*pathlist)
source_files.extend(glob.glob(os.path.join(path, "*.py")))
# slurp through all the files
eater = pygettext.TokenEater(options)
for filename in source_files:
if verbose:
print(f"Working on {filename}")
fp = open(filename, "rb")
try:
eater.set_filename(filename)
try:
tokens = tokenize.tokenize(fp.readline)
for _token in tokens:
eater(*_token)
        except tokenize.TokenError as e:
            # TokenError args are (message, (lineno, column)); the exception
            # object itself is not subscriptable in Python 3.
            msg, (lineno, col) = e.args
            print(f"{msg}: {filename}, line {lineno:d}, column {col:d}")
finally:
fp.close()
if pot_file == "-":
fp = sys.stdout
else:
fp = open(pot_file, "w")
try:
eater.write(fp)
finally:
if fp is not sys.stdout:
fp.close()
def merge_files(all_linguas, msgmerge, pot_file, output_dir, verbose=False):
if not all_linguas:
return
for lingua in all_linguas:
d = {
"msgmerge": msgmerge,
"po": os.path.join(output_dir, lingua + ".po"),
"pot": pot_file,
}
if verbose:
print(f"Merging {d['pot']} and {d['po']} ")
res = os.system(f"{d['msgmerge']} {d['po']} {d['pot']} -o {d['po']}")
if res:
raise SystemExit("error while running msgmerge.")
if __name__ == "__main__":
from setuptools import find_packages
from utils.i18n import LINGUAS
packages = find_packages(exclude=["utils*", "docs", "tests"])
output_dir = "po"
pot_file = os.path.join(output_dir, "gaphor.pot")
create_pot_file(packages, pot_file, verbose=True)
merge_files(LINGUAS, "msgmerge", pot_file, output_dir=output_dir, verbose=True)
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import tensorflow.contrib.eager as tfe
# If use eager, should call this first
tf.enable_eager_execution()
# A toy dataset of points around 3*x+2
NUM_EXAMPLES = 1000
training_inputs = tf.random_normal([NUM_EXAMPLES])
noise = tf.random_normal([NUM_EXAMPLES])
training_outputs = training_inputs * 3 + 2 + noise
def prediction(input, weight, bias):
return input * weight + bias
# A loss function using mean-squared error
def loss(weights, biases):
error = prediction(training_inputs, weights, biases) - training_outputs
return tf.reduce_mean(tf.square(error))
# Return the derivative of loss with respect to the weight and bias
def grad(weights, biases):
with tf.GradientTape() as tape:
loss_value = loss(weights, biases)
return tape.gradient(loss_value, [weights, biases])
train_steps = 200
learning_rate = 0.01
# Start with arbitrary values for W and B on the same batch of data
W = tfe.Variable(5.)
B = tfe.Variable(10.)
print("Initial loss: {:.3f}".format(loss(W, B)))
for i in range(train_steps):
dW, dB = grad(W, B)
W.assign_sub(dW * learning_rate)
B.assign_sub(dB * learning_rate)
if i % 20 == 0:
print("Loss at step {:03d}: {:.3f}".format(i, loss(W, B)))
print("Final loss: {:.3f}".format(loss(W, B)))
print("W = {:.3f}, B = {:.3f}".format(W.numpy(), B.numpy()))
|
import MiniApp, {
CustomPermission,
CustomPermissionResult,
} from 'js-miniapp-sdk';
import {
REQUEST_PERMISSIONS_SUCCESS,
REQUEST_PERMISSIONS_FAILURE,
} from './types';
type PermissionsSuccessAction = {
type: string,
permissions: CustomPermissionResult[],
};
const requestCustomPermissions = (
  requestedPermissions: CustomPermission[]
): Function => {
  return (dispatch) => {
    return MiniApp.requestCustomPermissions(requestedPermissions)
.then((permissions) => {
dispatch({
type: REQUEST_PERMISSIONS_SUCCESS,
permissions,
});
return permissions;
})
.catch((_) => {
dispatch({
type: REQUEST_PERMISSIONS_FAILURE,
});
});
};
};
export { requestCustomPermissions };
export type { PermissionsSuccessAction };
|
import numpy as np
from dipy.data import get_gtab_taiwan_dsi
from numpy.testing import (assert_almost_equal,
assert_equal,
run_module_suite)
from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix, mapmri_EAP
from dipy.sims.voxel import (MultiTensor, all_tensor_evecs, multi_tensor_pdf)
from scipy.special import gamma
from scipy.misc import factorial
from dipy.data import get_sphere
def int_func(n):
f = np.sqrt(2) * factorial(n) / float(((gamma(1 + n / 2.0))
* np.sqrt(2**(n + 1) * factorial(n))))
return f
def test_mapmri_metrics():
gtab = get_gtab_taiwan_dsi()
mevals = np.array(([0.0015, 0.0003, 0.0003],
[0.0015, 0.0003, 0.0003]))
angl = [(0, 0), (60, 0)]
S, sticks = MultiTensor(gtab, mevals, S0=100.0, angles=angl,
fractions=[50, 50], snr=None)
# since we are testing without noise we can use higher order and lower
# lambdas, with respect to the default.
radial_order = 6
lambd = 1e-8
# test mapmri_indices
indices = mapmri_index_matrix(radial_order)
n_c = indices.shape[0]
F = radial_order / 2
n_gt = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))
assert_equal(n_c, n_gt)
# test MAPMRI fitting
mapm = MapmriModel(gtab, radial_order=radial_order, lambd=lambd)
mapfit = mapm.fit(S)
c_map = mapfit.mapmri_coeff
R = mapfit.mapmri_R
mu = mapfit.mapmri_mu
S_reconst = mapfit.predict(gtab, 1.0)
# test the signal reconstruction
S = S / S[0]
nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
assert_almost_equal(nmse_signal, 0.0, 3)
# test if the analytical integral of the pdf is equal to one
integral = 0
for i in range(indices.shape[0]):
n1, n2, n3 = indices[i]
integral += c_map[i] * int_func(n1) * int_func(n2) * int_func(n3)
assert_almost_equal(integral, 1.0, 3)
# compare the shore pdf with the ground truth multi_tensor pdf
sphere = get_sphere('symmetric724')
v = sphere.vertices
radius = 10e-3
r_points = v * radius
pdf_mt = multi_tensor_pdf(r_points, mevals=mevals,
angles=angl, fractions=[50, 50])
pdf_map = mapmri_EAP(r_points, radial_order, c_map, mu, R)
nmse_pdf = np.sqrt(np.sum((pdf_mt - pdf_map) ** 2)) / (pdf_mt.sum())
assert_almost_equal(nmse_pdf, 0.0, 2)
# test MAPMRI metrics
tau = 1 / (4 * np.pi ** 2)
angl = [(0, 0), (0, 0)]
S, sticks = MultiTensor(gtab, mevals, S0=100.0, angles=angl,
fractions=[50, 50], snr=None)
mapm = MapmriModel(gtab, radial_order=radial_order, lambd=lambd)
mapfit = mapm.fit(S)
# RTOP
gt_rtop = 1.0 / np.sqrt((4 * np.pi * tau)**3 *
mevals[0, 0] * mevals[0, 1] * mevals[0, 2])
rtop = mapfit.rtop()
assert_almost_equal(rtop, gt_rtop, 4)
# RTAP
gt_rtap = 1.0 / np.sqrt((4 * np.pi * tau)**2 * mevals[0, 1] * mevals[0, 2])
rtap = mapfit.rtap()
assert_almost_equal(rtap, gt_rtap, 4)
# RTPP
gt_rtpp = 1.0 / np.sqrt((4 * np.pi * tau) * mevals[0, 0])
rtpp = mapfit.rtpp()
assert_almost_equal(rtpp, gt_rtpp, 4)
if __name__ == '__main__':
run_module_suite()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import subprocess
DEFAULT_DATASET = 'infovis'
DATASETS = [ DEFAULT_DATASET, '20newsgroups', 'nsfgrants', 'nsf25k', 'nsf10k', 'nsf1k', 'poliblogs', 'fomc', 'CR_financial_collapse', 'CR_stock_market_plunge', 'FCIC_final_report', 'FCIC_first_hearing', 'FR_federal_open_market_committee', 'FR_monetary_policy_hearings' ]
DEFAULT_MODEL = 'mallet'
MODELS = [ DEFAULT_MODEL, 'treetm', 'stmt', 'stm', 'gensim' ]
def Shell(command):
p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
while p.poll() is None:
line = p.stdout.readline().rstrip('\n')
if len(line) > 0:
print line
def Demonstrate(dataset, model, is_quiet, force_overwrite):
database_folder = 'data/demo/{}/corpus'.format(dataset)
corpus_folder = 'data/demo/{}/corpus'.format(dataset)
model_folder = 'data/demo/{}/model-{}'.format(dataset, model)
app_name = '{}_{}'.format(dataset, model)
def PrepareDataset():
executable = 'bin/fetch_dataset.sh'
Shell([executable, dataset])
def PrepareModel():
executable = 'bin/setup_{}.sh'.format(model)
command = [executable]
Shell(command)
def PrepareOthers():
executable = 'bin/setup_mallet.sh'
command = [executable]
Shell(command)
executable = 'bin/setup_corenlp.sh'
command = [executable]
Shell(command)
def TrainModel():
executable = 'bin/train_{}.py'.format(model)
command = [executable, corpus_folder, model_folder]
if is_quiet:
command.append('--quiet')
if force_overwrite:
command.append('--overwrite')
Shell(command)
def ImportModel():
executable = 'bin/read_{}.py'.format(model)
command = [executable, app_name, model_folder, corpus_folder, database_folder]
if is_quiet:
command.append('--quiet')
if force_overwrite:
command.append('--overwrite')
Shell(command)
print '--------------------------------------------------------------------------------'
print 'Build a topic model ({}) using a demo dataset ({})'.format(model, dataset)
print ' database = {}'.format(database_folder)
print ' corpus = {}'.format(corpus_folder)
print ' model = {}'.format(model_folder)
print ' app = {}'.format(app_name)
print '--------------------------------------------------------------------------------'
PrepareDataset()
PrepareModel()
PrepareOthers()
TrainModel()
ImportModel()
def main():
parser = argparse.ArgumentParser( description = 'Import a MALLET topic model as a web2py application.' )
parser.add_argument( 'dataset' , nargs = '?', type = str, default = DEFAULT_DATASET, choices = DATASETS, help = 'Dataset identifier' )
parser.add_argument( 'model' , nargs = '?', type = str, default = DEFAULT_MODEL , choices = MODELS , help = 'Model type' )
parser.add_argument( '--quiet' , const = True, default = False, action = 'store_const', help = 'Show fewer debugging messages' )
parser.add_argument( '--overwrite' , const = True, default = False, action = 'store_const', help = 'Overwrite any existing model' )
args = parser.parse_args()
Demonstrate( args.dataset, args.model, args.quiet, args.overwrite )
if __name__ == '__main__':
main()
|
/**
* @licstart The following is the entire license notice for the
* Javascript code in this page
*
* Copyright 2018 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @licend The above is the entire license notice for the
* Javascript code in this page
*/
'use strict';
var pdfjsVersion = '2.2.2';
var pdfjsBuild = '14c012b6';
var pdfjsCoreWorker = require('./core/worker.js');
exports.WorkerMessageHandler = pdfjsCoreWorker.WorkerMessageHandler;
|
from django.shortcuts import render, redirect
from django.views import View
from django.http import request, HttpResponseRedirect
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.template.response import TemplateResponse
from django.urls import reverse_lazy
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from .models import Book, Author, Tag
from .forms import SignUpForm
# Create your views here.
class StartView(View):
    def get(self, request):
        return TemplateResponse(request, 'index.html')
# panel
class PanelView(LoginRequiredMixin, View):
def get(self, request):
return render(request, 'panel.html')
# books
class BookListView(ListView):
model = Book
template_name = 'book_list.html'
class BookDetailView(DetailView):
model = Book
template_name = 'book_detail.html'
class BookUpdateView(UpdateView):
model = Book
template_name = 'form.html'
fields = '__all__'
success_url = reverse_lazy('book-list')
class BookDeleteView(DeleteView):
model = Book
template_name = 'confirm_delete.html'
success_url = reverse_lazy('book-list')
class BookCreateView(CreateView):
model = Book
template_name = 'book_form.html'
fields = '__all__'
success_url = reverse_lazy('book-list')
# authors
class AuthorListView(ListView):
model = Author
template_name = 'author_list.html'
class AuthorDetailView(DetailView):
model = Author
template_name = 'author_detail.html'
class AuthorUpdateView(UpdateView):
model = Author
template_name = 'form.html'
fields = '__all__'
success_url = reverse_lazy('author-list')
class AuthorDeleteView(DeleteView):
model = Author
template_name = 'author_confirm_delete.html'
success_url = reverse_lazy('author-list')
class AuthorCreateView(CreateView):
model = Author
template_name = 'author_form.html'
fields = '__all__'
success_url = reverse_lazy('author-list')
# tags
class TagListView(ListView):
model = Tag
template_name = 'tag_list.html'
class TagUpdateView(UpdateView):
model = Tag
template_name = 'form.html'
fields = '__all__'
success_url = reverse_lazy('tag-list')
class TagDeleteView(DeleteView):
model = Tag
template_name = 'confirm_delete.html'
success_url = reverse_lazy('tag-list')
class TagCreateView(CreateView):
model = Tag
template_name = 'tag_form.html'
fields = '__all__'
success_url = reverse_lazy('tag-list')
# account
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect('panel')
else:
form = SignUpForm()
return render(request, 'signup.html', {'form': form})
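
A minimal `urls.py` sketch for the views above; it is an assumption rather than part of the app. Only the route names `panel`, `book-list`, `author-list`, and `tag-list` are implied by the `redirect`/`success_url` calls; the paths themselves are illustrative.

```python
# Hypothetical urls.py; paths are illustrative, only the route names are
# implied by success_url/redirect in the views above.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.StartView.as_view(), name='index'),
    path('panel/', views.PanelView.as_view(), name='panel'),
    path('signup/', views.signup, name='signup'),
    path('books/', views.BookListView.as_view(), name='book-list'),
    path('books/add/', views.BookCreateView.as_view(), name='book-add'),
    path('books/<int:pk>/', views.BookDetailView.as_view(), name='book-detail'),
    path('books/<int:pk>/edit/', views.BookUpdateView.as_view(), name='book-edit'),
    path('books/<int:pk>/delete/', views.BookDeleteView.as_view(), name='book-delete'),
    path('authors/', views.AuthorListView.as_view(), name='author-list'),
    path('tags/', views.TagListView.as_view(), name='tag-list'),
]
```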
|
'use strict';
const fs = require("fs/promises");
const path = require("path");
const os = require("os");
const files = require('../../lib/utils/files');
describe("files.list",function(){
before(async function(){
this.dir = await fs.mkdtemp(path.join(os.tmpdir(), "node-thumbnailer-list-"));
this.file = path.join(this.dir, "wallpaper.svg");
this.thumbpath = path.join(this.dir, "thumbnails/normal");
this.thumb = path.join(this.thumbpath, files.getHash(this.file));
this.badThumb = path.join(this.thumbpath, files.getHash(path.join(this.dir, "logo.png")));
await fs.mkdir(this.thumbpath, {recursive: true});
await fs.copyFile(__dirname+"/../fixtures/sources/wallpaper.svg", this.file);
await fs.copyFile(__dirname+"/../fixtures/thumbnails/normal/wallpaper.png", this.thumb);
await fs.copyFile(__dirname+"/../fixtures/thumbnails/normal/logo.png", this.badThumb);
await fs.utimes(this.file,1418500379,1418500379); //set mtime to Sat Dec 13 2014 20:52:59 GMT+0100 (CET)
});
after(async function(){
await fs.rm(this.dir, {recursive: true});
});
it.skip("find invalid thumbnails",async function(){
let bad_files = await expect(files.list(this.thumbpath)).to.be.fulfilled;
expect(bad_files).to.deep.equal([this.badThumb]);
});
it("skip invalid dirs", async function(){
let bad_files = await expect(files.list(path.join(this.dir, "thumbnails/foo"))).to.be.fulfilled;
expect(bad_files).to.deep.equal([]);
});
});
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch script for pre-training representations."""
import os.path as osp
from absl import app
from absl import flags
from absl import logging
from base_configs import validate_config
from ml_collections import config_flags
import torch
from torchkit import CheckpointManager
from torchkit import experiment
from torchkit import Logger
from torchkit.utils.py_utils import Stopwatch
from utils import setup_experiment
from xirl import common
# pylint: disable=logging-fstring-interpolation
FLAGS = flags.FLAGS
flags.DEFINE_string("experiment_name", None, "Experiment name.")
flags.DEFINE_boolean("resume", False, "Whether to resume training.")
flags.DEFINE_string("device", "cuda:0", "The compute device.")
flags.DEFINE_boolean("raw_imagenet", False, "")
config_flags.DEFINE_config_file(
"config",
"base_configs/pretrain.py",
"File path to the training hyperparameter configuration.",
)
@experiment.pdb_fallback
def main(_):
# Make sure we have a valid config that inherits all the keys defined in the
# base config.
validate_config(FLAGS.config, mode="pretrain")
config = FLAGS.config
exp_dir = osp.join(config.root_dir, FLAGS.experiment_name)
setup_experiment(exp_dir, config, FLAGS.resume)
# No need to do any pretraining if we're loading the raw pretrained
# ImageNet baseline.
if FLAGS.raw_imagenet:
return
# Setup compute device.
if torch.cuda.is_available():
device = torch.device(FLAGS.device)
else:
logging.info("No GPU device found. Falling back to CPU.")
device = torch.device("cpu")
logging.info("Using device: %s", device)
# Set RNG seeds.
if config.seed is not None:
logging.info("Pretraining experiment seed: %d", config.seed)
experiment.seed_rngs(config.seed)
experiment.set_cudnn(config.cudnn_deterministic, config.cudnn_benchmark)
else:
logging.info("No RNG seed has been set for this pretraining experiment.")
logger = Logger(osp.join(exp_dir, "tb"), FLAGS.resume)
# Load factories.
(
model,
optimizer,
pretrain_loaders,
downstream_loaders,
trainer,
eval_manager,
) = common.get_factories(config, device)
# Create checkpoint manager.
checkpoint_dir = osp.join(exp_dir, "checkpoints")
checkpoint_manager = CheckpointManager(
checkpoint_dir,
model=model,
optimizer=optimizer,
)
global_step = checkpoint_manager.restore_or_initialize()
total_batches = max(1, len(pretrain_loaders["train"]))
epoch = int(global_step / total_batches)
complete = False
stopwatch = Stopwatch()
try:
while not complete:
for batch in pretrain_loaders["train"]:
train_loss = trainer.train_one_iter(batch)
if not global_step % config.logging_frequency:
for k, v in train_loss.items():
logger.log_scalar(v, global_step, k, "pretrain")
logger.flush()
if not global_step % config.eval.eval_frequency:
# Evaluate the model on the pretraining validation dataset.
valid_loss = trainer.eval_num_iters(
pretrain_loaders["valid"],
config.eval.val_iters,
)
for k, v in valid_loss.items():
logger.log_scalar(v, global_step, k, "pretrain")
# Evaluate the model on the downstream datasets.
for split, downstream_loader in downstream_loaders.items():
eval_to_metric = eval_manager.evaluate(
model,
downstream_loader,
device,
config.eval.val_iters,
)
for eval_name, eval_out in eval_to_metric.items():
eval_out.log(
logger,
global_step,
eval_name,
f"downstream/{split}",
)
# Save model checkpoint.
if not global_step % config.checkpointing_frequency:
checkpoint_manager.save(global_step)
# Exit if complete.
global_step += 1
if global_step > config.optim.train_max_iters:
complete = True
break
time_per_iter = stopwatch.elapsed()
logging.info(
"Iter[{}/{}] (Epoch {}), {:.6f}s/iter, Loss: {:.3f}".format(
global_step,
config.optim.train_max_iters,
epoch,
time_per_iter,
train_loss["train/total_loss"].item(),
))
stopwatch.reset()
epoch += 1
except KeyboardInterrupt:
logging.info("Caught keyboard interrupt. Saving model before quitting.")
finally:
checkpoint_manager.save(global_step)
logger.close()
if __name__ == "__main__":
flags.mark_flag_as_required("experiment_name")
app.run(main)
|
module.exports={title:"Seagate",slug:"seagate",svg:'<svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>Seagate</title><path d="M11.2279 22.6842c-.2642-1.0048-.5233-1.9919-.7764-2.9548.2734-.1324.4986-.2368.7965-.384 1.239-.528 2.4364-1.1329 3.5508-1.8929 1.3946-.9512 2.6975-2.0144 3.8322-3.2673.8904-.983 1.6324-2.0681 2.0558-3.3424.282-.8479.4202-1.7152.2962-2.6088-.103-.7409-.375-1.4175-.7962-2.0369-.5082-.747-1.1691-1.3149-1.9737-1.7209-1.0067-.5084-2.0802-.696-3.1976-.6628-.9912.0294-1.9456.2473-2.8748.5868C10.431 5.0247 8.863 5.9178 7.358 6.928c-.4391.2949-.8425.6364-1.1916 1.0376-.1904.2188-.3368.4606-.3985.7491-.1038.4872.1755.858.6724.8816.3477.0164.6577-.1181.9617-.262.9456-.4467 1.7992-1.049 2.6828-1.6.7256-.4527 1.458-.8945 2.255-1.2117.786-.313 1.5975-.4847 2.447-.3423.9257.1549 1.6704.5913 2.1225 1.4413.3168.5956.3421 1.2269.1754 1.871-.1903.733-.6147 1.3292-1.1168 1.8759-.6926.7545-1.49 1.3817-2.372 1.899-.924.5421-1.8403 1.1006-2.788 1.5986-1.1086.5827-2.2897.9958-3.4927 1.3446-1.0106.2934-2.0378.4258-3.0865.3328-.8788-.078-1.7068-.3332-2.4364-.8414-.659-.4593-1.1454-1.068-1.44-1.8192-.455-1.1608-.4317-2.3436-.1437-3.5352.3256-1.3464 1.008-2.5097 1.8502-3.5909.6395-.8209 1.3767-1.5472 2.1709-2.2152 1.1242-.9458 2.317-1.7969 3.5952-2.524.4327-.246.8736-.4777 1.285-.702l.4213.8667c-.3808.2048-.757.4008-1.1275.6072-1.5956.8886-3.0574 1.96-4.3596 3.2419-.707.6956-1.3047 1.4804-1.7574 2.3664-.4741.9285-.7285 1.9104-.6164 2.9584.1232 1.147.661 2.0288 1.7175 2.5579.6303.3158 1.303.4098 1.997.3947.9564-.0205 1.858-.2923 2.7487-.6108 1.3273-.475 2.5625-1.1374 3.7648-1.8638.7058-.4264 1.4094-.8594 2.0064-1.4363.315-.3047.6302-.6145.7914-1.0384.2862-.7533-.121-1.4058-.9281-1.4824-.4392-.0415-.8377.1044-1.2262.2842-1.026.4747-1.9486 1.125-2.9045 1.719-.7306.454-1.4693.8943-2.2732 1.2087-.5567.2175-1.1321.3535-1.7363.2843-1.0364-.1187-1.6846-.937-1.5719-1.9753.0886-.8158.4854-1.4814 1.0219-2.075.5934-.6566 1.2856-1.1949 2.0287-1.6697 1.141-.7293 2.299-1.428 3.5382-1.9832 1.4362-.6438 2.9097-1.1544 4.4904-1.2936 1.1439-.1006 2.2752-.0366 3.3912.2533.9863.2563 1.896.6732 2.7145 1.28.8112.6015 1.4645 1.347 1.959 2.2285.4462.7956.7005 1.6501.7756 2.5585.079.9561-.0343 1.8948-.3106 2.8077-.2695.89-.6313 1.7449-1.1264 2.5378-.6903 1.1051-1.5035 2.1103-2.4493 3.0074-.9636.9142-1.937 1.8156-3.034 2.572-1.4267.9841-2.9131 1.8594-4.5207 2.5189-.429.176-.8567.3563-1.3065.5436"/></svg>',get path(){return this.svg.match(/<path\s+d="([^"]*)/)[1]},source:"https://branding.seagate.com/productpage/3fc51aba-c35a-4eff-a833-a258b0440bd2",hex:"6EBE49",guidelines:void 0,license:void 0};
|
"use strict";
const fs = require("fs");
const promisify = require("util.promisify");
const readFile = promisify(fs.readFile);
const writeFile = promisify(fs.writeFile);
const plugins = [
[
"imagemin-gifsicle",
{
interlaced: true
}
],
[
"imagemin-jpegtran",
{
progressive: true
}
],
[
"imagemin-optipng",
{
optimizationLevel: 5
}
],
[
"imagemin-svgo",
{
plugins: [
{
removeViewBox: false
},
{
inlineStyles: {
onlyMatchedOnce: false
}
},
{
cleanupIDs: false
}
]
}
]
].map(([name, opts]) => require(name)(opts));
const minifyFile = (exports.minifyFile = filename =>
[...plugins, it => writeFile(filename, it)].reduce(
(acc, it) => acc.then(it),
readFile(filename)
));
|
const colors = require('colors/safe');
const faqSchema = require('../../api/v1/faq/index.model');
const { purify } = require('../../helpers/sanitize');
const { faqDB } = require('../../db');
const { defaultContent: content } = require('../../helpers/variables/faq');
const {
faqsDeletedMessage,
faqsSeededMessage,
} = require('../../helpers/variables/tasks');
const {
RETURNS_PL,
DELIVERY_PL,
PAYMENT_PL,
SERVICE_PL,
PRODUCTS_PL,
DISCOUNTS_PL,
OTHERS_PL,
} = require('../../helpers/variables/constants/faq');
const faqCategories = [
RETURNS_PL,
DELIVERY_PL,
PAYMENT_PL,
SERVICE_PL,
PRODUCTS_PL,
DISCOUNTS_PL,
OTHERS_PL,
];
const createFAQs = () => {
const faqs = [];
for (let i = 0; i < 100; i += 1) {
faqs.push({
category: faqCategories[i % faqCategories.length],
title: `Tytuł numer ${i + 1}`,
content,
});
}
return faqs;
};
const removeFAQs = async () => {
try {
await faqDB.remove();
// eslint-disable-next-line no-console
console.log(colors.green(faqsDeletedMessage));
} catch (error) {
// eslint-disable-next-line no-console
console.error(colors.red(error));
process.exit(0);
}
};
const seedExampleFAQs = async () => {
const faqs = [];
createFAQs().forEach((faq) => {
const { schemaError, data } = faqSchema(faq);
if (schemaError) {
// eslint-disable-next-line no-console
console.error(colors.red(schemaError.details[0].message));
process.exit(0);
}
data.purify_content = purify(data.content);
faqs.push({
...data,
created_at: new Date(),
updated_at: new Date(),
deleted_at: null,
});
});
try {
await faqDB.remove();
await faqDB.insert(faqs);
// eslint-disable-next-line no-console
console.log(colors.green(faqsSeededMessage));
} catch (error) {
// eslint-disable-next-line no-console
console.error(colors.red(error));
process.exit(0);
}
};
module.exports = {
removeFAQs,
seedExampleFAQs,
};
|
from typing import Dict, List, NewType
from backend.common.consts.api_version import ApiMajorVersion
from backend.common.models.district import District
from backend.common.queries.dict_converters.converter_base import ConverterBase
DistrictDict = NewType("DistrictDict", Dict)
class DistrictConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
ApiMajorVersion.API_V3: 2,
}
@classmethod
def _convert_list(
cls, model_list: List[District], version: ApiMajorVersion
) -> List[DistrictDict]:
CONVERTERS = {
3: cls.districtsConverter_v3,
}
return CONVERTERS[version](model_list)
@classmethod
def districtsConverter_v3(cls, districts: List[District]) -> List[DistrictDict]:
return list(map(cls.districtConverter_v3, districts))
@classmethod
def districtConverter_v3(cls, district: District) -> DistrictDict:
return DistrictDict(
{
"key": district.key.id(),
"year": district.year,
"abbreviation": district.abbreviation,
"display_name": district.display_name,
}
)
@staticmethod
def dictToModel_v3(data: Dict) -> District:
district = District(id=data["key"])
district.year = data["year"]
district.abbreviation = data["abbreviation"]
district.display_name = data["display_name"]
return district
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://appengine.googleapis.com/v1beta/'
DOCS_URL = 'https://cloud.google.com/appengine/docs/admin-api/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
APPS = (
'apps',
'{+name}',
{
'':
'apps/{appsId}',
},
[u'name']
)
APPS_AUTHORIZEDCERTIFICATES = (
'apps.authorizedCertificates',
'{+name}',
{
'':
'apps/{appsId}/authorizedCertificates/'
'{authorizedCertificatesId}',
},
[u'name']
)
APPS_DOMAINMAPPINGS = (
'apps.domainMappings',
'{+name}',
{
'':
'apps/{appsId}/domainMappings/{domainMappingsId}',
},
[u'name']
)
APPS_FIREWALL_INGRESSRULES = (
'apps.firewall.ingressRules',
'{+name}',
{
'':
'apps/{appsId}/firewall/ingressRules/{ingressRulesId}',
},
[u'name']
)
APPS_LOCATIONS = (
'apps.locations',
'{+name}',
{
'':
'apps/{appsId}/locations/{locationsId}',
},
[u'name']
)
APPS_OPERATIONS = (
'apps.operations',
'{+name}',
{
'':
'apps/{appsId}/operations/{operationsId}',
},
[u'name']
)
APPS_SERVICES = (
'apps.services',
'{+name}',
{
'':
'apps/{appsId}/services/{servicesId}',
},
[u'name']
)
APPS_SERVICES_VERSIONS = (
'apps.services.versions',
'{+name}',
{
'':
'apps/{appsId}/services/{servicesId}/versions/{versionsId}',
},
[u'name']
)
APPS_SERVICES_VERSIONS_INSTANCES = (
'apps.services.versions.instances',
'{+name}',
{
'':
'apps/{appsId}/services/{servicesId}/versions/{versionsId}/'
'instances/{instancesId}',
},
[u'name']
)
def __init__(self, collection_name, path, flat_paths, params):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
|
console.log('DAY 05');
const input = "input_05.txt";
const output = document.getElementById('export');
axios.get(input)
.then(function (response) {
performA(response.data);
})
.catch(function (error) {
console.log(error);
});
function performA(data)
{
let passes = data.split('\n');
let highest = 0;
let seats = Array();
passes.forEach(pass => {
let rowSeq = pass.slice(0,7).split('').map(val => val==='F'?0:1).join('');
let colSeq = pass.slice(-3).split('').map(val => val==='R'?1:0).join('');
let row = (binarySearch(0,127,rowSeq));
let col = (binarySearch(0,7,colSeq));
let seatId = row*8+col;
seats.push(seatId);
highest = Math.max(highest, seatId);
});
seats.sort((a,b) => a-b);
let onlySeat = seats.filter(function (val, index)
{
return (index < seats.length - 1 && seats[index + 1] - val === 2);
});
console.log(highest);
console.log(onlySeat[0]+1);
}
function binarySearch(min, max, sequence)
{
let side = sequence.substring(0,1);
if (side == 0)
{
max -= Math.round((max-min)/2);
// console.log('lower: '+min+'-'+max);
if (sequence.substring(1).length === 0)
{
return min;
}
}
else
{
min += Math.round((max-min)/2);
// console.log('upper: '+min+'-'+max);
if (sequence.substring(1).length === 0)
{
return max;
}
}
return binarySearch(min, max, sequence.substring(1))
}
|
'use strict';
const argv = require('argv');
const logger = require('./common/middleware/logger');
const app = require('./app/app');
const billing = require('./billing/billing');
const migrate = require('./common/utils/migrate');
const argvOptions = [
{
name: 'task',
type: 'string'
}
];
(async function main() {
logger.info('~~~ PTAH BACKEND ~~~');
await migrate.start(logger);
const args = argv.option(argvOptions).run();
const task = (args.options.task || '').toLowerCase();
if (task === 'billing') {
logger.info('starting billing');
return await billing.start(logger);
}
logger.info('starting app...');
app.start(logger);
})()
.then()
.catch(err => {
logger.error(err);
process.exit(1);
});
|
sap.ui.define(['sap/ui/webc/common/thirdparty/base/asset-registries/Icons'], function (Icons) { 'use strict';
const name = "email-read";
const pathData = "M512 143v337q0 13-9.5 22.5T480 512H32q-14 0-23-9.5T0 480V143L254 0zm-48 337L256 326 48 480h416zM328 294l152-135L255 37 32 159l151 135-17 18L32 191v273l223-170 225 168V192L347 312z";
const ltr = false;
const collection = "SAP-icons";
const packageName = "@ui5/webcomponents-icons";
Icons.registerIcon(name, { pathData, ltr, collection, packageName });
var emailRead = { pathData };
return emailRead;
});
|
from __future__ import print_function, unicode_literals, division, absolute_import
import random as _random
from pyotp.hotp import HOTP
from pyotp.otp import OTP
from pyotp.totp import TOTP
from . import utils
def random_base32(length=16, random=_random.SystemRandom(),
chars=list('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')):
return ''.join(
random.choice(chars)
for _ in range(length)
)
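
A short usage sketch (not part of the package code above): generating a shared secret with `random_base32()` and using it with the `TOTP` class re-exported here.

```python
# Illustrative only; assumes the package is installed as "pyotp".
import pyotp

secret = pyotp.random_base32()   # 16-character base32 secret by default
totp = pyotp.TOTP(secret)        # time-based one-time passwords
code = totp.now()                # current 6-digit code
assert totp.verify(code)         # valid within the current time window
```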
|
from django.core.exceptions import ImproperlyConfigured
from django.core.servers.basehttp import get_internal_wsgi_application
from django.core.signals import request_started
from django.core.wsgi import get_wsgi_application
from django.db import close_old_connections
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
@override_settings(ROOT_URLCONF='wsgi.urls')
class WSGITest(SimpleTestCase):
request_factory = RequestFactory()
def setUp(self):
request_started.disconnect(close_old_connections)
def tearDown(self):
request_started.connect(close_old_connections)
def test_get_wsgi_application(self):
"""
get_wsgi_application() returns a functioning WSGI callable.
"""
application = get_wsgi_application()
environ = self.request_factory._base_environ(
PATH_INFO="/",
CONTENT_TYPE="text/html; charset=utf-8",
REQUEST_METHOD="GET"
)
response_data = {}
def start_response(status, headers):
response_data["status"] = status
response_data["headers"] = headers
response = application(environ, start_response)
self.assertEqual(response_data["status"], "200 OK")
self.assertEqual(
set(response_data["headers"]),
{('Content-Length', '12'), ('Content-Type', 'text/html; charset=utf-8')})
self.assertIn(bytes(response), [
b"Content-Length: 12\r\nContent-Type: text/html; charset=utf-8\r\n\r\nHello World!",
b"Content-Type: text/html; charset=utf-8\r\nContent-Length: 12\r\n\r\nHello World!"
])
def test_file_wrapper(self):
"""
FileResponse uses wsgi.file_wrapper.
"""
class FileWrapper:
def __init__(self, filelike, blksize=8192):
filelike.close()
application = get_wsgi_application()
environ = self.request_factory._base_environ(
PATH_INFO='/file/',
REQUEST_METHOD='GET',
**{'wsgi.file_wrapper': FileWrapper}
)
response_data = {}
def start_response(status, headers):
response_data['status'] = status
response_data['headers'] = headers
response = application(environ, start_response)
self.assertEqual(response_data['status'], '200 OK')
self.assertIsInstance(response, FileWrapper)
class GetInternalWSGIApplicationTest(SimpleTestCase):
@override_settings(WSGI_APPLICATION="wsgi.wsgi.application")
def test_success(self):
"""
If ``WSGI_APPLICATION`` is a dotted path, the referenced object is
returned.
"""
app = get_internal_wsgi_application()
from .wsgi import application
self.assertIs(app, application)
@override_settings(WSGI_APPLICATION=None)
def test_default(self):
"""
If ``WSGI_APPLICATION`` is ``None``, the return value of
``get_wsgi_application`` is returned.
"""
# Mock out get_wsgi_application so we know its return value is used
fake_app = object()
def mock_get_wsgi_app():
return fake_app
from django.core.servers import basehttp
_orig_get_wsgi_app = basehttp.get_wsgi_application
basehttp.get_wsgi_application = mock_get_wsgi_app
try:
app = get_internal_wsgi_application()
self.assertIs(app, fake_app)
finally:
basehttp.get_wsgi_application = _orig_get_wsgi_app
@override_settings(WSGI_APPLICATION="wsgi.noexist.app")
def test_bad_module(self):
msg = "WSGI application 'wsgi.noexist.app' could not be loaded; Error importing"
with self.assertRaisesMessage(ImproperlyConfigured, msg):
get_internal_wsgi_application()
@override_settings(WSGI_APPLICATION="wsgi.wsgi.noexist")
def test_bad_name(self):
msg = "WSGI application 'wsgi.wsgi.noexist' could not be loaded; Error importing"
with self.assertRaisesMessage(ImproperlyConfigured, msg):
get_internal_wsgi_application()
|
/** Parse type signatures
* @file
*
* The parser translates the lexer's tokens into IR nodes
*
* This source file is part of the Cone Programming Language C compiler
* See Copyright Notice in conec.h
*/
#include "parser.h"
#include "../ir/ir.h"
#include "../shared/memory.h"
#include "../shared/error.h"
#include "../ir/nametbl.h"
#include "lexer.h"
#include <stdio.h>
#include <assert.h>
// Parse a permission, return reference to defperm if not found
INode *parsePerm() {
if (lexIsToken(PermToken)) {
INode *perm = newPermUseNode((PermNode*)lex->val.ident->node);
lexNextToken();
return perm;
}
return unknownType;
}
// Parse a variable declaration
VarDclNode *parseVarDcl(ParseState *parse, PermNode *defperm, uint16_t flags) {
VarDclNode *varnode;
INode *perm;
// Grab the permission type
perm = parsePerm();
if (perm->tag == UnknownTag)
perm = (INode*)defperm;
INode *permdcl = itypeGetTypeDcl(perm);
if (permdcl == (INode*)mut1Perm || permdcl == (INode*)uniPerm || permdcl == (INode*)opaqPerm
|| (permdcl == (INode*)constPerm && !(flags & ParseMayConst)))
errorMsgNode(perm, ErrorInvType, "Permission not valid for variable/field declaration");
// Obtain variable's name
if (!lexIsToken(IdentToken)) {
errorMsgLex(ErrorNoIdent, "Expected variable name for declaration");
return newVarDclFull(anonName, VarDclTag, unknownType, perm, NULL);
}
varnode = newVarDclNode(lex->val.ident, VarDclTag, perm);
lexNextToken();
// Get value type, if provided
varnode->vtype = parseVtype(parse);
// Get initialization value after '=', if provided
if (lexIsToken(AssgnToken)) {
if (!(flags&ParseMayImpl))
errorMsgLex(ErrorBadImpl, "A default/initial value may not be specified here.");
lexNextToken();
varnode->value = parseAnyExpr(parse);
}
else {
if (!(flags&ParseMaySig))
errorMsgLex(ErrorNoInit, "Must specify default/initial value.");
}
return varnode;
}
INode *parseTypeName(ParseState *parse) {
INode *node = parseNameUse(parse);
if (lexIsToken(LBracketToken)) {
FnCallNode *fncall = newFnCallNode(node, 8);
fncall->flags |= FlagIndex;
lexNextToken();
lexIncrParens();
if (!lexIsToken(RBracketToken)) {
nodesAdd(&fncall->args, parseVtype(parse));
while (lexIsToken(CommaToken)) {
lexNextToken();
nodesAdd(&fncall->args, parseVtype(parse));
}
}
parseCloseTok(RBracketToken);
node = (INode *)fncall;
}
return node;
}
// Parse an enum type
INode* parseEnum(ParseState *parse) {
EnumNode *node = newEnumNode();
lexNextToken();
return (INode*)node;
}
// Parse a field declaration
FieldDclNode *parseFieldDcl(ParseState *parse, PermNode *defperm) {
FieldDclNode *fldnode;
INode *vtype;
INode *perm;
// Grab the permission type
perm = parsePerm();
INode *permdcl = perm == unknownType? unknownType : itypeGetTypeDcl(perm);
if (permdcl != (INode*)mutPerm && permdcl == (INode*)immPerm)
errorMsgNode(perm, ErrorInvType, "Permission not valid for field declaration");
// Obtain variable's name
if (!lexIsToken(IdentToken)) {
errorMsgLex(ErrorNoIdent, "Expected field name for declaration");
return newFieldDclNode(anonName, perm);
}
fldnode = newFieldDclNode(lex->val.ident, perm);
lexNextToken();
// Get value type, if provided
if (lexIsToken(EnumToken))
fldnode->vtype = parseEnum(parse);
else if ((vtype = parseVtype(parse)))
fldnode->vtype = vtype;
// Get initialization value after '=', if provided
if (lexIsToken(AssgnToken)) {
lexNextToken();
fldnode->value = parseAnyExpr(parse);
}
return fldnode;
}
// Parse a struct
INode *parseStruct(ParseState *parse, uint16_t strflags) {
char *svprefix = parse->gennamePrefix;
INsTypeNode *svtype = parse->typenode;
GenericNode *genericnode = NULL;
StructNode *strnode;
uint16_t fieldnbr = 0;
// Capture the kind of type, then get next token (name)
uint16_t tag = StructTag;
lexNextToken();
// Handle attributes
while (1) {
if (lex->toktype == SamesizeToken) {
if (strflags & TraitType)
strflags |= SameSize;
else
errorMsgLex(ErrorNoIdent, "@samesize attribute only allowed on traits.");
lexNextToken();
}
else if (lex->toktype == MoveToken) {
strflags |= MoveType;
lexNextToken();
}
else if (lex->toktype == OpaqueToken) {
strflags |= OpaqueType;
lexNextToken();
}
else
break;
}
// Process struct type name, if provided
if (lexIsToken(IdentToken)) {
strnode = newStructNode(lex->val.ident);
strnode->tag = tag;
strnode->flags |= strflags;
strnode->mod = parse->mod;
nameConcatPrefix(&parse->gennamePrefix, &strnode->namesym->namestr);
parse->typenode = (INsTypeNode *)strnode;
lexNextToken();
}
else {
errorMsgLex(ErrorNoIdent, "Expected a name for the type");
return NULL;
}
uint16_t methflags = ParseMayName | ParseMayImpl;
if (strnode->flags & TraitType)
methflags |= ParseMaySig;
// Handle if generic parameters are found
if (lexIsToken(LBracketToken)) {
genericnode = newGenericDclNode(strnode->namesym);
parseGenericVars(parse, genericnode);
genericnode->body = (INode*)strnode;
}
// Obtain base trait, if specified
if (lexIsToken(ExtendsToken)) {
lexNextToken();
strnode->basetrait = parseTypeName(parse); // Type could be a qualified name or generic
}
// If block has been provided, process field or method definitions
if (parseHasBlock()) {
parseBlockStart();
while (!parseBlockEnd()) {
lexStmtStart();
if (lexIsToken(SetToken)) {
lexNextToken();
if (!lexIsToken(FnToken))
errorMsgLex(ErrorNotFn, "Expected fn declaration");
else {
FnDclNode *fn = (FnDclNode*)parseFn(parse, FlagMethFld, methflags);
if (fn && isNamedNode(fn)) {
fn->flags |= FlagSetMethod;
nameGenFnName(fn, parse->gennamePrefix);
iNsTypeAddFn((INsTypeNode*)strnode, fn);
}
}
}
else if (lexIsToken(FnToken)) {
FnDclNode *fn = (FnDclNode*)parseFn(parse, FlagMethFld, methflags);
if (fn && isNamedNode(fn)) {
nameGenFnName(fn, parse->gennamePrefix);
iNsTypeAddFn((INsTypeNode*)strnode, fn);
}
}
else if (lexIsToken(MixinToken)) {
// Handle a trait mixin, capturing it in a field-like node
FieldDclNode *field = newFieldDclNode(anonName, (INode*)immPerm);
field->flags |= IsMixin | FlagMethFld;
lexNextToken();
INode *vtype;
if ((vtype = parseVtype(parse)))
field->vtype = vtype;
structAddField(strnode, field);
parseEndOfStatement();
}
else if (lexIsToken(PermToken) || lexIsToken(IdentToken)) {
FieldDclNode *field = parseFieldDcl(parse, mutPerm);
field->index = fieldnbr++;
field->flags |= FlagMethFld;
structAddField(strnode, field);
parseEndOfStatement();
}
else {
errorMsgLex(ErrorNoSemi, "Unknown struct statement.");
parseSkipToNextStmt();
}
}
}
else
parseEndOfStatement();
parse->typenode = svtype;
parse->gennamePrefix = svprefix;
return genericnode? (INode*)genericnode : (INode*)strnode;
}
void parseInjectSelf(FnSigNode *fnsig) {
NameUseNode *selftype = newNameUseNode(selfTypeName);
VarDclNode *selfparm = newVarDclFull(nametblFind("self", 4), VarDclTag, (INode*)selftype, newPermUseNode(constPerm), NULL);
selfparm->scope = 1;
selfparm->index = 0;
selfparm->flowtempflags |= VarInitialized;
nodesAdd(&fnsig->parms, (INode*)selfparm);
}
// Parse a function's type signature
INode *parseFnSig(ParseState *parse, int fnflags) {
FnSigNode *fnsig;
uint16_t parmnbr = 0;
uint16_t parseflags = ParseMaySig | ParseMayImpl;
// Set up memory block for the function's type signature
fnsig = newFnSigNode();
// Process parameter declarations
if (lexIsToken(LParenToken)) {
lexNextToken();
// A type's method with no parameters should still define self
if (lexIsToken(RParenToken) && (fnflags & FlagMethFld))
parseInjectSelf(fnsig);
while (lexIsToken(PermToken) || lexIsToken(IdentToken)) {
VarDclNode *parm = parseVarDcl(parse, immPerm, parseflags);
parm->flowtempflags |= VarInitialized; // parameter vars always start with a valid value
// Do special inference if function is a type's method
if (parse->typenode) {
// Create default self parm, if 'self' was not specified
if ((fnflags & FlagMethFld) && parmnbr == 0 && parm->namesym != selfName) {
parseInjectSelf(fnsig);
++parmnbr;
}
// Infer value type of a parameter (or its reference) if unspecified
if (parm->vtype == unknownType) {
parm->vtype = (INode*)newNameUseNode(selfTypeName);
}
else if (parm->vtype->tag == RefTag) {
RefNode *refnode = (RefNode *)parm->vtype;
if (refnode->vtexp == unknownType) {
refnode->vtexp = (INode*)newNameUseNode(selfTypeName);
}
}
}
// Add parameter to function's parm list
parm->scope = 1;
parm->index = parmnbr++;
if (parm->value)
parseflags = ParseMayImpl; // force remaining parms to specify default
nodesAdd(&fnsig->parms, (INode*)parm);
if (!lexIsToken(CommaToken))
break;
lexNextToken();
}
parseCloseTok(RParenToken);
}
else
errorMsgLex(ErrorNoLParen, "Expected left parenthesis for parameter declarations");
// Parse return type info - turn into void if none specified
if ((fnsig->rettype = parseVtype(parse)) != unknownType) {
// Handle multiple return types
if (lexIsToken(CommaToken)) {
TupleNode *rettype = newTupleNode(4);
nodesAdd(&rettype->elems, fnsig->rettype);
while (lexIsToken(CommaToken)) {
lexNextToken();
nodesAdd(&rettype->elems, parseVtype(parse));
}
fnsig->rettype = (INode*)rettype;
}
}
else {
fnsig->rettype = (INode*)newVoidNode();
inodeLexCopy(fnsig->rettype, (INode*)fnsig); // Make invisible void show up in error msg
}
return (INode*)fnsig;
}
// Parse a typedef statement
TypedefNode *parseTypedef(ParseState *parse) {
lexNextToken();
// Process struct type name, if provided
if (!lexIsToken(IdentToken)) {
errorMsgLex(ErrorNoIdent, "Expected a name for the type");
return NULL;
}
TypedefNode *newnode = newTypedefNode(lex->val.ident);
lexNextToken();
newnode->typeval = parseVtype(parse);
parseEndOfStatement();
return newnode;
}
// Parse a type expression. Return unknownType if none found.
INode* parseVtype(ParseState *parse) {
// This is a placeholder since parser converges type and value expression parsing
switch (lex->toktype) {
case QuesToken:
case AmperToken:
case ArrayRefToken:
case VirtRefToken:
case PlusToken:
case PlusArrayRefToken:
case PlusVirtRefToken:
case StarToken:
case LBracketToken:
case VoidToken:
case IdentToken:
return parsePrefix(parse, 0);
default:
return unknownType;
}
}
|
#!/usr/bin/env node
'use strict'
const assert = require('assert')
const program = require('commander')
const Analytics = require('.')
const pkg = require('./package')
const run = (method, options) => {
const writeKey = process.env.SEGMENT_WRITE_KEY || program.writeKey
assert(writeKey, 'You need to define your write key via the $SEGMENT_WRITE_KEY environment variable or the --write-key flag.')
const analytics = new Analytics(writeKey, { flushAt: 1 })
analytics[method](options, err => {
if (err) {
console.error(err.stack)
process.exit(1)
}
})
}
const toDate = str => new Date(str)
const toObject = str => JSON.parse(str)
program
.version(pkg.version)
.option('-w, --write-key <key>', 'the segment write key to use')
program
.command('track <event>')
.description('track a user event')
.option('-u, --user <id>', 'the user id to send the event as')
.option('-a, --anonymous <id>', 'the anonymous user id to send the event as')
.option('-p, --properties <data>', 'the event properties to send (JSON-encoded)', toObject)
.option('-t, --timestamp <date>', 'the date of the event', toDate)
.option('-c, --context <data>', 'additional context for the event (JSON-encoded)', toObject)
.action((event, options) => {
run('track', {
event,
userId: options.user,
anonymousId: options.anonymous,
properties: options.properties,
timestamp: options.timestamp,
context: options.context
})
})
program
.command('page')
.description('track a page view')
.option('-u, --user <id>', 'the user id to send the event as')
.option('-n, --name <name>', 'the name of the page')
.option('-C, --category <category>', 'the category of the page')
.option('-p, --properties <data>', 'attributes of the page (JSON-encoded)', toObject)
.option('-t, --timestamp <date>', 'the date of the event', toDate)
.option('-c, --context <data>', 'additional context for the event (JSON-encoded)', toObject)
.action(options => {
run('page', {
userId: options.user,
name: options.name,
category: options.category,
properties: options.properties,
timestamp: options.timestamp,
context: options.context
})
})
program
.command('identify')
.description('identify a user')
.option('-u, --user <id>', 'the user id to send the event as')
.option('-T, --traits <data>', 'the user traits to send (JSON-encoded)', toObject)
.option('-t, --timestamp <date>', 'the date of the event', toDate)
.option('-c, --context <data>', 'additional context for the event (JSON-encoded)', toObject)
.action(options => {
run('identify', {
userId: options.user,
traits: options.traits,
timestamp: options.timestamp,
context: options.context
})
})
program
.command('group')
.description('identify a group of users')
.option('-u, --user <id>', 'the user id to send the event as')
.option('-a, --anonymous <id>', 'the anonymous id to associate with this group')
.option('-g, --group <id>', 'the group id to associate this user with')
.option('-T, --traits <data>', 'attributes about the group (JSON-encoded)', toObject)
.option('-t, --timestamp <date>', 'the date of the event', toDate)
.option('-c, --context <data>', 'additional context for the event (JSON-encoded)', toObject)
.action(options => {
run('group', {
userId: options.user,
anonymousId: options.anonymous,
groupId: options.group,
traits: options.traits,
timestamp: options.timestamp,
context: options.context
})
})
program
.command('alias')
.description('remap a user to a new id')
.option('-u, --user <id>', 'the user id to send the event as')
.option('-p, --previous <id>', 'the previous user id (to add the alias for)')
.action(options => {
run('alias', {
userId: options.user,
previousId: options.previous
})
})
program.parse(process.argv)
if (program.args.length === 0) {
program.help()
}
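// Example invocations (illustrative only; the script name and write key below are
// placeholders, not values taken from this file):
//   SEGMENT_WRITE_KEY=XXXX node cli.js track signup --user user123 --properties '{"plan":"pro"}'
//   node cli.js --write-key XXXX identify --user user123 --traits '{"email":"user@example.com"}'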
|
/*
* Copyright (C) 2015-2017 Alibaba Group Holding Limited
*/
#include <stdint.h>
#include "k_config.h"
#include "board.h"
#include "aos/hal/uart.h"
#include "aos/hal/gpio.h"
#include "aos/hal/i2c.h"
#include "aos/hal/can.h"
#include "aos/hal/timer.h"
#include "stm32f4xx_hal.h"
#include "hal_uart_stm32f4.h"
#include "hal_gpio_stm32f4.h"
#include "hal_can_stm32f4.h"
#include "hal_timer_stm32f4.h"
#ifdef AOS_CANOPEN
#include "co_adapter.h"
#endif
#if defined (__CC_ARM) && defined(__MICROLIB)
#define PUTCHAR_PROTOTYPE int fputc(int ch, FILE *f)
#define GETCHAR_PROTOTYPE int fgetc(FILE *f)
size_t g_iram1_start = 0x20000000;
size_t g_iram1_total_size = 0x00030000;
#elif defined(__ICCARM__)
#define PUTCHAR_PROTOTYPE int fputc(int ch, FILE *f)
#define GETCHAR_PROTOTYPE int fgetc(FILE *f)
#else
/* With GCC/RAISONANCE, small printf (option LD Linker->Libraries->Small printf
set to 'Yes') calls __io_putchar() */
#define PUTCHAR_PROTOTYPE int __io_putchar(int ch)
#define GETCHAR_PROTOTYPE int __io_getchar(void)
#endif /* defined (__CC_ARM) && defined(__MICROLIB) */
uart_dev_t uart_0;
const gpio_mapping_t gpio_mapping_table[TOTAL_GPIO_NUM] =
{
{ON_BOARD_LED01, GPIOB, GPIO_PIN_0, /*IRQ_NULL,*/GPIO_PULLUP, GPIO_SPEED_FREQ_LOW},
{ON_BOARD_LED02, GPIOB, GPIO_PIN_7, /*IRQ_NULL,*/GPIO_PULLUP, GPIO_SPEED_FREQ_LOW},
{ON_BOARD_LED03, GPIOB, GPIO_PIN_14, /*IRQ_NULL,*/GPIO_PULLUP, GPIO_SPEED_FREQ_LOW}
};
gpio_dev_t brd_gpio_table[] =
{
{ON_BOARD_LED01, OUTPUT_PUSH_PULL, NULL},
{ON_BOARD_LED02, OUTPUT_PUSH_PULL, NULL},
{ON_BOARD_LED03, OUTPUT_PUSH_PULL, NULL},
};
CAN_MAPPING CAN_MAPPING_TABLE[] =
{
#ifdef AOS_CANOPEN
{ PORT_CAN_CANOPEN, CAN1, can_dispatch, NULL, NULL},
#endif
{ PORT_CAN_CANOPEN, NULL, NULL, NULL, NULL},
};
TIMER_MAPPING TIMER_MAPPING_TABLE[] =
{
{PORT_TIMER_CANOPEN, TIM3},
};
UART_MAPPING UART_MAPPING_TABLE[] =
{
{ PORT_UART_STD, USART3, { UART_OVERSAMPLING_16, 1024} },
{ PORT_UART_AT, USART6, { UART_OVERSAMPLING_16, 2048} },
{ PORT_UART_RS485, UART7, { UART_OVERSAMPLING_16, 512} },
{ PORT_UART_SCANNER, UART4, { UART_OVERSAMPLING_16, 512} },
{ PORT_UART_LORA, UART5, { UART_OVERSAMPLING_16, 512} },
};
void* i2c_mapping_table[] = { I2C1, I2C2, I2C3};
static void stduart_init(void);
static void I2C1_init();
uint32_t hal_timer_getcounter(timer_dev_t *tim)
{
uint32_t counter = 0;
if (tim != NULL)
{
counter = __HAL_TIM_GET_COUNTER((TIM_HandleTypeDef *)tim->priv);
}
return counter;
}
void hal_timer_setcounter(timer_dev_t *tim, uint32_t counter)
{
if (tim != NULL)
{
__HAL_TIM_SET_COUNTER((TIM_HandleTypeDef *)tim->priv, counter);
__HAL_TIM_ENABLE((TIM_HandleTypeDef *)tim->priv);
}
}
static int32_t brd_gpio_init(void)
{
int32_t i;
int32_t ret = 0;
for (i = 0; i < TOTAL_GPIO_NUM; ++i) {
ret = hal_gpio_init(&brd_gpio_table[i]);
if (ret) {
printf("gpio %d in gpio table init fail \r\n", i);
}
}
return ret;
}
void stm32_soc_init(void)
{
HAL_Init();
/* Configure the system clock */
SystemClock_Config();
/**Configure the Systick interrupt time
*/
HAL_SYSTICK_Config(HAL_RCC_GetHCLKFreq()/RHINO_CONFIG_TICKS_PER_SECOND);
/* GPIO Ports Clock Enable */
__HAL_RCC_GPIOC_CLK_ENABLE();
__HAL_RCC_GPIOH_CLK_ENABLE();
__HAL_RCC_GPIOA_CLK_ENABLE();
__HAL_RCC_GPIOE_CLK_ENABLE();
__HAL_RCC_GPIOB_CLK_ENABLE();
__HAL_RCC_GPIOD_CLK_ENABLE();
__HAL_RCC_GPIOG_CLK_ENABLE();
MX_DMA_Init();
}
void stm32_peripheral_init(void)
{
/*default uart init*/
stduart_init();
/*gpio init*/
brd_gpio_init();
/*i2c pre init*/
hal_i2c_pre_init();
/*i2c bus 1 init*/
I2C1_init();
/*default can init*/
#ifdef PT_SENSOR
CAN_init();
#endif
}
static void stduart_init(void)
{
uart_0.port = PORT_UART_STD;
uart_0.config.baud_rate = STDIO_UART_BUADRATE;
uart_0.config.data_width = DATA_WIDTH_8BIT;
uart_0.config.flow_control = FLOW_CONTROL_DISABLED;
uart_0.config.mode = MODE_TX_RX;
uart_0.config.parity = NO_PARITY;
uart_0.config.stop_bits = STOP_BITS_1;
hal_uart_init(&uart_0);
}
static void I2C1_init()
{
i2c_dev_t i2c_1 = {
.port = 1,
.config.address_width = I2C_HAL_ADDRESS_WIDTH_7BIT,
.config.freq = I2C_BUS_BIT_RATES_100K,
.config.mode = I2C_MODE_MASTER,
};
if (hal_i2c_init(&i2c_1)) {
printf("i2c bus 1 init fail \r\n");
}
}
/**
* @brief This function handles System tick timer.
*/
void SysTick_Handler(void)
{
HAL_IncTick();
krhino_intrpt_enter();
krhino_tick_proc();
krhino_intrpt_exit();
}
/**
* @brief Retargets the C library printf function to the USART.
* @param None
* @retval None
*/
PUTCHAR_PROTOTYPE
{
if (ch == '\n') {
//hal_uart_send(&console_uart, (void *)"\r", 1, 30000);
hal_uart_send(&uart_0, (void *)"\r", 1, 30000);
}
hal_uart_send(&uart_0, &ch, 1, 30000);
return ch;
}
/**
* @brief Retargets the C library scanf function to the USART.
* @param None
* @retval None
*/
GETCHAR_PROTOTYPE
{
/* Place your implementation of fgetc here */
    /* e.g. read a character from the console UART, blocking until one is received */
uint8_t ch = EOF;
int32_t ret = -1;
uint32_t recv_size;
ret = hal_uart_recv_II(&uart_0, &ch, 1, &recv_size, HAL_WAIT_FOREVER);
if (ret == 0) {
return ch;
} else {
return -1;
}
}
|
# Preprocess iemocap conversation emotion dataset
import argparse
import pickle
import random
import os
from pathlib import Path
import numpy as np
from tqdm import tqdm
from sklearn import metrics
from sklearn.model_selection import train_test_split
from utils import Vocab, Tokenizer, PAD_TOKEN, SOS_TOKEN, EOS_TOKEN
project_dir = Path(__file__).resolve().parent
datasets_dir = project_dir.joinpath('datasets/')
iemocap_dir = datasets_dir.joinpath('iemocap/')
iemocap_pickle = iemocap_dir.joinpath("IEMOCAP_features_raw.pkl")
GLOVE_DIR = ""
# Tokenizer
tokenizer = Tokenizer('spacy')
class IEMOCAP:
'''
label index mapping = {'hap':0, 'sad':1, 'neu':2, 'ang':3, 'exc':4, 'fru':5}
'''
def load_iemocap_data(self):
_, self.videoSpeakers, self.videoLabels, _, _, _, self.videoSentence, trainVid, self.testVid = pickle.load(
open(iemocap_pickle, "rb"), encoding="latin1")
self.trainVid, self.valVid = train_test_split(
list(trainVid), test_size=.2, random_state=1227)
self.vids = {"train":self.trainVid, "valid":self.valVid, "test":self.testVid}
        # Compute the maximum conversation length (number of sentences per conversation)
self.max_conv_length = max([len(self.videoSentence[vid]) for vid in self.trainVid])
def tokenize_conversation(lines):
sentence_list = [tokenizer(line) for line in lines]
return sentence_list
def pad_sentences(conversations, max_sentence_length=30, max_conversation_length=10):
def pad_tokens(tokens, max_sentence_length=max_sentence_length):
n_valid_tokens = len(tokens)
if n_valid_tokens > max_sentence_length - 1:
tokens = tokens[:max_sentence_length - 1]
n_pad = max_sentence_length - n_valid_tokens - 1
tokens = tokens + [EOS_TOKEN] + [PAD_TOKEN] * n_pad
return tokens
def pad_conversation(conversation):
conversation = [pad_tokens(sentence) for sentence in conversation]
return conversation
all_padded_sentences = []
all_sentence_length = []
for conversation in conversations:
if len(conversation) > max_conversation_length:
conversation = conversation[:max_conversation_length]
sentence_length = [min(len(sentence) + 1, max_sentence_length) # +1 for EOS token
for sentence in conversation]
all_sentence_length.append(sentence_length)
sentences = pad_conversation(conversation)
all_padded_sentences.append(sentences)
sentences = all_padded_sentences
sentence_length = all_sentence_length
return sentences, sentence_length
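# Worked illustration of the padding rule above (the tokens are hypothetical examples):
# with max_sentence_length=5, the 3-token sentence ['i', 'am', 'fine'] stays intact
# (3 <= 5 - 1), becomes ['i', 'am', 'fine', EOS_TOKEN, PAD_TOKEN], and its recorded
# length is min(3 + 1, 5) = 4 (the +1 accounts for the appended EOS_TOKEN).
# A 7-token sentence is first truncated to 4 tokens and then only gets EOS_TOKEN.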
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Load the dataset
iemocap = IEMOCAP()
iemocap.load_iemocap_data()
# Maximum valid length of sentence
# => SOS/EOS will surround sentence (EOS for source / SOS for target)
# => maximum length of tensor = max_sentence_length + 1
parser.add_argument('-s', '--max_sentence_length', type=int, default=30)
# Vocabulary
parser.add_argument('--max_vocab_size', type=int, default=20000)
parser.add_argument('--min_vocab_frequency', type=int, default=5)
args = parser.parse_args()
max_sent_len = args.max_sentence_length
max_conv_len = iemocap.max_conv_length
max_vocab_size = args.max_vocab_size
min_freq = args.min_vocab_frequency
def to_pickle(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
for split_type in ['train', 'valid', 'test']:
conv_sentences = [iemocap.videoSentence[vid] for vid in iemocap.vids[split_type]]
conv_labels = [iemocap.videoLabels[vid]
for vid in iemocap.vids[split_type]]
print(f'Processing {split_type} dataset...')
split_data_dir = iemocap_dir.joinpath(split_type)
split_data_dir.mkdir(exist_ok=True)
conv_sentences = list([tokenize_conversation(conv) for conv in conv_sentences])
conversation_length = [min(len(conv), max_conv_len)
for conv in conv_sentences]
# fix labels as per conversation_length
for idx, conv_len in enumerate(conversation_length):
conv_labels[idx]=conv_labels[idx][:conv_len]
sentences, sentence_length = pad_sentences(
conv_sentences,
max_sentence_length=max_sent_len,
max_conversation_length=max_conv_len)
        for conv_len, label in zip(conversation_length, conv_labels):
            assert(conv_len == len(label))
print('Saving preprocessed data at', split_data_dir)
to_pickle(conversation_length, split_data_dir.joinpath(
'conversation_length.pkl'))
to_pickle(sentences, split_data_dir.joinpath('sentences.pkl'))
to_pickle(conv_labels, split_data_dir.joinpath('labels.pkl'))
to_pickle(sentence_length, split_data_dir.joinpath(
'sentence_length.pkl'))
to_pickle(iemocap.vids[split_type], split_data_dir.joinpath('video_id.pkl'))
if split_type == 'train':
print('Save Vocabulary...')
vocab = Vocab(tokenizer)
vocab.add_dataframe(conv_sentences)
assert(GLOVE_DIR != "")
vocab.update(GLOVE_DIR, max_size=max_vocab_size, min_freq=min_freq)
print('Vocabulary size: ', len(vocab))
vocab.pickle(iemocap_dir.joinpath('word2id.pkl'),
iemocap_dir.joinpath('id2word.pkl'),
iemocap_dir.joinpath('word_emb.pkl'))
|
'use strict';
/**
* Module dependencies.
*/
var mongoose = require('mongoose'),
Schema = mongoose.Schema;
/**
* Sherpa Schema
*/
var SherpaSchema = new Schema({
challenge: {
type: String,
default: '',
trim: true
},
sherpa: {
type: Boolean
},
sherpaName: {
type: String,
default: '',
required: 'Please fill in a name',
trim: true
},
created: {
type: Date,
default: Date.now
},
user: {
type: Schema.ObjectId,
ref: 'User'
}
});
mongoose.model('Sherpa', SherpaSchema);
|
# coding: utf-8
from datetime import timedelta as td
import json
from unittest.mock import patch, Mock
from django.core import mail
from django.utils.timezone import now
from hc.api.models import Channel, Check, Notification, TokenBucket
from hc.test import BaseTestCase
from requests.exceptions import ConnectionError, Timeout
from django.test.utils import override_settings
class NotifyTestCase(BaseTestCase):
def _setup_data(self, kind, value, status="down", email_verified=True):
self.check = Check(project=self.project)
self.check.status = status
self.check.last_ping = now() - td(minutes=61)
self.check.save()
self.channel = Channel(project=self.project)
self.channel.kind = kind
self.channel.value = value
self.channel.email_verified = email_verified
self.channel.save()
self.channel.checks.add(self.check)
@patch("hc.api.transports.requests.request")
def test_webhook(self, mock_get):
definition = {
"method_down": "GET",
"url_down": "http://example",
"body_down": "",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
mock_get.return_value.status_code = 200
self.channel.notify(self.check)
mock_get.assert_called_with(
"get",
"http://example",
headers={"User-Agent": "healthchecks.io"},
timeout=5,
)
@patch("hc.api.transports.requests.request", side_effect=Timeout)
def test_webhooks_handle_timeouts(self, mock_get):
definition = {
"method_down": "GET",
"url_down": "http://example",
"body_down": "",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "Connection timed out")
self.channel.refresh_from_db()
self.assertEqual(self.channel.last_error, "Connection timed out")
@patch("hc.api.transports.requests.request", side_effect=ConnectionError)
def test_webhooks_handle_connection_errors(self, mock_get):
definition = {
"method_down": "GET",
"url_down": "http://example",
"body_down": "",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "Connection failed")
@patch("hc.api.transports.requests.request")
def test_webhooks_handle_500(self, mock_get):
definition = {
"method_down": "GET",
"url_down": "http://example",
"body_down": "",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
mock_get.return_value.status_code = 500
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "Received status code 500")
@patch("hc.api.transports.requests.request")
def test_webhooks_support_variables(self, mock_get):
definition = {
"method_down": "GET",
"url_down": "http://host/$CODE/$STATUS/$TAG1/$TAG2/?name=$NAME",
"body_down": "",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
self.check.name = "Hello World"
self.check.tags = "foo bar"
self.check.save()
self.channel.notify(self.check)
url = "http://host/%s/down/foo/bar/?name=Hello%%20World" % self.check.code
args, kwargs = mock_get.call_args
self.assertEqual(args[0], "get")
self.assertEqual(args[1], url)
self.assertEqual(kwargs["headers"], {"User-Agent": "healthchecks.io"})
self.assertEqual(kwargs["timeout"], 5)
@patch("hc.api.transports.requests.request")
def test_webhooks_handle_variable_variables(self, mock_get):
definition = {
"method_down": "GET",
"url_down": "http://host/$$NAMETAG1",
"body_down": "",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
self.check.tags = "foo bar"
self.check.save()
self.channel.notify(self.check)
# $$NAMETAG1 should *not* get transformed to "foo"
args, kwargs = mock_get.call_args
self.assertEqual(args[1], "http://host/$TAG1")
@patch("hc.api.transports.requests.request")
def test_webhooks_support_post(self, mock_request):
definition = {
"method_down": "POST",
"url_down": "http://example.com",
"body_down": "The Time Is $NOW",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
self.check.save()
self.channel.notify(self.check)
args, kwargs = mock_request.call_args
self.assertEqual(args[0], "post")
self.assertEqual(args[1], "http://example.com")
# spaces should not have been urlencoded:
payload = kwargs["data"].decode()
self.assertTrue(payload.startswith("The Time Is 2"))
@patch("hc.api.transports.requests.request")
def test_webhooks_dollarsign_escaping(self, mock_get):
# If name or tag contains what looks like a variable reference,
# that should be left alone:
definition = {
"method_down": "GET",
"url_down": "http://host/$NAME",
"body_down": "",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
self.check.name = "$TAG1"
self.check.tags = "foo"
self.check.save()
self.channel.notify(self.check)
url = "http://host/%24TAG1"
mock_get.assert_called_with(
"get", url, headers={"User-Agent": "healthchecks.io"}, timeout=5
)
@patch("hc.api.transports.requests.request")
def test_webhooks_handle_up_events(self, mock_get):
definition = {
"method_up": "GET",
"url_up": "http://bar",
"body_up": "",
"headers_up": {},
}
self._setup_data("webhook", json.dumps(definition), status="up")
self.channel.notify(self.check)
mock_get.assert_called_with(
"get", "http://bar", headers={"User-Agent": "healthchecks.io"}, timeout=5
)
@patch("hc.api.transports.requests.request")
def test_webhooks_handle_noop_up_events(self, mock_get):
definition = {
"method_up": "GET",
"url_up": "",
"body_up": "",
"headers_up": {},
}
self._setup_data("webhook", json.dumps(definition), status="up")
self.channel.notify(self.check)
self.assertFalse(mock_get.called)
self.assertEqual(Notification.objects.count(), 0)
@patch("hc.api.transports.requests.request")
def test_webhooks_handle_unicode_post_body(self, mock_request):
definition = {
"method_down": "POST",
"url_down": "http://foo.com",
"body_down": "(╯°□°)╯︵ ┻━┻",
"headers_down": {},
}
self._setup_data("webhook", json.dumps(definition))
self.check.save()
self.channel.notify(self.check)
args, kwargs = mock_request.call_args
# unicode should be encoded into utf-8
self.assertIsInstance(kwargs["data"], bytes)
@patch("hc.api.transports.requests.request")
def test_webhooks_handle_post_headers(self, mock_request):
definition = {
"method_down": "POST",
"url_down": "http://foo.com",
"body_down": "data",
"headers_down": {"Content-Type": "application/json"},
}
self._setup_data("webhook", json.dumps(definition))
self.channel.notify(self.check)
headers = {"User-Agent": "healthchecks.io", "Content-Type": "application/json"}
mock_request.assert_called_with(
"post", "http://foo.com", data=b"data", headers=headers, timeout=5
)
@patch("hc.api.transports.requests.request")
def test_webhooks_handle_get_headers(self, mock_request):
definition = {
"method_down": "GET",
"url_down": "http://foo.com",
"body_down": "",
"headers_down": {"Content-Type": "application/json"},
}
self._setup_data("webhook", json.dumps(definition))
self.channel.notify(self.check)
headers = {"User-Agent": "healthchecks.io", "Content-Type": "application/json"}
mock_request.assert_called_with(
"get", "http://foo.com", headers=headers, timeout=5
)
@patch("hc.api.transports.requests.request")
def test_webhooks_allow_user_agent_override(self, mock_request):
definition = {
"method_down": "GET",
"url_down": "http://foo.com",
"body_down": "",
"headers_down": {"User-Agent": "My-Agent"},
}
self._setup_data("webhook", json.dumps(definition))
self.channel.notify(self.check)
headers = {"User-Agent": "My-Agent"}
mock_request.assert_called_with(
"get", "http://foo.com", headers=headers, timeout=5
)
@patch("hc.api.transports.requests.request")
def test_webhooks_support_variables_in_headers(self, mock_request):
definition = {
"method_down": "GET",
"url_down": "http://foo.com",
"body_down": "",
"headers_down": {"X-Message": "$NAME is DOWN"},
}
self._setup_data("webhook", json.dumps(definition))
self.check.name = "Foo"
self.check.save()
self.channel.notify(self.check)
headers = {"User-Agent": "healthchecks.io", "X-Message": "Foo is DOWN"}
mock_request.assert_called_with(
"get", "http://foo.com", headers=headers, timeout=5
)
def test_email(self):
self._setup_data("email", "alice@example.org")
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "")
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
self.assertTrue("X-Status-Url" in email.extra_headers)
self.assertTrue("List-Unsubscribe" in email.extra_headers)
self.assertTrue("List-Unsubscribe-Post" in email.extra_headers)
def test_email_transport_handles_json_value(self):
payload = {"value": "alice@example.org", "up": True, "down": True}
self._setup_data("email", json.dumps(payload))
self.channel.notify(self.check)
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
def test_it_reports_unverified_email(self):
self._setup_data("email", "alice@example.org", email_verified=False)
self.channel.notify(self.check)
# If an email is not verified, it should say so in the notification:
n = Notification.objects.get()
self.assertEqual(n.error, "Email not verified")
def test_email_checks_up_down_flags(self):
payload = {"value": "alice@example.org", "up": True, "down": False}
self._setup_data("email", json.dumps(payload))
self.channel.notify(self.check)
# This channel should not notify on "down" events:
self.assertEqual(Notification.objects.count(), 0)
self.assertEqual(len(mail.outbox), 0)
def test_email_handles_amperstand(self):
self._setup_data("email", "alice@example.org")
self.check.name = "Foo & Bar"
self.channel.notify(self.check)
email = mail.outbox[0]
self.assertEqual(email.subject, "DOWN | Foo & Bar")
@patch("hc.api.transports.requests.request")
def test_pd(self, mock_post):
self._setup_data("pd", "123")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
self.assertEqual(payload["event_type"], "trigger")
self.assertEqual(payload["service_key"], "123")
@patch("hc.api.transports.requests.request")
def test_pd_complex(self, mock_post):
self._setup_data("pd", json.dumps({"service_key": "456"}))
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
self.assertEqual(payload["event_type"], "trigger")
self.assertEqual(payload["service_key"], "456")
@patch("hc.api.transports.requests.request")
def test_pagertree(self, mock_post):
self._setup_data("pagertree", "123")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
self.assertEqual(payload["event_type"], "trigger")
@patch("hc.api.transports.requests.request")
def test_pagerteam(self, mock_post):
self._setup_data("pagerteam", "123")
self.channel.notify(self.check)
self.assertFalse(mock_post.called)
self.assertEqual(Notification.objects.count(), 0)
@patch("hc.api.transports.requests.request")
def test_slack(self, mock_post):
self._setup_data("slack", "123")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
attachment = payload["attachments"][0]
fields = {f["title"]: f["value"] for f in attachment["fields"]}
self.assertEqual(fields["Last Ping"], "an hour ago")
@patch("hc.api.transports.requests.request")
def test_slack_with_complex_value(self, mock_post):
v = json.dumps({"incoming_webhook": {"url": "123"}})
self._setup_data("slack", v)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
self.assertEqual(args[1], "123")
@patch("hc.api.transports.requests.request")
def test_slack_handles_500(self, mock_post):
self._setup_data("slack", "123")
mock_post.return_value.status_code = 500
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "Received status code 500")
@patch("hc.api.transports.requests.request", side_effect=Timeout)
def test_slack_handles_timeout(self, mock_post):
self._setup_data("slack", "123")
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "Connection timed out")
@patch("hc.api.transports.requests.request")
def test_slack_with_tabs_in_schedule(self, mock_post):
self._setup_data("slack", "123")
self.check.kind = "cron"
self.check.schedule = "*\t* * * *"
self.check.save()
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
self.assertEqual(Notification.objects.count(), 1)
self.assertTrue(mock_post.called)
@patch("hc.api.transports.requests.request")
def test_hipchat(self, mock_post):
self._setup_data("hipchat", "123")
self.channel.notify(self.check)
self.assertFalse(mock_post.called)
self.assertEqual(Notification.objects.count(), 0)
@patch("hc.api.transports.requests.request")
def test_opsgenie_with_legacy_value(self, mock_post):
self._setup_data("opsgenie", "123")
mock_post.return_value.status_code = 202
self.channel.notify(self.check)
n = Notification.objects.first()
self.assertEqual(n.error, "")
self.assertEqual(mock_post.call_count, 1)
args, kwargs = mock_post.call_args
self.assertIn("api.opsgenie.com", args[1])
payload = kwargs["json"]
self.assertIn("DOWN", payload["message"])
@patch("hc.api.transports.requests.request")
def test_opsgenie_up(self, mock_post):
self._setup_data("opsgenie", "123", status="up")
mock_post.return_value.status_code = 202
self.channel.notify(self.check)
n = Notification.objects.first()
self.assertEqual(n.error, "")
self.assertEqual(mock_post.call_count, 1)
args, kwargs = mock_post.call_args
method, url = args
self.assertTrue(str(self.check.code) in url)
@patch("hc.api.transports.requests.request")
def test_opsgenie_with_json_value(self, mock_post):
self._setup_data("opsgenie", json.dumps({"key": "456", "region": "eu"}))
mock_post.return_value.status_code = 202
self.channel.notify(self.check)
n = Notification.objects.first()
self.assertEqual(n.error, "")
self.assertEqual(mock_post.call_count, 1)
args, kwargs = mock_post.call_args
self.assertIn("api.eu.opsgenie.com", args[1])
@patch("hc.api.transports.requests.request")
def test_opsgenie_returns_error(self, mock_post):
self._setup_data("opsgenie", "123")
mock_post.return_value.status_code = 403
mock_post.return_value.json.return_value = {"message": "Nice try"}
self.channel.notify(self.check)
n = Notification.objects.first()
self.assertEqual(n.error, 'Received status code 403 with a message: "Nice try"')
@patch("hc.api.transports.requests.request")
def test_pushover(self, mock_post):
self._setup_data("po", "123|0")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertIn("DOWN", payload["title"])
@patch("hc.api.transports.requests.request")
def test_pushover_up_priority(self, mock_post):
self._setup_data("po", "123|0|2", status="up")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertIn("UP", payload["title"])
self.assertEqual(payload["priority"], 2)
self.assertIn("retry", payload)
self.assertIn("expire", payload)
@patch("hc.api.transports.requests.request")
def test_victorops(self, mock_post):
self._setup_data("victorops", "123")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
self.assertEqual(payload["message_type"], "CRITICAL")
@patch("hc.api.transports.requests.request")
def test_discord(self, mock_post):
v = json.dumps({"webhook": {"url": "123"}})
self._setup_data("discord", v)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
attachment = payload["attachments"][0]
fields = {f["title"]: f["value"] for f in attachment["fields"]}
self.assertEqual(fields["Last Ping"], "an hour ago")
@patch("hc.api.transports.requests.request")
def test_discord_rewrites_discordapp_com(self, mock_post):
v = json.dumps({"webhook": {"url": "https://discordapp.com/foo"}})
self._setup_data("discord", v)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
url = args[1]
        # discordapp.com is deprecated. For existing webhook URLs, we should
# rewrite discordapp.com to discord.com:
self.assertEqual(url, "https://discord.com/foo/slack")
@patch("hc.api.transports.requests.request")
def test_pushbullet(self, mock_post):
self._setup_data("pushbullet", "fake-token")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
_, kwargs = mock_post.call_args
self.assertEqual(kwargs["json"]["type"], "note")
self.assertEqual(kwargs["headers"]["Access-Token"], "fake-token")
@patch("hc.api.transports.requests.request")
def test_telegram(self, mock_post):
v = json.dumps({"id": 123})
self._setup_data("telegram", v)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
self.assertEqual(payload["chat_id"], 123)
self.assertTrue("The check" in payload["text"])
@patch("hc.api.transports.requests.request")
def test_telegram_returns_error(self, mock_post):
self._setup_data("telegram", json.dumps({"id": 123}))
mock_post.return_value.status_code = 400
mock_post.return_value.json.return_value = {"description": "Hi"}
self.channel.notify(self.check)
n = Notification.objects.first()
self.assertEqual(n.error, 'Received status code 400 with a message: "Hi"')
def test_telegram_obeys_rate_limit(self):
self._setup_data("telegram", json.dumps({"id": 123}))
TokenBucket.objects.create(value="tg-123", tokens=0)
self.channel.notify(self.check)
n = Notification.objects.first()
self.assertEqual(n.error, "Rate limit exceeded")
@patch("hc.api.transports.requests.request")
def test_sms(self, mock_post):
self._setup_data("sms", "+1234567890")
self.check.last_ping = now() - td(hours=2)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
n = Notification.objects.get()
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertEqual(payload["To"], "+1234567890")
self.assertFalse("\xa0" in payload["Body"])
callback_path = f"/api/v1/notifications/{n.code}/status"
self.assertTrue(payload["StatusCallback"].endswith(callback_path))
# sent SMS counter should go up
self.profile.refresh_from_db()
self.assertEqual(self.profile.sms_sent, 1)
@patch("hc.api.transports.requests.request")
def test_sms_handles_json_value(self, mock_post):
value = {"label": "foo", "value": "+1234567890"}
self._setup_data("sms", json.dumps(value))
self.check.last_ping = now() - td(hours=2)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertEqual(payload["To"], "+1234567890")
@patch("hc.api.transports.requests.request")
def test_sms_limit(self, mock_post):
# At limit already:
self.profile.last_sms_date = now()
self.profile.sms_sent = 50
self.profile.save()
self._setup_data("sms", "+1234567890")
self.channel.notify(self.check)
self.assertFalse(mock_post.called)
n = Notification.objects.get()
self.assertTrue("Monthly SMS limit exceeded" in n.error)
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
self.assertEqual(email.subject, "Monthly SMS Limit Reached")
@patch("hc.api.transports.requests.request")
def test_sms_limit_reset(self, mock_post):
# At limit, but also into a new month
self.profile.sms_sent = 50
self.profile.last_sms_date = now() - td(days=100)
self.profile.save()
self._setup_data("sms", "+1234567890")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
self.assertTrue(mock_post.called)
@patch("hc.api.transports.requests.request")
def test_whatsapp(self, mock_post):
definition = {"value": "+1234567890", "up": True, "down": True}
self._setup_data("whatsapp", json.dumps(definition))
self.check.last_ping = now() - td(hours=2)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertEqual(payload["To"], "whatsapp:+1234567890")
n = Notification.objects.get()
callback_path = f"/api/v1/notifications/{n.code}/status"
self.assertTrue(payload["StatusCallback"].endswith(callback_path))
# sent SMS counter should go up
self.profile.refresh_from_db()
self.assertEqual(self.profile.sms_sent, 1)
@patch("hc.api.transports.requests.request")
def test_whatsapp_obeys_up_down_flags(self, mock_post):
definition = {"value": "+1234567890", "up": True, "down": False}
self._setup_data("whatsapp", json.dumps(definition))
self.check.last_ping = now() - td(hours=2)
self.channel.notify(self.check)
self.assertEqual(Notification.objects.count(), 0)
self.assertFalse(mock_post.called)
@patch("hc.api.transports.requests.request")
def test_whatsapp_limit(self, mock_post):
# At limit already:
self.profile.last_sms_date = now()
self.profile.sms_sent = 50
self.profile.save()
definition = {"value": "+1234567890", "up": True, "down": True}
self._setup_data("whatsapp", json.dumps(definition))
self.channel.notify(self.check)
self.assertFalse(mock_post.called)
n = Notification.objects.get()
self.assertTrue("Monthly message limit exceeded" in n.error)
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
self.assertEqual(email.subject, "Monthly WhatsApp Limit Reached")
@patch("hc.api.transports.requests.request")
def test_call(self, mock_post):
self.profile.call_limit = 1
self.profile.save()
value = {"label": "foo", "value": "+1234567890"}
self._setup_data("call", json.dumps(value))
self.check.last_ping = now() - td(hours=2)
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertEqual(payload["To"], "+1234567890")
n = Notification.objects.get()
callback_path = f"/api/v1/notifications/{n.code}/status"
self.assertTrue(payload["StatusCallback"].endswith(callback_path))
@patch("hc.api.transports.requests.request")
def test_call_limit(self, mock_post):
# At limit already:
self.profile.last_call_date = now()
self.profile.calls_sent = 50
self.profile.save()
definition = {"value": "+1234567890"}
self._setup_data("call", json.dumps(definition))
self.channel.notify(self.check)
self.assertFalse(mock_post.called)
n = Notification.objects.get()
self.assertTrue("Monthly phone call limit exceeded" in n.error)
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
self.assertEqual(email.subject, "Monthly Phone Call Limit Reached")
@patch("hc.api.transports.requests.request")
def test_call_limit_reset(self, mock_post):
# At limit, but also into a new month
self.profile.calls_sent = 50
self.profile.last_call_date = now() - td(days=100)
self.profile.save()
self._setup_data("sms", "+1234567890")
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
self.assertTrue(mock_post.called)
@patch("apprise.Apprise")
@override_settings(APPRISE_ENABLED=True)
def test_apprise_enabled(self, mock_apprise):
self._setup_data("apprise", "123")
mock_aobj = Mock()
mock_aobj.add.return_value = True
mock_aobj.notify.return_value = True
mock_apprise.return_value = mock_aobj
self.channel.notify(self.check)
self.assertEqual(Notification.objects.count(), 1)
self.check.status = "up"
self.assertEqual(Notification.objects.count(), 1)
@patch("apprise.Apprise")
@override_settings(APPRISE_ENABLED=False)
def test_apprise_disabled(self, mock_apprise):
self._setup_data("apprise", "123")
mock_aobj = Mock()
mock_aobj.add.return_value = True
mock_aobj.notify.return_value = True
mock_apprise.return_value = mock_aobj
self.channel.notify(self.check)
self.assertEqual(Notification.objects.count(), 1)
    def test_not_implemented(self):
self._setup_data("webhook", "http://example")
self.channel.kind = "invalid"
with self.assertRaises(NotImplementedError):
self.channel.notify(self.check)
@patch("hc.api.transports.requests.request")
def test_msteams(self, mock_post):
self._setup_data("msteams", "http://example.com/webhook")
mock_post.return_value.status_code = 200
self.check.name = "_underscores_ & more"
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["json"]
self.assertEqual(payload["@type"], "MessageCard")
# summary and title should be the same, except
# title should have any special HTML characters escaped
self.assertEqual(payload["summary"], "“_underscores_ & more” is DOWN.")
self.assertEqual(payload["title"], "“_underscores_ & more” is DOWN.")
@patch("hc.api.transports.requests.request")
def test_msteams_escapes_html_and_markdown_in_desc(self, mock_post):
self._setup_data("msteams", "http://example.com/webhook")
mock_post.return_value.status_code = 200
self.check.desc = """
TEST _underscore_ `backticks` <u>underline</u> \\backslash\\ "quoted"
"""
self.channel.notify(self.check)
args, kwargs = mock_post.call_args
text = kwargs["json"]["sections"][0]["text"]
self.assertIn(r"\_underscore\_", text)
self.assertIn(r"\`backticks\`", text)
self.assertIn("<u>underline</u>", text)
self.assertIn(r"\\backslash\\ ", text)
self.assertIn(""quoted"", text)
@patch("hc.api.transports.os.system")
@override_settings(SHELL_ENABLED=True)
def test_shell(self, mock_system):
definition = {"cmd_down": "logger hello", "cmd_up": ""}
self._setup_data("shell", json.dumps(definition))
mock_system.return_value = 0
self.channel.notify(self.check)
mock_system.assert_called_with("logger hello")
@patch("hc.api.transports.os.system")
@override_settings(SHELL_ENABLED=True)
def test_shell_handles_nonzero_exit_code(self, mock_system):
definition = {"cmd_down": "logger hello", "cmd_up": ""}
self._setup_data("shell", json.dumps(definition))
mock_system.return_value = 123
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "Command returned exit code 123")
@patch("hc.api.transports.os.system")
@override_settings(SHELL_ENABLED=True)
def test_shell_supports_variables(self, mock_system):
definition = {"cmd_down": "logger $NAME is $STATUS ($TAG1)", "cmd_up": ""}
self._setup_data("shell", json.dumps(definition))
mock_system.return_value = 0
self.check.name = "Database"
self.check.tags = "foo bar"
self.check.save()
self.channel.notify(self.check)
mock_system.assert_called_with("logger Database is down (foo)")
@patch("hc.api.transports.os.system")
@override_settings(SHELL_ENABLED=False)
def test_shell_disabled(self, mock_system):
definition = {"cmd_down": "logger hello", "cmd_up": ""}
self._setup_data("shell", json.dumps(definition))
self.channel.notify(self.check)
self.assertFalse(mock_system.called)
n = Notification.objects.get()
self.assertEqual(n.error, "Shell commands are not enabled")
@patch("hc.api.transports.requests.request")
def test_zulip(self, mock_post):
definition = {
"bot_email": "bot@example.org",
"api_key": "fake-key",
"mtype": "stream",
"to": "general",
}
self._setup_data("zulip", json.dumps(definition))
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
assert Notification.objects.count() == 1
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertIn("DOWN", payload["topic"])
@patch("hc.api.transports.requests.request")
def test_zulip_returns_error(self, mock_post):
definition = {
"bot_email": "bot@example.org",
"api_key": "fake-key",
"mtype": "stream",
"to": "general",
}
self._setup_data("zulip", json.dumps(definition))
mock_post.return_value.status_code = 403
mock_post.return_value.json.return_value = {"msg": "Nice try"}
self.channel.notify(self.check)
n = Notification.objects.first()
self.assertEqual(n.error, 'Received status code 403 with a message: "Nice try"')
|
/**
* Copyright IBM Corp. 2016, 2018
*
* This source code is licensed under the Apache-2.0 license found in the
* LICENSE file in the root directory of this source tree.
*/
'use strict';
const { prefix } = require('../../globals/js/settings');
module.exports = {
context: {
prefix,
},
variants: [
{
name: 'default',
label: 'Interactive Tooltip',
notes: `
Interactive tooltip should be used if there are actions a user can take in the tooltip (e.g. a link or a button).
        For more regular use cases, e.g. giving the user more text information about something,
        use the definition tooltip or icon tooltip.
`,
},
{
name: 'definition',
label: 'Definition Tooltip',
notes: `
Definition tooltip is for regular use case of tooltip,
e.g. giving the user more text information about something, like defining a word.
This works better than the interactive tooltip in regular use cases
        because the info icon used in the interactive tooltip can be repetitive when it’s shown several times on a page.
Definition tooltip does not use any JavaScript.
If there are actions a user can take in the tooltip (e.g. a link or a button), use interactive tooltip.
For top positioning, replace bx--tooltip--definition__bottom class with bx--tooltip--definition__top.
For center/right alignment, add bx--tooltip--definition__align-center/bx--tooltip--definition__align-right class
to the DOM element with bx--tooltip--definition__bottom/bx--tooltip--definition__top.
`,
},
{
name: 'icon',
label: 'Icon Tooltip',
notes: `
Icon tooltip is for short single line of text describing an icon.
Icon tooltip does not use any JavaScript. No label should be added to this variation.
If there are actions a user can take in the tooltip (e.g. a link or a button), use interactive tooltip.
`,
},
],
};
|
"""
Classic cart-pole system implemented by Rich Sutton et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
"""
Description:
A pole is attached by an un-actuated joint to a cart, which moves along
a frictionless track. The pendulum starts upright, and the goal is to
prevent it from falling over by increasing and reducing the cart's
velocity.
Source:
This environment corresponds to the version of the cart-pole problem
described by Barto, Sutton, and Anderson
Observation:
Type: Box(4)
Num Observation Min Max
0 Cart Position -4.8 4.8
1 Cart Velocity -Inf Inf
2 Pole Angle -0.418 rad (-24 deg) 0.418 rad (24 deg)
3 Pole Angular Velocity -Inf Inf
Actions:
Type: Discrete(2)
Num Action
0 Push cart to the left
1 Push cart to the right
        Note: The amount by which the velocity is reduced or increased is not
        fixed; it depends on the angle the pole is pointing. This is because
        the center of gravity of the pole affects the amount of energy needed
        to move the cart underneath it.
Reward:
Reward is 1 for every step taken, including the termination step
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
Pole Angle is more than 12 degrees.
Cart Position is more than 2.4 (center of the cart reaches the edge of
the display).
Episode length is greater than 200.
Solved Requirements:
Considered solved when the average return is greater than or equal to
195.0 over 100 consecutive trials.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self):
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5 # actually half the pole's length
self.polemass_length = (self.masspole * self.length)
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = 'euler'
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation
# is still within bounds.
high = np.array([self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max],
dtype=np.float32)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
err_msg = "%r (%s) invalid" % (action, type(action))
assert self.action_space.contains(action), err_msg
x, x_dot, theta, theta_dot = self.state
force = self.force_mag if action == 1 else -self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
# For the interested reader:
# https://coneural.org/florian/papers/05_cart_pole.pdf
temp = (force + self.polemass_length * theta_dot ** 2 * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (self.length * (4.0 / 3.0 - self.masspole * costheta ** 2 / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = (x, x_dot, theta, theta_dot)
done = bool(
x < -self.x_threshold
or x > self.x_threshold
or theta < -self.theta_threshold_radians
or theta > self.theta_threshold_radians
)
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn(
"You are calling 'step()' even though this "
"environment has already returned done = True. You "
"should always call 'reset()' once you receive 'done = "
"True' -- any further steps are undefined behavior."
)
self.steps_beyond_done += 1
reward = 0.0
return np.array(self.state), reward, done, {}
def reset(self):
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
return np.array(self.state)
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.x_threshold * 2
scale = screen_width/world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
pole.set_color(.8, .6, .4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth/2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5, .5, .8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0, carty), (screen_width, carty))
self.track.set_color(0, 0, 0)
self.viewer.add_geom(self.track)
self._pole_geom = pole
if self.state is None:
return None
# Edit the pole polygon vertex
pole = self._pole_geom
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
pole.v = [(l, b), (l, t), (r, t), (r, b)]
x = self.state
cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
env = CartPoleEnv()
# start_state = env.reset()
# # print("start_state: ", start_state)
# state, reward, done, _ = env.step(0)
# state, reward, done, _ = env.step(0)
# print(state)
# state, reward, done, _ = env.step(0)
# print(state)
# env.reset()
# env.state = start_state
# state, reward, done, _ = env.step(0)
# state, reward, done, _ = env.step(0)
# print(state)
# state, reward, done, _ = env.step(0)
# print(state)
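# A minimal random-rollout sketch (commented out like the examples above; it
# assumes only the reset()/step() API defined in this class and numpy as np):
# obs = env.reset()
# total_reward = 0.0
# done = False
# while not done:
#     action = int(np.random.randint(2))  # CartPole has two discrete actions
#     obs, reward, done, _ = env.step(action)
#     total_reward += reward
# print("episode return:", total_reward)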
|
module.exports = function getZerosCount(number) {
let counter = 0;
for (let i = 5; number/i >= 1; i *= 5)
counter += Math.floor(number/i);
return counter;
};
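// The function above counts the trailing zeros of n! via Legendre's formula:
// floor(n/5) + floor(n/25) + floor(n/125) + ...
// Usage sketch (the require path below is hypothetical):
// const getZerosCount = require('./getZerosCount');
// getZerosCount(5);   // 1   (5! = 120)
// getZerosCount(25);  // 6
// getZerosCount(100); // 24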
|
import path from 'path';
import resolve from '@rollup/plugin-node-resolve';
import replace from '@rollup/plugin-replace';
import commonjs from '@rollup/plugin-commonjs';
import url from '@rollup/plugin-url';
import svelte from 'rollup-plugin-svelte';
import babel from '@rollup/plugin-babel';
import { terser } from 'rollup-plugin-terser';
import config from 'sapper/config/rollup.js';
// import { visualizer } from 'rollup-plugin-visualizer';
import pkg from './package.json';
const mode = process.env.NODE_ENV;
const dev = mode === 'development';
const legacy = !!process.env.SAPPER_LEGACY_BUILD;
const onwarn = (warning, onwarn) =>
(warning.code === 'MISSING_EXPORT' && /'preload'/.test(warning.message)) ||
(warning.code === 'CIRCULAR_DEPENDENCY' && /[/\\]@sapper[/\\]/.test(warning.message)) ||
onwarn(warning);
export default {
client: {
input: config.client.input(),
output: config.client.output(),
plugins: [
// visualizer(),
replace({
preventAssignment: true,
values:{
'process.browser': true,
'process.env.NODE_ENV': JSON.stringify(mode)
},
}),
svelte({
compilerOptions: {
dev,
hydratable: true
}
}),
url({
sourceDir: path.resolve(__dirname, 'src/node_modules/images'),
publicPath: '/client/'
}),
resolve({
browser: true,
dedupe: ['svelte']
}),
commonjs(),
legacy && babel({
extensions: ['.js', '.mjs', '.html', '.svelte'],
babelHelpers: 'runtime',
exclude: ['node_modules/@babel/**'],
presets: [
['@babel/preset-env', {
targets: '> 0.25%, not dead'
}]
],
plugins: [
'@babel/plugin-syntax-dynamic-import',
['@babel/plugin-transform-runtime', {
useESModules: true
}]
]
}),
!dev && terser({
module: true
})
],
preserveEntrySignatures: false,
onwarn,
},
server: {
input: config.server.input(),
output: config.server.output(),
plugins: [
replace({
preventAssignment: true,
values:{
'process.browser': false,
'process.env.NODE_ENV': JSON.stringify(mode)
},
}),
svelte({
compilerOptions: {
dev,
generate: 'ssr',
hydratable: true
},
emitCss: false
}),
url({
sourceDir: path.resolve(__dirname, 'src/node_modules/images'),
publicPath: '/client/',
emitFiles: false // already emitted by client build
}),
resolve({
dedupe: ['svelte']
}),
commonjs()
],
external: Object.keys(pkg.dependencies).concat(require('module').builtinModules),
preserveEntrySignatures: 'strict',
onwarn,
},
serviceworker: {
input: config.serviceworker.input(),
output: config.serviceworker.output(),
plugins: [
resolve(),
replace({
preventAssignment: true,
values:{
'process.browser': true,
'process.env.NODE_ENV': JSON.stringify(mode)
},
}),
commonjs(),
!dev && terser()
],
preserveEntrySignatures: false,
onwarn,
}
};
|
# -*- coding: utf-8 -*-
import os
from openprocurement.api.interfaces import IContentConfigurator
from pyramid.interfaces import IRequest
from zope.configuration.xmlconfig import file as ZcmlFile
from zope.interface import directlyProvides
import openprocurement.tender.cfaua
from openprocurement.tender.cfaua.adapters.configurator import CloseFrameworkAgreementUAConfigurator
from openprocurement.tender.cfaua.interfaces import ICloseFrameworkAgreementUA
from openprocurement.tender.cfaua.models.tender import CloseFrameworkAgreementUA
from zope.component import provideAdapter
# from openprocurement.tender.cfaua.adapters.serializable.guarantee import SerializableTenderGuarantee
# from openprocurement.tender.cfaua.adapters.serializable.minimalstep import SerializableTenderMinimalStep
# from openprocurement.tender.cfaua.adapters.serializable.value import TenderMultilotValue
# from openprocurement.tender.cfaua.interfaces import ISerializableTenderValue, ISerializableTenderGuarantee, ISerializableTenderMinimalStep
def includeme(config):
config.add_tender_procurementMethodType(CloseFrameworkAgreementUA)
config.scan("openprocurement.tender.cfaua.views")
config.scan("openprocurement.tender.cfaua.subscribers")
config.registry.registerAdapter(CloseFrameworkAgreementUAConfigurator,
(ICloseFrameworkAgreementUA, IRequest),
IContentConfigurator)
ZcmlFile(
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'configure.zcml'),
package=openprocurement.tender.cfaua
)
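# Usage sketch (assumption: standard Pyramid wiring, where includeme() above is
# invoked by the configurator; openprocurement deployments may instead activate
# this plugin through their own entry-point mechanism):
# config.include("openprocurement.tender.cfaua")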
|
/*
* iplink_hsr.c HSR device support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Arvid Brodin <arvid.brodin@alten.se>
*
* Based on iplink_vlan.c by Patrick McHardy <kaber@trash.net>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h> /* Needed by linux/if.h for some reason */
#include <linux/if.h>
#include <linux/if_arp.h>
#include "rt_names.h"
#include "utils.h"
#include "ip_common.h"
static void print_usage(FILE *f)
{
fprintf(f,
"Usage:\tip link add name NAME type hsr slave1 SLAVE1-IF slave2 SLAVE2-IF\n"
"\t[ supervision ADDR-BYTE ]\n"
"\n"
"NAME\n"
" name of new hsr device (e.g. hsr0)\n"
"SLAVE1-IF, SLAVE2-IF\n"
" the two slave devices bound to the HSR device\n"
"ADDR-BYTE\n"
" 0-255; the last byte of the multicast address used for HSR supervision\n"
" frames (default = 0)\n");
}
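/*
 * Example invocation matching the usage text above (interface names are
 * placeholders):
 *   ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 supervision 0
 */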
static void usage(void)
{
print_usage(stderr);
}
static int hsr_parse_opt(struct link_util *lu, int argc, char **argv,
struct nlmsghdr *n)
{
int ifindex;
unsigned char multicast_spec;
while (argc > 0) {
if (matches(*argv, "supervision") == 0) {
NEXT_ARG();
if (get_u8(&multicast_spec, *argv, 0))
invarg("ADDR-BYTE is invalid", *argv);
addattr_l(n, 1024, IFLA_HSR_MULTICAST_SPEC,
&multicast_spec, 1);
} else if (matches(*argv, "slave1") == 0) {
NEXT_ARG();
ifindex = ll_name_to_index(*argv);
if (ifindex == 0)
invarg("No such interface", *argv);
addattr_l(n, 1024, IFLA_HSR_SLAVE1, &ifindex, 4);
} else if (matches(*argv, "slave2") == 0) {
NEXT_ARG();
ifindex = ll_name_to_index(*argv);
if (ifindex == 0)
invarg("No such interface", *argv);
addattr_l(n, 1024, IFLA_HSR_SLAVE2, &ifindex, 4);
} else if (matches(*argv, "help") == 0) {
usage();
return -1;
} else {
fprintf(stderr, "hsr: what is \"%s\"?\n", *argv);
usage();
return -1;
}
argc--, argv++;
}
return 0;
}
static void hsr_print_opt(struct link_util *lu, FILE *f, struct rtattr *tb[])
{
SPRINT_BUF(b1);
if (!tb)
return;
if (tb[IFLA_HSR_SLAVE1] &&
RTA_PAYLOAD(tb[IFLA_HSR_SLAVE1]) < sizeof(__u32))
return;
if (tb[IFLA_HSR_SLAVE2] &&
RTA_PAYLOAD(tb[IFLA_HSR_SLAVE2]) < sizeof(__u32))
return;
if (tb[IFLA_HSR_SEQ_NR] &&
RTA_PAYLOAD(tb[IFLA_HSR_SEQ_NR]) < sizeof(__u16))
return;
if (tb[IFLA_HSR_SUPERVISION_ADDR] &&
RTA_PAYLOAD(tb[IFLA_HSR_SUPERVISION_ADDR]) < ETH_ALEN)
return;
fprintf(f, "slave1 ");
if (tb[IFLA_HSR_SLAVE1])
fprintf(f, "%s ",
ll_index_to_name(rta_getattr_u32(tb[IFLA_HSR_SLAVE1])));
else
fprintf(f, "<none> ");
fprintf(f, "slave2 ");
if (tb[IFLA_HSR_SLAVE2])
fprintf(f, "%s ",
ll_index_to_name(rta_getattr_u32(tb[IFLA_HSR_SLAVE2])));
else
fprintf(f, "<none> ");
if (tb[IFLA_HSR_SEQ_NR])
fprintf(f, "sequence %d ",
rta_getattr_u16(tb[IFLA_HSR_SEQ_NR]));
if (tb[IFLA_HSR_SUPERVISION_ADDR])
fprintf(f, "supervision %s ",
ll_addr_n2a(RTA_DATA(tb[IFLA_HSR_SUPERVISION_ADDR]),
RTA_PAYLOAD(tb[IFLA_HSR_SUPERVISION_ADDR]),
ARPHRD_VOID,
b1, sizeof(b1)));
}
static void hsr_print_help(struct link_util *lu, int argc, char **argv,
FILE *f)
{
print_usage(f);
}
struct link_util hsr_link_util = {
.id = "hsr",
	.maxattr	= IFLA_HSR_MAX,
.parse_opt = hsr_parse_opt,
.print_opt = hsr_print_opt,
.print_help = hsr_print_help,
};
|
// https://jsbin.com/qubonu/1/edit?js,output
import React from 'react';
class App extends React.Component {
constructor(){
super();
this.state = {data: [
{id: 1, name: "Simon Bailey"},{id: 2, name: "Thomas Burleson"},
{id: 3, name: "Will Button"},{id: 4, name: "Ben Clinkinbeard"},
{id: 5, name: "Kent Dodds"},{id: 6, name: "Trevor Ewen"},
{id: 7, name: "Aaron Frost"},{id: 8, name: "Joel Hooks"},
{id: 9, name: "Jafar Husain"},{id: 10, name: "Tim Kindberg"},
{id: 11, name: "John Lindquist"},
{id: 12, name: "Joe Maddalone"},
{id: 13, name: "Tyler McGinnis"},{id: 14, name: "Scott Moss"},
{id: 15, name: "Robert Penner"},{id: 16, name: "Keith Peters"},
{id: 17, name: "Lukas Ruebbelke"},
{id: 18, name: "Brett Shollenberger"}
]}
}
render(){
let rows = this.state.data.map( person => {
return <PersonRow key={person.id} data={person} />
})
return <table>
<tbody>{rows}</tbody>
</table>
}
}
const PersonRow = (props) => {
return <tr>
<td>{props.data.id}</td>
<td>{props.data.name}</td>
</tr>
}
export default App
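// Usage sketch (assumption: a classic pre-React-18 entry point with a #root node):
// import ReactDOM from 'react-dom';
// ReactDOM.render(<App />, document.getElementById('root'));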
|
import numpy as np
from matplotlib import pyplot as plt
def sigmoid(alpha, x):
return 1.0 / (1.0 + np.exp(-alpha * x))
if __name__ == '__main__':
x = np.linspace(-5.0, 5.0, 100)
s = np.vectorize(sigmoid)
    plt.plot(x, s(1.0, x), label=str(1.0))
    plt.legend()
    plt.show()
# TODO: to be written later.
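# A possible extension sketch (assumption about the intent of the TODO above):
# plot several alpha values to compare steepness, e.g.
#     for alpha in (0.5, 1.0, 2.0, 5.0):
#         plt.plot(x, s(alpha, x), label=str(alpha))
#     plt.legend()
#     plt.show()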
|
import json
import logging
import re
import urllib
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Any, Dict, Generator, Iterable, List
import click
import requests
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern
from datahub.emitter.mce_builder import make_group_urn, make_user_urn
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import (
CorpGroupSnapshot,
CorpUserSnapshot,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.schema_classes import (
CorpGroupInfoClass,
CorpUserInfoClass,
GroupMembershipClass,
)
logger = logging.getLogger(__name__)
class AzureADConfig(ConfigModel):
"""Config to create a token and connect to Azure AD instance"""
# Required
client_id: str
tenant_id: str
client_secret: str
authority: str
token_url: str
# Optional: URLs for redirect and hitting the Graph API
redirect: str = "https://login.microsoftonline.com/common/oauth2/nativeclient"
graph_url: str = "https://graph.microsoft.com/v1.0"
# Optional: Customize the mapping to DataHub Username from an attribute in the REST API response
# Reference: https://docs.microsoft.com/en-us/graph/api/user-list?view=graph-rest-1.0&tabs=http#response-1
azure_ad_response_to_username_attr: str = "userPrincipalName"
azure_ad_response_to_username_regex: str = "(.*)"
# Optional: Customize the mapping to DataHub Groupname from an attribute in the REST API response
# Reference: https://docs.microsoft.com/en-us/graph/api/group-list?view=graph-rest-1.0&tabs=http#response-1
azure_ad_response_to_groupname_attr: str = "displayName"
azure_ad_response_to_groupname_regex: str = "(.*)"
# Optional: to ingest users, groups or both
ingest_users: bool = True
ingest_groups: bool = True
ingest_group_membership: bool = True
ingest_groups_users: bool = True
users_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
groups_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
# If enabled, report will contain names of filtered users and groups.
filtered_tracking: bool = True
# Optional: Whether to mask sensitive information from workunit ID's. On by default.
mask_group_id: bool = True
mask_user_id: bool = True
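# A minimal config sketch (not part of the original module; all values below are
# placeholders, not real credentials). parse_obj is the same call used by
# AzureADSource.create further down:
# AzureADConfig.parse_obj({
#     "client_id": "<client-id>",
#     "tenant_id": "<tenant-id>",
#     "client_secret": "<client-secret>",
#     "authority": "https://login.microsoftonline.com/<tenant-id>",
#     "token_url": "https://login.microsoftonline.com/<tenant-id>/oauth2/token",
# })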
@dataclass
class AzureADSourceReport(SourceReport):
filtered: List[str] = field(default_factory=list)
filtered_tracking: bool = field(default=True, repr=False)
filtered_count: int = field(default=0)
def report_filtered(self, name: str) -> None:
self.filtered_count += 1
if self.filtered_tracking:
self.filtered.append(name)
# Source that extracts Azure AD users, groups and group memberships using Microsoft Graph REST API
class AzureADSource(Source):
"""Ingest Azure AD Users and Groups into DataHub"""
@classmethod
def create(cls, config_dict, ctx):
config = AzureADConfig.parse_obj(config_dict)
return cls(config, ctx)
def __init__(self, config: AzureADConfig, ctx: PipelineContext):
super().__init__(ctx)
self.config = config
self.report = AzureADSourceReport(
filtered_tracking=self.config.filtered_tracking
)
self.token_data = {
"grant_type": "client_credentials",
"client_id": self.config.client_id,
"tenant_id": self.config.tenant_id,
"client_secret": self.config.client_secret,
"resource": "https://graph.microsoft.com",
"scope": "https://graph.microsoft.com/.default",
}
self.token = self.get_token()
self.selected_azure_ad_groups: list = []
self.azure_ad_groups_users: list = []
def get_token(self):
token_response = requests.post(self.config.token_url, data=self.token_data)
if token_response.status_code == 200:
token = token_response.json().get("access_token")
return token
else:
error_str = (
f"Token response status code: {str(token_response.status_code)}. "
f"Token response content: {str(token_response.content)}"
)
logger.error(error_str)
self.report.report_failure("get_token", error_str)
click.echo("Error: Token response invalid")
exit()
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
        # For future developers: the ingestion logic is meant to run in this order:
# 1) the groups
# 2) the groups' memberships
# 3) the users
# Create MetadataWorkUnits for CorpGroups
if self.config.ingest_groups:
# 1) the groups
for azure_ad_groups in self._get_azure_ad_groups():
logger.info("Processing another groups batch...")
datahub_corp_group_snapshots = self._map_azure_ad_groups(
azure_ad_groups
)
for group_count, datahub_corp_group_snapshot in enumerate(
datahub_corp_group_snapshots
):
mce = MetadataChangeEvent(
proposedSnapshot=datahub_corp_group_snapshot
)
wu_id = (
f"group-{group_count + 1}"
if self.config.mask_group_id
else datahub_corp_group_snapshot.urn
)
wu = MetadataWorkUnit(id=wu_id, mce=mce)
self.report.report_workunit(wu)
yield wu
# Populate GroupMembership Aspects for CorpUsers
datahub_corp_user_urn_to_group_membership: Dict[
str, GroupMembershipClass
] = defaultdict(lambda: GroupMembershipClass(groups=[]))
if (
self.config.ingest_group_membership
and len(self.selected_azure_ad_groups) > 0
):
# 2) the groups' membership
for azure_ad_group in self.selected_azure_ad_groups:
                # Azure AD supports nested groups, but DataHub does not, so nested groups are flattened into a single list of members.
datahub_corp_group_urn = self._map_azure_ad_group_to_urn(azure_ad_group)
if not datahub_corp_group_urn:
error_str = f"Failed to extract DataHub Group Name from Azure AD Group named {azure_ad_group.get('displayName')}. Skipping..."
self.report.report_failure("azure_ad_group_mapping", error_str)
continue
self._add_group_members_to_group_membership(
datahub_corp_group_urn,
azure_ad_group,
datahub_corp_user_urn_to_group_membership,
)
if (
self.config.ingest_groups_users
and self.config.ingest_group_membership
and not self.config.ingest_users
):
# 3) the users
            # get info about the users belonging to the groups found above
datahub_corp_user_snapshots = self._map_azure_ad_users(
self.azure_ad_groups_users
)
yield from self.ingest_ad_users(
datahub_corp_user_snapshots, datahub_corp_user_urn_to_group_membership
)
# Create MetadataWorkUnits for CorpUsers
if self.config.ingest_users:
# 3) the users
for azure_ad_users in self._get_azure_ad_users():
# azure_ad_users = next(self._get_azure_ad_users())
datahub_corp_user_snapshots = self._map_azure_ad_users(azure_ad_users)
yield from self.ingest_ad_users(
datahub_corp_user_snapshots,
datahub_corp_user_urn_to_group_membership,
)
def _add_group_members_to_group_membership(
self,
parent_corp_group_urn: str,
azure_ad_group: dict,
user_urn_to_group_membership: Dict[str, GroupMembershipClass],
) -> None:
# Extract and map members for each group
for azure_ad_group_members in self._get_azure_ad_group_members(azure_ad_group):
# if group doesn't have any members, continue
if not azure_ad_group_members:
continue
for azure_ad_member in azure_ad_group_members:
odata_type = azure_ad_member.get("@odata.type")
if odata_type == "#microsoft.graph.user":
self._add_user_to_group_membership(
parent_corp_group_urn,
azure_ad_member,
user_urn_to_group_membership,
)
elif odata_type == "#microsoft.graph.group":
                    # Since DataHub does not support nested groups, we add the members to the parent group rather than to the nested one.
self._add_group_members_to_group_membership(
parent_corp_group_urn,
azure_ad_member,
user_urn_to_group_membership,
)
else:
# Unless told otherwise, we only care about users and groups. Silently skip other object types.
logger.warning(
f"Unsupported @odata.type '{odata_type}' found in Azure group member. Skipping...."
)
def _add_user_to_group_membership(
self,
group_urn: str,
azure_ad_user: dict,
user_urn_to_group_membership: Dict[str, GroupMembershipClass],
) -> None:
user_urn = self._map_azure_ad_user_to_urn(azure_ad_user)
if not user_urn:
error_str = f"Failed to extract DataHub Username from Azure ADUser {azure_ad_user.get('displayName')}. Skipping..."
self.report.report_failure("azure_ad_user_mapping", error_str)
else:
self.azure_ad_groups_users.append(azure_ad_user)
# update/create the GroupMembership aspect for this group member.
if group_urn not in user_urn_to_group_membership[user_urn].groups:
user_urn_to_group_membership[user_urn].groups.append(group_urn)
def ingest_ad_users(
self,
datahub_corp_user_snapshots: Generator[CorpUserSnapshot, Any, None],
datahub_corp_user_urn_to_group_membership: dict,
) -> Generator[MetadataWorkUnit, Any, None]:
for user_count, datahub_corp_user_snapshot in enumerate(
datahub_corp_user_snapshots
):
# Add GroupMembership if applicable
if (
datahub_corp_user_snapshot.urn
in datahub_corp_user_urn_to_group_membership.keys()
):
datahub_group_membership = (
datahub_corp_user_urn_to_group_membership.get(
datahub_corp_user_snapshot.urn
)
)
assert datahub_group_membership
datahub_corp_user_snapshot.aspects.append(datahub_group_membership)
mce = MetadataChangeEvent(proposedSnapshot=datahub_corp_user_snapshot)
wu_id = (
f"user-{user_count + 1}"
if self.config.mask_user_id
else datahub_corp_user_snapshot.urn
)
wu = MetadataWorkUnit(id=wu_id, mce=mce)
self.report.report_workunit(wu)
yield wu
def get_report(self) -> SourceReport:
return self.report
def close(self) -> None:
pass
def _get_azure_ad_groups(self) -> Iterable[List]:
yield from self._get_azure_ad_data(kind="/groups")
def _get_azure_ad_users(self) -> Iterable[List]:
yield from self._get_azure_ad_data(kind="/users")
def _get_azure_ad_group_members(self, azure_ad_group: dict) -> Iterable[List]:
group_id = azure_ad_group.get("id")
kind = f"/groups/{group_id}/members"
yield from self._get_azure_ad_data(kind=kind)
def _get_azure_ad_data(self, kind: str) -> Iterable[List]:
headers = {"Authorization": "Bearer {}".format(self.token)}
# 'ConsistencyLevel': 'eventual'}
url = self.config.graph_url + kind
while True:
if not url:
break
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = json.loads(response.text)
try:
url = json_data["@odata.nextLink"]
except KeyError:
# no more data will follow
url = False # type: ignore
yield json_data["value"]
else:
error_str = (
f"Response status code: {str(response.status_code)}. "
f"Response content: {str(response.content)}"
)
logger.error(error_str)
self.report.report_failure("_get_azure_ad_data_", error_str)
continue
def _map_identity_to_urn(self, func, id_to_extract, mapping_identifier, id_type):
result, error_str = None, None
try:
result = func(id_to_extract)
except Exception as e:
error_str = "Failed to extract DataHub {} from Azure AD {} with name {} due to '{}'".format(
id_type, id_type, id_to_extract.get("displayName"), repr(e)
)
if not result:
error_str = "Failed to extract DataHub {} from Azure AD {} with name {} due to unknown reason".format(
id_type, id_type, id_to_extract.get("displayName")
)
if error_str is not None:
logger.error(error_str)
self.report.report_failure(mapping_identifier, error_str)
return result, error_str
def _map_azure_ad_groups(self, azure_ad_groups):
for azure_ad_group in azure_ad_groups:
corp_group_urn, error_str = self._map_identity_to_urn(
self._map_azure_ad_group_to_urn,
azure_ad_group,
"azure_ad_group_mapping",
"group",
)
if error_str is not None:
continue
group_name = self._extract_regex_match_from_dict_value(
azure_ad_group,
self.config.azure_ad_response_to_groupname_attr,
self.config.azure_ad_response_to_groupname_regex,
)
if not self.config.groups_pattern.allowed(group_name):
self.report.report_filtered(f"{corp_group_urn}")
continue
self.selected_azure_ad_groups.append(azure_ad_group)
corp_group_snapshot = CorpGroupSnapshot(
urn=corp_group_urn,
aspects=[],
)
corp_group_info = self._map_azure_ad_group_to_corp_group(azure_ad_group)
corp_group_snapshot.aspects.append(corp_group_info)
yield corp_group_snapshot
# Converts Azure group profile into DataHub CorpGroupInfoClass Aspect
def _map_azure_ad_group_to_corp_group(self, group):
return CorpGroupInfoClass(
displayName=self._map_azure_ad_group_to_group_name(group),
description=group.get("description"),
email=group.get("mail"),
members=[],
groups=[],
admins=[],
)
# Creates Datahub CorpGroup Urn from Azure AD Group object
def _map_azure_ad_group_to_urn(self, azure_ad_group):
group_name = self._map_azure_ad_group_to_group_name(azure_ad_group)
if not group_name:
return None
        # URL-encode the group name so that special characters are safe inside the URN
url_encoded_group_name = urllib.parse.quote(group_name)
return make_group_urn(url_encoded_group_name)
def _map_azure_ad_group_to_group_name(self, azure_ad_group):
return self._extract_regex_match_from_dict_value(
azure_ad_group,
self.config.azure_ad_response_to_groupname_attr,
self.config.azure_ad_response_to_groupname_regex,
)
def _map_azure_ad_users(self, azure_ad_users):
for user in azure_ad_users:
corp_user_urn, error_str = self._map_identity_to_urn(
self._map_azure_ad_user_to_urn, user, "azure_ad_user_mapping", "user"
)
if error_str is not None:
continue
if not self.config.users_pattern.allowed(corp_user_urn):
self.report.report_filtered(f"{corp_user_urn}.*")
continue
corp_user_snapshot = CorpUserSnapshot(
urn=corp_user_urn,
aspects=[],
)
corp_user_info = self._map_azure_ad_user_to_corp_user(user)
corp_user_snapshot.aspects.append(corp_user_info)
yield corp_user_snapshot
def _map_azure_ad_user_to_user_name(self, azure_ad_user):
return self._extract_regex_match_from_dict_value(
azure_ad_user,
self.config.azure_ad_response_to_username_attr,
self.config.azure_ad_response_to_username_regex,
)
# Creates DataHub CorpUser Urn from Azure AD User object
def _map_azure_ad_user_to_urn(self, azure_ad_user):
user_name = self._map_azure_ad_user_to_user_name(azure_ad_user)
if not user_name:
return None
return make_user_urn(user_name)
def _map_azure_ad_user_to_corp_user(self, azure_ad_user):
full_name = (
str(azure_ad_user.get("givenName", ""))
+ " "
+ str(azure_ad_user.get("surname", ""))
)
return CorpUserInfoClass(
active=True,
displayName=azure_ad_user.get("displayName", full_name),
firstName=azure_ad_user.get("givenName", None),
lastName=azure_ad_user.get("surname", None),
fullName=full_name,
email=azure_ad_user.get("mail"),
title=azure_ad_user.get("jobTitle", None),
countryCode=azure_ad_user.get("mobilePhone", None),
)
def _extract_regex_match_from_dict_value(
self, str_dict: Dict[str, str], key: str, pattern: str
) -> str:
raw_value = str_dict.get(key)
if raw_value is None:
raise ValueError(f"Unable to find the key {key} in Group. Is it wrong?")
match = re.search(pattern, raw_value)
if match is None:
raise ValueError(
f"Unable to extract a name from {raw_value} with the pattern {pattern}"
)
return match.group()
|
# coding=utf-8
from pyecharts.chart import Chart
class Line(Chart):
"""
<<< 折线/面积图 >>>
折线图是用折线将各个数据点标志连接起来的图表,用于展现数据的变化趋势。
"""
def __init__(self, title="", subtitle="", **kwargs):
super(Line, self).__init__(title, subtitle, **kwargs)
def add(self, *args, **kwargs):
self.__add(*args, **kwargs)
return self
def __add(
self,
name,
x_axis,
y_axis,
is_symbol_show=True,
symbol_size=4,
is_smooth=False,
is_stack=False,
is_step=False,
is_fill=False,
**kwargs
):
"""
:param name:
系列名称,用于 tooltip 的显示,legend 的图例筛选。
:param x_axis:
x 坐标轴数据。
:param y_axis:
y 坐标轴数据。
:param is_symbol_show:
是否显示标记图形,默认为 True。
:param is_smooth:
是否平滑曲线显示,默认为 False。
:param is_stack:
数据堆叠,同个类目轴上系列配置相同的 stack 值可以堆叠放置。默认为 False。
:param is_step:
是否是阶梯线图。可以设置为 True 显示成阶梯线图。默认为 False。
也支持设置成'start', 'middle', 'end'分别配置在当前点,当前点与下个
点的中间下个点拐弯。
:param is_fill:
是否填充曲线所绘制面积,默认为 False。
:param kwargs:
"""
assert len(x_axis) == len(y_axis)
kwargs.update(x_axis=x_axis, type="line", flag=True)
chart = self._get_all_options(**kwargs)
xaxis, yaxis = chart["xy_axis"]
if is_stack:
is_stack = "stack_" + str(self._option["series_id"])
else:
is_stack = ""
self._option.update(xAxis=xaxis, yAxis=yaxis)
self._option.get("legend")[0].get("data").append(name)
        # Merge the x and y axis data so that, when the X axis type is set to
        # 'value', both axes do not end up showing the Y axis data.
_data = [list(z) for z in zip(x_axis, y_axis)]
self._option.get("series").append(
{
"type": "line",
"name": name,
"symbol": chart["symbol"],
"symbolSize": symbol_size,
"smooth": is_smooth,
"step": is_step,
"stack": is_stack,
"showSymbol": is_symbol_show,
"data": _data,
"label": chart["label"],
"lineStyle": chart["line_style"],
"areaStyle": chart["area_style"],
"markPoint": chart["mark_point"],
"markLine": chart["mark_line"],
"seriesId": self._option.get("series_id"),
}
)
self._config_components(**kwargs)
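# Usage sketch (assumption: the classic pyecharts 0.x API where Chart provides
# render(); the axis data below is made up):
# line = Line("Line chart example")
# line.add("Series A", ["Mon", "Tue", "Wed"], [11, 13, 10], is_smooth=True)
# line.render()  # writes render.html by default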
|
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, lib
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result)
return
return result
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Stores multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: List[str] = [
"_mgr",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_mgr: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
copy: bool = False,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_mgr", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
@classmethod
def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=cls._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes on this object.
.. warning::
attrs is experimental and may change without warning.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""
        Used when a manipulation result has one dimension lower than the
        original, such as slicing a single column of a DataFrame.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""
        Used when a manipulation result has one dimension higher than the
        original, such as Series.to_frame().
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Internals
@property
def _data(self):
        # GH#33054 retained because some downstream packages use this,
# e.g. fastparquet
return self._mgr
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
):
"""
        Construct and return axes if supplied in args/kwargs.
        If require_all, raise if not all axis arguments are supplied.
        Return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in cls._AXIS_ORDERS:
            # look for an argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError as err:
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!"
) from err
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
        # the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._mgr.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis: int, labels: Index) -> None:
labels = ensure_index(labels)
self._mgr.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(
self, method="swapaxes"
)
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
            Axis along which the level(s) are removed:
            * 0 or 'index': remove the level(s) from the index (rows).
            * 1 or 'columns': remove the level(s) from the columns.
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
        Alter axes labels using an input function or mapping. Function / dict
        values must be unique (1-to-1). Labels not contained in a dict / Series
        will be left as-is. Extra labels listed don't throw an error.
        Alternatively, change ``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
new_index = ax._transform_index(f, level)
result.set_axis(new_index, axis=axis_no, inplace=True)
result._clear_item_cache()
if inplace:
self._update_inplace(result)
return None
else:
return result.__finalize__(self, method="rename")
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._mgr.equals(other._mgr)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._mgr.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self, method="__invert__")
return result
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
        ValueError if the PandasObject does not have exactly 1 element, or if
        that element is not boolean.
Returns
-------
bool
Same single boolean value converted to bool type.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
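# Illustrative usage sketch (editor's addition): ``bool`` only succeeds for a
# single boolean element; any other input ends up in one of the errors above.
#
#   >>> import pandas as pd
#   >>> pd.Series([True]).bool()
#   True
#   >>> pd.Series([1]).bool()       # single but non-boolean -> ValueError
#   Traceback (most recent call last):
#   ...
#   ValueError: bool cannot act on a non-boolean single element Series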
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
f"The {label_axis_name} label '{key}' "
f"is not unique.{multi_message}"
)
)
return values
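# Illustrative sketch (editor's addition) of how the internal helper above resolves
# a name, first as a label of the other axis and then as a level of the given axis.
# These are private methods, so treat the exact return values as implementation
# details rather than public API.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2]},
#   ...                   index=pd.Index([10, 20], name="idx"))
#   >>> df._get_label_or_level_values("a")     # matches a column label
#   array([1, 2])
#   >>> df._get_label_or_level_values("idx")   # matches an index level
#   array([10, 20])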
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""
Iterate over (label, values) pairs on the info axis.
This is index for Series and columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
@doc(items)
def iteritems(self):
return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
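# Illustrative usage sketch (editor's addition): for a DataFrame the "info axis"
# is the columns, so iteration, ``keys``, ``items`` and the ``in`` operator are
# all column-oriented.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> list(df)                          # __iter__ -> column labels
#   ['a', 'b']
#   >>> "a" in df                         # __contains__ checks the info axis
#   True
#   >>> [label for label, col in df.items()]
#   ['a', 'b']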
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
# ptp also requires the item_from_zerodim
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(
self, method="__array_wrap__"
)
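# Illustrative sketch (editor's addition): numpy operations on pandas objects come
# back wrapped with the original axes, while zero-dimensional results are unboxed
# to plain scalars (the exact dispatch path varies by pandas/numpy version).
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1.0, 4.0]})
#   >>> np.sqrt(df)                 # result is re-wrapped with the original axes
#        a
#   0  1.0
#   1  2.0
#   >>> np.ptp(df["a"])             # scalar result stays a plain scalar
#   3.0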
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_mgr=self._mgr,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._mgr = state
elif isinstance(state, dict):
if "_data" in state and "_mgr" not in state:
# compat for older pickles
state["_mgr"] = state.pop("_data")
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _mgr to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
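# Illustrative usage sketch (editor's addition): the two hooks above are what make
# a plain pickle round-trip work.
#
#   >>> import pickle
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2]})
#   >>> pickle.loads(pickle.dumps(df)).equals(df)
#   True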
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
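# Illustrative note (editor's addition): both repr hooks above are gated on display
# options, e.g.
#
#   >>> import pandas as pd
#   >>> pd.set_option("display.latex.repr", True)          # doctest: +SKIP
#   >>> pd.set_option("display.html.table_schema", True)   # doctest: +SKIP
#
# With these enabled, notebooks pick up the LaTeX and Table Schema representations
# automatically.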
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
@doc(klass="object")
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
"""
Write {klass} to an Excel sheet.
To write a single {klass} to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt;
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible to write further
data without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: Optional[str] = "infer",
index: bool_t = True,
indent: Optional[int] = None,
) -> Optional[str]:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {'split','records','index','table'}.
* DataFrame:
- default is 'columns'
- allowed values are: {'split', 'records', 'index', 'columns',
'values', 'table'}.
* The format of the JSON string:
- 'split' : dict like {'index' -> [index], 'columns' -> [columns],
'data' -> [values]}
- 'records' : list like [{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
Describing the data, where data component is like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json : Convert a JSON string to pandas object.
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
Examples
--------
>>> import json
>>> df = pd.DataFrame(
... [["a", "b"], ["c", "d"]],
... index=["row 1", "row 2"],
... columns=["col 1", "col 2"],
... )
>>> result = df.to_json(orient="split")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"columns": [
"col 1",
"col 2"
],
"index": [
"row 1",
"row 2"
],
"data": [
[
"a",
"b"
],
[
"c",
"d"
]
]
}
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> result = df.to_json(orient="records")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
[
{
"col 1": "a",
"col 2": "b"
},
{
"col 1": "c",
"col 2": "d"
}
]
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> result = df.to_json(orient="index")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"row 1": {
"col 1": "a",
"col 2": "b"
},
"row 2": {
"col 1": "c",
"col 2": "d"
}
}
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> result = df.to_json(orient="columns")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"col 1": {
"row 1": "a",
"row 2": "c"
},
"col 2": {
"row 1": "b",
"row 2": "d"
}
}
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> result = df.to_json(orient="values")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
[
[
"a",
"b"
],
[
"c",
"d"
]
]
Encoding with Table Schema:
>>> result = df.to_json(orient="table")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"schema": {
"fields": [
{
"name": "index",
"type": "string"
},
{
"name": "col 1",
"type": "string"
},
{
"name": "col 2",
"type": "string"
}
],
"primaryKey": [
"index"
],
"pandas_version": "0.20.0"
},
"data": [
{
"index": "row 1",
"col 1": "a",
"col 2": "b"
},
{
"index": "row 2",
"col 1": "c",
"col 2": "d"
}
]
}
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
append : bool, default False
For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed"
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
connectable. See `here \
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
):
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
Requires ``\usepackage{booktabs}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
Added to Series.
.. versionchanged:: 1.0.0
Added caption and label arguments.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
The LaTeX caption to be placed inside ``\caption{}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
This is used with ``\ref{}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
self._mgr.set(item, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
# Note: we need to call ref._maybe_cache_changed even in the
# case where it will raise. (Uh, not clear why)
try:
ref._maybe_cache_changed(cacher[0], self)
except AssertionError:
# ref._mgr.setitem can raise
# AssertionError because of shape mismatch
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
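# Illustrative sketch (editor's addition) of the parent/child relationship the
# caching helpers above maintain; these are internal attributes, so the exact
# behaviour is an implementation detail.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2]})
#   >>> s = df["a"]              # column access populates the item cache
#   >>> s._is_cached
#   True
#   >>> s._get_cacher() is df    # the cached Series holds a weakref to its parent
#   True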
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` could be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._mgr.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self, method="take")
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` can not be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
# that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_values = self._mgr.fast_xs(loc)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs: Callable = xs
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._mgr.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._mgr.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _iset_item(self, loc: int, value) -> None:
self._mgr.iset(loc, value)
self._clear_item_cache()
def _set_item(self, key, value) -> None:
self._mgr.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Will return True if we are a view and are cached, but single-dtyped,
meaning that the cacher should be updated following the set.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Parameters
----------
stacklevel : int, default 4
The level of the stack to show when the error is output.
t : str, the type of setting error
force : bool, default False
If True, then force showing an error.
validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
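# Usage note (added illustration): the option consulted above is the standard
# pandas option, so callers can switch between warning, raising, and silence:
#
#   pd.set_option("mode.chained_assignment", "warn")   # default
#   pd.set_option("mode.chained_assignment", "raise")  # raise SettingWithCopyError
#   pd.set_option("mode.chained_assignment", None)     # disable the check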
def __delitem__(self, key) -> None:
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
loc = self.axes[-1].get_loc(key)
self._mgr.idelete(loc)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
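# Hedged example of the MultiIndex shorthand handled above (hypothetical frame):
#
#   df = pd.DataFrame(np.ones((2, 2)),
#                     columns=pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")]))
#   del df["a"]   # deletes both ("a", "x") and ("a", "y") via the prefix match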
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
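# Minimal usage sketch (hypothetical frame): ``get`` mirrors ``dict.get``.
#
#   df = pd.DataFrame({"a": [1, 2]})
#   df.get("a")                    # returns the 'a' column
#   df.get("missing", default=0)   # returns 0 instead of raising KeyError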
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
return self._mgr.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
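# Quick sketch of the two calling conventions dispatched above (hypothetical
# frame); both drop column 'b':
#
#   df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   df.drop(columns=["b"])   # axes built from the index/columns keywords
#   df.drop("b", axis=1)     # axes built from labels plus an explicit axis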
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
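# Sketch of the non-unique-axis branch above (hypothetical Series):
#
#   s = pd.Series([1, 2, 3], index=["a", "a", "b"])
#   s.drop("a")   # the boolean indexer removes *both* "a" rows
#   s.drop("c")   # raises KeyError because "c" is absent (errors="raise")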
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
result : same type as self
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._mgr = result._mgr
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform %(klass)s to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data.
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to back-propagate the last valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self, method="reindex")
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._mgr
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
# If we've made a copy once, no need to make another one
copy = False
if copy and new_data is self._mgr:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int, array-like, BitGenerator, np.random.RandomState, optional
If int, array-like, or BitGenerator (NumPy>=1.17), seed for
random number generator.
If np.random.RandomState, use as numpy RandomState object.
.. versionchanged:: 1.1.0
array-like and BitGenerator (for NumPy>=1.17) object now passed to
np.random.RandomState() as seed
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
If `frac` > 1, `replace` should be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsample sample of the ``DataFrame`` with replacement:
Note that the `replace` parameter has to be `True` when `frac` > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError as err:
raise KeyError(
"String passed to weights not a valid column"
) from err
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
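# Reproducibility sketch (assuming the ``df`` from the docstring above):
# passing ``np.random.RandomState(1)`` should yield the same rows as passing
# the integer seed ``1``, since both go through com.random_state().
#
#   rs = np.random.RandomState(1)
#   df.sample(n=2, random_state=rs)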
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(func, arg2=b, arg3=c)
... ) # doctest: +SKIP
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((func, 'arg2'), arg1=a, arg3=c)
... ) # doctest: +SKIP
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method: Optional[str] = None, **kwargs
) -> FrameOrSeries:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : str, optional
A passed method name providing context on where ``__finalize__``
was called.
.. warning::
The value passed as `method` is not currently considered
stable across pandas releases.
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
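# Hedged illustration of the attrs propagation above (hypothetical attribute):
#
#   df = pd.DataFrame({"a": [1, 2]})
#   df.attrs["source"] = "sensor-7"
#   df.head().__finalize__(df).attrs   # -> {'source': 'sensor-7'}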
def __getattr__(self, name: str):
"""
After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
"""
After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
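# Illustration of the warning branch above (assumed interactive use):
#
#   df = pd.DataFrame({"a": [1]})
#   df.a         # attribute access works for an existing column
#   df.b = [2]   # does NOT create a column; sets an instance attribute and
#                # emits the UserWarning constructed above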
def _dir_additions(self):
"""
Add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""
Consolidate _mgr -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._mgr.blocks)
result = f()
if len(self._mgr.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
self._mgr = self._mgr.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._mgr.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
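# Consolidation sketch (hypothetical frame): repeated column inserts create
# several single-column blocks; the helper above merges same-dtype blocks.
#
#   df = pd.DataFrame({"a": [1]})
#   df["b"] = 2
#   df["c"] = 3
#   consolidated = df._consolidate()   # int64 columns end up in one 2-D block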
@property
def _is_mixed_type(self) -> bool_t:
f = lambda: self._mgr.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self) -> bool_t:
f = lambda: self._mgr.is_numeric_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._mgr.get_numeric_data()).__finalize__(self,)
def _get_bool_data(self):
return self._constructor(self._mgr.get_bool_data()).__finalize__(self,)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed-type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._mgr.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._mgr.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
for k, v, in self._mgr.to_dict(copy=copy).items()
}
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,)
return self._constructor(new_data).__finalize__(self, method="astype")
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original are reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._mgr.copy(deep=deep)
self._clear_item_cache()
return self._constructor(data).__finalize__(self, method="copy")
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._mgr.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._mgr.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self, method="infer_objects")
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, defaults True
Whether object dtypes should be converted to ``BooleanDtypes()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : {axes_single_arg}
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in columns 'A', 'B', 'C', and 'D' with 0, 1,
2, and 3 respectively.
>>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
# set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
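# no explicit fill value: propagate existing values with the requested
# method (ffill/bfill) via the block manager's interpolate machinery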
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._mgr = result._mgr.downcast()
return result
new_data = self._mgr.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
value = value.reindex(self.index, copy=False)
value = value._values
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._mgr.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._mgr.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)._mgr
else:
raise ValueError(f"invalid fill value with a {type(value)}")
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="fillna")
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
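Examples
--------
A minimal illustrative sketch (not exhaustive; see :meth:`DataFrame.fillna`
for the full set of options):
>>> s = pd.Series([1.0, np.nan, 3.0])
>>> s.ffill()
0    1.0
1    1.0
2    3.0
dtype: float64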
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
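Examples
--------
A minimal illustrative sketch (not exhaustive; see :meth:`DataFrame.fillna`
for the full set of options):
>>> s = pd.Series([1.0, np.nan, 3.0])
>>> s.bfill()
0    1.0
1    3.0
2    3.0
dtype: float64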
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, performs the replacement in place and returns ``None``.
Note: this will modify any other views on this object
(e.g. a column from a DataFrame).
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use for replacement when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. That is why the 'a' values are replaced by 10
in rows 1 and 2, and by 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
if not (
is_scalar(to_replace)
or is_re_compilable(to_replace)
or is_list_like(to_replace)
):
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"{repr(type(to_replace).__name__)}"
)
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
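# to_replace is dict-like at this point: split it into keys/values and,
# when the values are themselves mappings (a nested dict), rebuild
# per-column to_replace/value dicts before recursing into self.replace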
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._mgr
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
# Operate column-wise
if self.ndim == 1:
raise ValueError(
"Series.replace cannot use dict-like to_replace "
"and non-None value"
)
mapping = {
col: (to_rep, value) for col, to_rep in to_replace.items()
}
return self._replace_columnwise(mapping, inplace, regex)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
new_data = self._mgr.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._mgr.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# to_replace is a scalar or compiled regex; dispatch on the type of `value`
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
# Operate column-wise
if self.ndim == 1:
raise ValueError(
"Series.replace cannot use dict-value and "
"non-None to_replace"
)
mapping = {col: (to_replace, val) for col, val in value.items()}
return self._replace_columnwise(mapping, inplace, regex)
elif not is_list_like(value): # NA -> 0
new_data = self._mgr.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="replace")
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if axis == 0:
df = self
else:
df = self.T
if isinstance(df.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if df.ndim == 2 and np.all(df.dtypes == np.dtype(object)):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(df.index))
else:
index = df.index
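# these methods are allowed even when the index is neither numeric nor
# datetime-like; any other non-linear method requires such an index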
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = df._mgr
new_data = data.interpolate(
method=method,
axis=self._info_axis_number,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
result = self._constructor(new_data)
if axis == 1:
result = result.T
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="interpolate")
# ----------------------------------------------------------------------
# Timeseries Methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`) is taken.
If there is no good value, NaN is returned for a Series, or
a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
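# a scalar `where` that falls before the first index entry has no
# prior row to return, so the result is all-NaN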
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where, dtype=np.float64)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls._values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self, method="isna")
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self, method="isnull")
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self, method="notna")
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self, method="notnull")
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
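# scalar-bounds fast path: apply ``where`` against each bound, then
# restore NaN at positions that were missing in the original data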
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self._values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
return self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for the where method to work, the threshold must
# be transformed from an array-like structure to an NDFrame.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
See Also
--------
Series.clip : Trim values at input threshold in series.
DataFrame.clip : Trim values at input threshold in dataframe.
numpy.clip : Clip (limit) the values in an array.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
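Examples
--------
A minimal illustrative sketch (DataFrame shown; Series works analogously):
>>> df = pd.DataFrame({"key": ["a", "b", "a"], "val": [1, 2, 3]})
>>> df.groupby("key")["val"].sum()
key
a    4
b    2
Name: val, dtype: int64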
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
Frequency DateOffset or string.
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
Same type as caller
Object converted to the specified frequency.
See Also
--------
reindex : Conform DataFrame to new index with optional filling logic.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill_value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
The time of day to select (e.g. ``'9:30'`` or ``datetime.time(9, 30)``).
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine whether to select times over the index (0) or the columns (1).
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select initial periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the first few rows based on a date offset.
Parameters
----------
offset : str, DateOffset or dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'1M' will display all the rows having their index within the first month.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that the data for the first 3 calendar days were returned, not the
first 3 days observed in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.is_anchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select final periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the last few rows based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days were returned, not the
last 3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self, method="rank")
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
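Examples
--------
A minimal illustration with two Series (values chosen only for
demonstration); the outer join produces the union of both indexes and
fills the gaps with NaN:
>>> s1 = pd.Series([1, 2], index=['a', 'b'])
>>> s2 = pd.Series([3, 4], index=['b', 'c'])
>>> left, right = s1.align(s2, join='outer')
>>> left
a    1.0
b    2.0
c    NaN
dtype: float64
>>> right
a    NaN
b    3.0
c    4.0
dtype: float64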
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
_left = left.fillna(method=method, axis=fill_axis, limit=limit)
assert _left is not None # needed for mypy
left = _left
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return (
left.__finalize__(self),
right.__finalize__(other),
)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._mgr
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._mgr:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return (
left.__finalize__(self),
right.__finalize__(other),
)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
else:
# GH#21947 we have an empty DataFrame, could be object-dtype
cond = cond.astype(bool)
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if isinstance(other, NDFrame):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond._values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = other[0]
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = np.asarray(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._mgr.putmask(
mask=cond, new=other, align=align, axis=block_axis,
)
result = self._constructor(new_data)
return self._update_inplace(result)
else:
new_data = self._mgr.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
result = self._constructor(new_data)
return result.__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._mgr.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self, method="shift")
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
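Examples
--------
A small sketch of the expected behaviour (values are illustrative):
>>> s = pd.Series([10, 20, 30, 40])
>>> s.slice_shift(1)
1    10
2    20
3    30
dtype: int64
>>> s.slice_shift(-1)
0    20
1    30
2    40
dtype: int64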
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self, method="slice_shift")
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis: Axis = 0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exists, a
ValueError is raised.
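Examples
--------
A minimal sketch using a daily-frequency index (dates are illustrative);
the values stay in place while the index is moved by one period:
>>> idx = pd.date_range('2020-01-01', periods=3, freq='D')
>>> s = pd.Series([1, 2, 3], index=idx)
>>> s.tshift(1)
2020-01-02    1
2020-01-03    2
2020-01-04    3
Freq: D, dtype: int64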
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
axis = self._get_axis_number(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq != orig_freq:
assert orig_freq is not None # for mypy
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
new_ax = index.shift(periods)
else:
new_ax = index.shift(periods, freq)
result = self.copy()
result.set_axis(new_ax, axis, inplace=True)
return result.__finalize__(self, method="tshift")
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
%(klass)s
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
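Examples
--------
A minimal sketch converting a tz-aware index from UTC to US/Eastern
(timestamps chosen only for illustration):
>>> idx = pd.date_range('2020-06-01', periods=2, freq='D', tz='UTC')
>>> s = pd.Series([1, 2], index=idx)
>>> s.tz_convert('US/Eastern')
2020-05-31 20:00:00-04:00    1
2020-06-01 20:00:00-04:00    2
Freq: D, dtype: int64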
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self.copy(deep=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self, method="tz_convert")
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self.copy(deep=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self, method="tz_localize")
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(
self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or DataFrame provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
mean 2006-09-01 08:00:00
min 2000-01-01 00:00:00
25% 2004-12-31 12:00:00
50% 2010-01-01 00:00:00
75% 2010-01-01 00:00:00
max 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all') # doctest: +SKIP
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN a
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object]) # doctest: +SKIP
object
count 3
unique 3
top a
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number]) # doctest: +SKIP
categorical object
count 3 3
unique 3 3
top f a
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object]) # doctest: +SKIP
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data):
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Label] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
_data = self.fillna(method=fill_method, axis=axis, limit=limit)
assert _data is not None # needed for mypy
data = _data
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name1, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_any_desc,
func=nanops.nanany,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_all_desc,
func=nanops.nanall,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc_mad)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
func=nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
func=nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
func=nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="minimum",
accum_func=np.minimum.accumulate,
accum_func_name="min",
examples=_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="sum",
accum_func=np.cumsum,
accum_func_name="sum",
examples=_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="product",
accum_func=np.cumprod,
accum_func_name="prod",
examples=_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="maximum",
accum_func=np.maximum.accumulate,
accum_func_name="max",
examples=_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the sum of the values for the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
func=nanops.nansum,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the mean of the values for the requested axis.",
func=nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
func=nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
func=nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the product of the values for the requested axis.",
func=nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the median of the values for the requested axis.",
func=nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the maximum of the values for the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
func=nanops.nanmax,
see_also=_stat_func_see_also,
examples=_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the minimum of the values for the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
func=nanops.nanmin,
see_also=_stat_func_see_also,
examples=_min_examples,
)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@doc(Rolling)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
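# Illustrative usage (values assumed): pd.Series([1, 2, 3, 4]).rolling(2).sum()
# yields [NaN, 3.0, 5.0, 7.0]; passing a win_type (e.g. 'triang') returns a
# Window object instead of a Rolling object, as in the branch above.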
@doc(Expanding)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@doc(EWM)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
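# Illustrative usage of ``transform`` (values assumed): a transforming
# function such as ``df.transform(lambda x: x + 1)`` returns an object of
# the same shape, while an aggregating function such as ``'sum'`` typically
# fails the length check above and raises ValueError.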
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
Return cumulative %(desc)s of %(name1)s or %(name2)s.
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``.
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter.
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(
cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
) -> Callable:
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
accum_func: Callable,
accum_func_name: str,
examples: str,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
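# Blocks are expected to store their values transposed relative to the frame,
# so transpose in and out around the nanops accumulation; the hasattr checks
# keep this safe for 1-D array-likes that have no .T attribute.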
def block_accum_func(blk_values):
values = blk_values.T if hasattr(blk_values, "T") else blk_values
result = nanops.na_accum_func(values, accum_func, skipna=skipna)
result = result.T if hasattr(result, "T") else result
return result
result = self._mgr.apply(block_accum_func)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self, method=name)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str,
examples: str,
empty_value: bool,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
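# Illustrative wiring sketch (assumed usage, not part of this module): callers
# typically bind the generated methods onto a class via these factories, e.g.
#
#     cls.any = _make_logical_function(
#         cls, "any", name1=name1, name2=name2, axis_descr=axis_descr,
#         desc=_any_desc, func=nanops.nanany,
#         see_also=_any_see_also, examples=_any_examples, empty_value=False,
#     )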
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module has implementation of the generic view."""
import posixpath
import time
import flask
import flask.json
import flask.views
import werkzeug.exceptions
from decapod_api import exceptions
from decapod_api import pagination
from decapod_common import log
try:
import gridfs.grid_file as gridfile
except ImportError:
gridfile = None
LOG = log.getLogger(__name__)
"""Logger."""
class View(flask.views.MethodView):
"""A generic view for decapod.
This has a small set of routines required for each view. It also
provides several utility methods for view registration and for
plugging into a WSGI application.
"""
NAME = "generic"
"""This is a name of the view for Flask routing."""
ENDPOINT = None
"""This is an endpoint for the view."""
@property
def request_id(self):
"""Returns a unique request ID."""
return getattr(flask.g, "request_id", "?")
@property
def request_json(self):
"""Tries to parse JSON body (with caching).
Raises proper exception on problems.
"""
try:
return flask.request.get_json(force=True)
except werkzeug.exceptions.BadRequest as exc:
LOG.error("Cannot process user request: %s", exc)
raise exceptions.NotAcceptable() from exc
@property
def request_query(self):
"""Returns a dictionary with URL Query parameters."""
return flask.request.args
@property
def request_headers(self):
return flask.request.headers
@property
def initiator_id(self):
"""Returns ID of request initiator."""
token = getattr(flask.g, "token", None)
user_id = getattr(token, "user_id", None)
return user_id
@classmethod
def register_to(cls, application):
"""Registers view to the application."""
application.add_url_rule(
make_endpoint(cls.ENDPOINT),
view_func=cls.as_view(cls.NAME)
)
def prepare_response(self, response):
"""This method prepares response to convert into JSON."""
return response
def dispatch_request(self, *args, **kwargs):
response = super().dispatch_request(*args, **kwargs)
if isinstance(response, flask.Response):
return response
try:
response = self.prepare_response(response)
except Exception as exc:
LOG.error("Cannot build model response: %s", exc)
raise exceptions.UnknownReturnValueError from exc
try:
response = flask.json.jsonify(response)
except Exception as exc:
LOG.error("Cannot convert %s to JSON: %s", response, exc)
raise exceptions.CannotConvertResultToJSONError() from exc
return response
class ModelView(View):
"""A model view for decapod.
This is still a rather generic view with some routines, related
to response building. It converts data to model-based response
according to the MODEL_STRUCTURE. Also it manages pagination
and listing.
"""
MODEL_NAME = None
"""This is a name of the model to use for response."""
@property
def model_name(self):
"""Returns a proper model name for the view.
Each view works with a single model so it makes sense to do so.
"""
return self.MODEL_NAME or self.NAME
def prepare_response(self, response):
assert isinstance(self.model_name, str)
if response is None:
return {}
return response
class CRUDView(ModelView):
"""CRUDView is the most basic and classical REST view.
It presents URL structure like:
GET /users/
GET /users/3/
POST /users/
PUT /users/3
DELETE /users/3
Also, it gives 2 methods for GET requests: get_all(self) (to get
a list of items) and get_item(self, item_id) (to get a single item).
"""
PARAMETER_TYPE = "int"
"""The type of parameter to use."""
PAGINATION_ITEMS_PER_PAGE = 25
"""How may items per pagination page to show."""
@property
def pagination(self):
"""Returns settings for current pagination."""
return pagination.make_pagination(self.request_query)
@classmethod
def register_to(cls, application):
view_func = cls.as_view(cls.NAME)
main_endpoint = make_endpoint(cls.ENDPOINT)
item_endpoint = make_endpoint(
main_endpoint, "<{0}:item_id>".format(cls.PARAMETER_TYPE)
)
application.add_url_rule(
main_endpoint,
view_func=view_func, defaults={"item_id": None}, methods=["GET"]
)
application.add_url_rule(
main_endpoint,
view_func=view_func, methods=["POST"]
)
application.add_url_rule(
item_endpoint,
view_func=view_func, methods=["GET", "POST", "DELETE"]
)
def get(self, item_id):
"""Just a shorthand to manage both GET variants."""
if item_id is None:
return self.get_all()
return self.get_item(item_id)
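# Hypothetical subclass sketch (illustrative; UserView and its payloads are
# assumptions, not part of decapod):
#
#     class UserView(CRUDView):
#         NAME = "user"
#         ENDPOINT = "/user/"
#
#         def get_all(self):
#             return {"items": []}
#
#         def get_item(self, item_id):
#             return {"id": item_id}
#
#     # Registration wires up GET/POST on /user/ and
#     # GET/POST/DELETE on /user/<int:item_id>/
#     UserView.register_to(application)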
class VersionedCRUDView(CRUDView):
"""Versioned variant of the CRUDView.
It presents URL structure like:
GET /users/
GET /users/3/
GET /users/3/version/
GET /users/3/version/3/
POST /users/
PUT /users/3
DELETE /users/3
So it allows you to define versioned variants of the models.
Additional convenience methods are:
- get_versions(item_id)
- get_version(item_id, version)
"""
VERSION_TYPE = "int"
"""Type of version. I doubt that one ever modifies that."""
ABSENT_ITEM = object()
"""Just a marker of absent element. Set if None is not enough."""
@classmethod
def register_to(cls, application):
view_func = cls.as_view(cls.NAME)
main_endpoint = make_endpoint(cls.ENDPOINT)
item_endpoint = make_endpoint(
main_endpoint, "<{0}:item_id>".format(cls.PARAMETER_TYPE)
)
version_endpoint = make_endpoint(item_endpoint, "version")
item_version_endpoint = make_endpoint(
version_endpoint, "<{0}:version>".format(cls.VERSION_TYPE)
)
default_get = {"item_id": None, "version": cls.ABSENT_ITEM}
default_versions = {"version": None}
application.add_url_rule(
main_endpoint,
view_func=view_func, defaults=default_get, methods=["GET"]
)
application.add_url_rule(
version_endpoint,
view_func=view_func, defaults=default_versions, methods=["GET"]
)
application.add_url_rule(
item_version_endpoint,
view_func=view_func, methods=["GET"]
)
application.add_url_rule(
main_endpoint,
view_func=view_func, methods=["POST"]
)
application.add_url_rule(
item_endpoint,
view_func=view_func, methods=["GET", "PUT", "DELETE"]
)
def get(self, item_id, version=ABSENT_ITEM):
if item_id is None:
return self.get_all()
if version is self.ABSENT_ITEM:
return self.get_item(item_id=item_id)
if version is None:
return self.get_versions(item_id=item_id)
return self.get_version(item_id=item_id, version=version)
def make_endpoint(*endpoint):
"""Makes endpoint suitable for Flask routing."""
url = posixpath.join(*endpoint)
if not url.startswith("/"):
url = "/{0}".format(url)
if not url.endswith("/"):
url += "/"
return url
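# Illustrative behaviour (assumed examples, not part of the original module):
#
#     make_endpoint("user")                      # -> "/user/"
#     make_endpoint("/user/", "<int:item_id>")   # -> "/user/<int:item_id>/"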
def fs_response(fileobj, download, mimetype=None, filename=None,
cache_for=None):
if gridfile is not None and isinstance(fileobj, gridfile.GridOut):
return gridfs_response(fileobj, download, cache_for)
send_file_kwargs = {}
if mimetype is not None:
send_file_kwargs["mimetype"] = mimetype
if download:
send_file_kwargs["as_attachment"] = True
if filename is not None:
send_file_kwargs["attachment_filename"] = filename
if cache_for:
send_file_kwargs["cache_timeout"] = cache_for
return flask.send_file(fileobj, **send_file_kwargs)
def gridfs_response(fileobj, download, cache_for=None):
data = fileobj_generator(fileobj)
response = flask.Response(data, mimetype=fileobj.content_type)
response.set_etag(fileobj.md5)
if download:
response.headers.add(
"Content-Disposition", "attachment", filename=fileobj.filename)
if cache_for is not None:
response.cache_control.public = False
response.cache_control.private = True
response.cache_control.no_store = True
if not cache_for:
response.cache_control.no_cache = True
else:
response.cache_control.max_age = cache_for
response.expires = int(time.time() + cache_for)
return response
def fileobj_generator(fileobj):
with fileobj:
for chunk in fileobj:
yield chunk
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle
import os
import sys
from vgg import *
from paddle.fluid.contrib.slim import CompressPass
from paddle.fluid.contrib.slim import build_compressor
from paddle.fluid.contrib.slim import ImitationGraph
class Model(object):
def __init__(self):
pass
def compress(self):
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
vgg = VGG11()
predict = vgg.net(img, class_dim=10)
eval_program = fluid.default_main_program().clone(for_test=True)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
with fluid.program_guard(main_program=eval_program):
acc = fluid.layers.accuracy(input=predict, label=label)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
optimizer.minimize(avg_cost)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=128)
eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=1)
train_feed_list = {'img': img.name, 'label': label.name}
train_fetch_list = {'cost': avg_cost.name}
eval_feed_list = {'img': img.name, 'label': label.name}
eval_fetch_list = {'acc': acc.name}
com_pass = CompressPass(
place,
fluid.global_scope(),
fluid.default_main_program(),
train_reader=train_reader,
train_feed_list=train_feed_list,
train_fetch_list=train_fetch_list,
eval_program=eval_program,
eval_reader=eval_reader,
eval_feed_list=eval_feed_list,
eval_fetch_list=eval_fetch_list)
com_pass.config('./config.yaml')
com_pass.run()
if __name__ == "__main__":
model = Model()
model.compress()
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE122_Heap_Based_Buffer_Overflow__c_src_wchar_t_cpy_53b.c
Label Definition File: CWE122_Heap_Based_Buffer_Overflow__c_src.label.xml
Template File: sources-sink-53b.tmpl.c
*/
/*
* @description
* CWE: 122 Heap Based Buffer Overflow
* BadSource: Initialize data as a large string
* GoodSource: Initialize data as a small string
* Sink: cpy
* BadSink : Copy data to string using wcscpy
* Flow Variant: 53 Data flow: data passed as an argument from one function through two others to a fourth; all four functions are in different source files
*
* */
#include "std_testcase.h"
#include <wchar.h>
/* all the sinks are the same, we just want to know where the hit originated if a tool flags one */
#ifndef OMITBAD
/* bad function declaration */
void CWE122_Heap_Based_Buffer_Overflow__c_src_wchar_t_cpy_53c_badSink(wchar_t * data);
void CWE122_Heap_Based_Buffer_Overflow__c_src_wchar_t_cpy_53b_badSink(wchar_t * data)
{
CWE122_Heap_Based_Buffer_Overflow__c_src_wchar_t_cpy_53c_badSink(data);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good function declaration */
void CWE122_Heap_Based_Buffer_Overflow__c_src_wchar_t_cpy_53c_goodG2BSink(wchar_t * data);
/* goodG2B uses the GoodSource with the BadSink */
void CWE122_Heap_Based_Buffer_Overflow__c_src_wchar_t_cpy_53b_goodG2BSink(wchar_t * data)
{
CWE122_Heap_Based_Buffer_Overflow__c_src_wchar_t_cpy_53c_goodG2BSink(data);
}
#endif /* OMITGOOD */
|
'use strict'
/**
* Modules (Node.js)
* @constant
*/
const fs = require('fs')
const os = require('os')
const path = require('path')
const readline = require('readline')
const url = require('url')
/**
* Modules (Electron)
* @constant
*/
const electron = require('electron')
const { remote, ipcRenderer } = electron
/**
* Modules (Third party)
* @constant
*/
const dataUriToBuffer = require('data-uri-to-buffer')
const fileType = require('file-type')
const fileUrl = require('file-url')
const getYoutubeId = require('get-youtube-id')
const { Howl, Howler } = require('howler')
const icojs = require('icojs')
const imageDownloader = require('image-downloader')
const isDebug = require('@sidneys/is-env')('debug')
const jimp = require('jimp')
const logger = require('@sidneys/logger')({ write: true })
const moment = require('moment')
const notificationProvider = remote.require('@sidneys/electron-notification-provider')
const opn = require('opn')
const shortid = require('shortid')
const dynamicThrottledQueue = require('dynamic-throttled-queue')
const _ = require('lodash')
/**
* Modules (Local)
* @constant
*/
const appManifest = remote.require('app/scripts/main-process/components/globals').appManifest
const appFilesystem = remote.require('app/scripts/main-process/components/globals').appFilesystem
const configurationManager = remote.require('app/scripts/main-process/managers/configuration-manager')
const pbSms = require('app/scripts/renderer-process/pushbullet/sms')
/**
* Application
* @constant
* @default
*/
const appName = appManifest.name
const appTemporaryDirectory = appFilesystem.tempdir
/**
* General Defaults
* @constant
* @default
*/
const recentPushesAmount = 5
/**
* URL Defaults
* @constant
* @default
*/
const faviconEndpoint = 'https://pb-for-desktop-besticon.herokuapp.com/icon?fallback_icon_color=4AB367&formats=ico,png&size=1..120..200&url='
const pushbulletIconEndpoint = 'https://www.pushbullet.com/img/deviceicons/'
const youtubeThumbnailEndpoint = 'https://img.youtube.com/vi/'
/**
* Notification Defaults & Globals
* @constant
* @default
* @global
*/
const notificationDisplayInterval = 1000
const notificationIconWidth = 88
const notificationFilterCommentTag = '//'
const notificationFilterDebugPrefix = '[FILTERED]'
const notificationQueue = dynamicThrottledQueue({ min_rpi: 1, interval: notificationDisplayInterval, evenly_spaced: true })
/**
* Retrieve PushbulletLastNotificationTimestamp
* @return {Number} - timestamp
*/
let retrievePushbulletLastNotificationTimestamp = () => configurationManager('pushbulletLastNotificationTimestamp').get()
/**
* Store PushbulletLastNotificationTimestamp
* @param {Number} timestamp - Timestamp
* @return {undefined}
*/
let storePushbulletLastNotificationTimestamp = (timestamp) => configurationManager('pushbulletLastNotificationTimestamp').set(timestamp)
/**
* Retrieve AppShowBadgeCount
* @return {Boolean} - Show
*/
let retrieveAppShowBadgeCount = () => configurationManager('appShowBadgeCount').get()
/**
* Retrieve PushbulletHideNotificationBody
* @return {Boolean} - Hide
*/
let retrievePushbulletHideNotificationBody = () => configurationManager('pushbulletHideNotificationBody').get()
/**
* Retrieve PushbulletSoundEnabled
* @return {Boolean} - Enabled
*/
let retrievePushbulletSoundEnabled = () => configurationManager('pushbulletSoundEnabled').get()
/**
* Retrieve PushbulletSmsEnabled
* @return {Boolean} - Enabled
*/
let retrievePushbulletSmsEnabled = () => configurationManager('pushbulletSmsEnabled').get()
/**
* Retrieve PushbulletSoundFilePath
* @return {String} - Path
*/
let retrievePushbulletSoundFilePath = () => configurationManager('pushbulletSoundFilePath').get()
/**
* Retrieve PushbulletSoundVolume
* @return {Number} - Volume
*/
let retrievePushbulletSoundVolume = () => configurationManager('pushbulletSoundVolume').get()
/**
* Retrieve PushbulletNotificationFilterFilePath
* @return {String} - Path
*/
let retrievePushbulletNotificationFilterFilePath = () => configurationManager('pushbulletNotificationFilterFilePath').get()
/**
* Set application badge count
* @param {Number} total - Number to set
*/
let updateBadge = (total) => {
logger.debug('updateBadge')
if (!retrieveAppShowBadgeCount()) { return }
remote.app.badgeCount = total
}
/**
* Play Sound File
* @param {Function=} callback - Callback
*/
let playSoundFile = (callback = () => {}) => {
logger.debug('playSoundFile')
// Retrieve pushbulletSoundEnabled
const pushbulletSoundEnabled = retrievePushbulletSoundEnabled()
// Skip if not enabled
if (!pushbulletSoundEnabled) { return }
// Retrieve pushbulletSoundFilePath, pushbulletSoundVolume
const pushbulletSoundFilePath = retrievePushbulletSoundFilePath()
const pushbulletSoundVolume = retrievePushbulletSoundVolume()
// Create file:// URL
const url = fileUrl(pushbulletSoundFilePath)
// Create Sound
const sound = new Howl({
volume: pushbulletSoundVolume,
src: [ url ],
autoplay: true,
preload: true,
loop: false
})
/** @listens sound:Event#loaderror */
sound.on('loaderror', (id, error) => {
logger.error('playSoundFile', 'sound#loaderror', id, error)
// Callback
callback(error)
})
/** @listens sound:Event#playerror */
sound.on('playerror', (id, error) => {
logger.error('playSoundFile', 'sound#playerror', id, error)
// Callback
callback(error)
})
/** @listens sound:Event#end */
sound.on('end', (id) => {
logger.debug('playSoundFile', 'sound#end', id)
// Callback
callback()
})
}
/**
* Generate Image for Notification
* @param {Object} push - Push Object
* @returns {String} - Image URL
*/
let generateNotificationImage = (push) => {
logger.debug('generateNotificationImage')
// Account Image
let iconAccount
const accountId = push.receiver_iden
for (let account of window.pb.api.accounts.all) {
if (account['iden'].startsWith(accountId)) {
iconAccount = account.image_url
}
}
// Grant Image
let iconGrant
const grantId = push.client_iden
for (let grant of window.pb.api.grants.all) {
if (grant['client']['iden'] === grantId) {
iconGrant = grant['client']['image_url']
}
}
// Device Image
let iconDevice
const deviceId = push.source_device_iden
for (let device of window.pb.api.devices.all) {
if (device.iden === deviceId) {
iconDevice = `${pushbulletIconEndpoint}${device.icon}.png`
}
}
// SMS Image
let iconSms
if (push.type === 'sms_changed') {
iconSms = `${pushbulletIconEndpoint}phone.png`
}
// Chat Image
let iconChat
if (!!push.sender_email) {
const target = window.pb.targets.by_email(push.sender_email)
if (target && target.hasOwnProperty('image_url')) {
iconChat = target.image_url
}
}
// Mirroring Image
let iconMirroring
if (push.type === 'mirror') {
iconMirroring = `data:image/jpeg;base64,${push.icon}`
}
// Link Image
let iconLink
if (push.type === 'link') {
// Is YouTube URL?
const youtubeId = getYoutubeId(push.url)
if (youtubeId) {
// Fetch YouTube Thumbnail
iconLink = `${youtubeThumbnailEndpoint}${youtubeId}/hqdefault.jpg`
} else {
// Fetch Favicon
iconLink = `${faviconEndpoint}${push.url}`
}
}
// Image Fallbacks Sequence
return iconLink || iconMirroring || iconChat || iconGrant || iconDevice || iconSms || iconAccount
}
/**
* Create Note Push
* @param {String} message - Message
* @param {String=} email - Target E-Mail
* @param {String} deviceId - Target Device Id
* @param {function=} callback - Callback
*/
let createNotePush = (message, email, deviceId, callback = () => {}) => {
logger.debug('createNotePush')
window.pb.api.pushes.create({
type: 'note',
email: !!deviceId ? void 0 : email,
device_iden: !!email ? void 0 : deviceId,
title: message,
body: message
})
// Callback
callback(email || deviceId)
}
/**
* Dismiss Push
* @param {Pushbullet.Push} push - Push Object
*/
let dismissPush = (push) => {
logger.debug('dismissPush')
// direction: self
if (push.direction === 'self') {
if (!push.dismissed && !push.target_device_iden) {
logger.debug('dismissPush', 'self', 'push.title:', push.title)
window.pb.api.pushes.dismiss(push)
}
}
// direction: incoming
if (push.direction === 'incoming') {
if (!push.dismissed) {
logger.debug('dismissPush', 'incoming', 'push.title:', push.title)
window.pb.api.pushes.dismiss(push)
}
}
}
/**
* Parse strings, look for strings in tags (see https://goo.gl/ijKFPd)
* @see https://goo.gl/ijKFPd
* @param {String} message - Message String
* @returns {Object} - Message Object
*/
let parsePush = (message) => {
logger.debug('parsePush', message)
// default
let body = message
let subtitle = message
let title = message
// Parse Push for Notification Formatting
// [ Title ] [ Subtitle ] Body Text
// characters for tag detection
const tagStart = '['
const tagEnd = ']'
let tagList = title.match(new RegExp(`\\${tagStart}(.*?)\\${tagEnd}`, 'gi')) || []
let titleList = title.match(new RegExp(`${tagStart}^${tagStart}\\${tagEnd}${tagEnd}+(?=${tagEnd})`, 'gi')) || []
if (titleList.length > 0) {
/** body */
// remove all tags
tagList.forEach((tag) => {
body = body.replace(tag, '')
})
/** title */
if (titleList.length > 1) {
subtitle = _.startCase(_.toLower(titleList[0]))
titleList.shift()
title = titleList.join(` | `)
}
}
return {
body: body,
subtitle: subtitle,
title: title
}
}
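// Illustrative sketch (assumed example, not part of the original module):
// a push titled '[Alerts] [CI] Build failed' yields roughly
//   { subtitle: 'Alerts', title: 'CI', body: ' Build failed' }
// i.e. the first bracketed tag becomes the subtitle, the remaining tags join
// into the title, and all tags are stripped from the body (whitespace is
// trimmed later in decoratePush).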
/**
* Decorate Push objects
* @param {Pushbullet.Push|SmsEphemeral|SmsChangeEphemeral|NotificationEphemeral|DismissalEphemeral|ClipboardEphemeral} push - Pushbullet Push
* @returns {DecoratedPush} - Push Object
*/
let decoratePush = (push) => {
logger.debug('decoratePush', push.type)
// Copy Push Object
const decoratedPush = Object.assign({}, push)
switch (String(decoratedPush.type)) {
// Link
case 'link':
decoratedPush.icon = generateNotificationImage(decoratedPush)
if (!decoratedPush.body && !decoratedPush.title) {
decoratedPush.title = decoratedPush.url
}
if (!decoratedPush.body && decoratedPush.title) {
let parsed = parsePush(decoratedPush.title)
decoratedPush.body = parsed.body
decoratedPush.subtitle = parsed.subtitle
decoratedPush.title = parsed.title
}
break
// Note
case 'note':
decoratedPush.title = decoratedPush.title || decoratedPush.body
decoratedPush.body = decoratedPush.body || decoratedPush.title
decoratedPush.icon = generateNotificationImage(decoratedPush)
//push.title = `Note | ${push.title}`
break
// File
case 'file':
decoratedPush.title = decoratedPush.title || decoratedPush.file_name
decoratedPush.body = decoratedPush.body || decoratedPush.title
decoratedPush.url = decoratedPush.file_url
decoratedPush.icon = decoratedPush.image_url || generateNotificationImage(decoratedPush)
//push.title = `File | ${push.title}`
break
// Mirror
case 'mirror':
if (decoratedPush.application_name && decoratedPush.title) {
decoratedPush.title = `${decoratedPush.application_name} | ${decoratedPush.title}`
} else if (decoratedPush.application_name && !decoratedPush.title) {
decoratedPush.title = decoratedPush.application_name
}
decoratedPush.body = decoratedPush.body || decoratedPush.title
decoratedPush.url = decoratedPush.file_url
decoratedPush.icon = decoratedPush.image_url || generateNotificationImage(decoratedPush)
break
// SMS
case 'sms_changed':
if (decoratedPush.notifications.length === 0) { return }
let sms = decoratedPush.notifications[0]
let phonenumber = sms.title
let text = sms.body
let time = (new Date(0)).setUTCSeconds(sms.timestamp)
decoratedPush.title = `SMS | ${phonenumber}`
decoratedPush.body = `${text}${os.EOL}${moment(time).fromNow()}`
decoratedPush.icon = decoratedPush.image_url || generateNotificationImage(decoratedPush)
break
}
// Detect URLs in title
let detectedUrl = (decoratedPush.title && decoratedPush.title.match(/\bhttps?:\/\/\S+/gi)) || []
if (!decoratedPush.url && detectedUrl.length > 0) {
decoratedPush.url = detectedUrl[0]
}
// Trim
decoratedPush.title = decoratedPush.title && decoratedPush.title.trim()
decoratedPush.body = decoratedPush.body && decoratedPush.body.trim()
return decoratedPush
}
/**
* Check ANY of multiple regular expression patterns matches a given string
* @param {String} text - String to test
* @param {Array} patternList - List of regular expression patterns
* @returns {Boolean} - Yes/No
*/
let matchTextAgainstRegexList = (text, patternList) => {
logger.debug('matchTextAgainstRegexList')
// Test if any of the regular expression patterns match
const isMatch = patternList.some((pattern) => {
// Convert pattern to regex
const patternRegex = new RegExp(pattern, 'i')
// DEBUG
// logger.debug('matchTextAgainstRegexList', 'patternRegex', patternRegex)
return patternRegex.test(text)
})
// Returns true if any regex pattern was matched
return Boolean(isMatch)
}
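// Illustrative usage (assumed examples, not part of the original module):
//   matchTextAgainstRegexList('Build failed', ['^build', 'error$'])  // => true (case-insensitive)
//   matchTextAgainstRegexList('All good', ['^build', 'error$'])      // => false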
/**
* Parse a file, interpret each line as regex pattern, and check against a list of texts to determine if any matches
* @param {String} filterFilePath - Absolute path to filter file
* @param {Array} textList - List of strings to check against filter
* @param {Function=} callback - Callback
*/
let compareTextListAgainstFilterFile = (filterFilePath, textList, callback = () => {}) => {
logger.debug('compareTextListAgainstFilterFile')
// Initialize filter entry list
let filterEntryList = []
// Initialize filter file reader
const reader = readline.createInterface({
input: fs.createReadStream(filterFilePath)
})
// Filter file reader: read next line
reader.on('line', (line) => {
logger.debug('compareTextListAgainstFilterFile', 'readline#line')
// Only add filter entry if it's not comment (starting with "//")
if (!line.startsWith(notificationFilterCommentTag)) {
filterEntryList.push(line)
}
})
// Filter file reader: error
reader.on('error', (error) => {
logger.error('compareTextListAgainstFilterFile', 'reader', error)
// Callback
callback(error)
})
// Filter file reader: complete
reader.on('close', () => {
logger.debug('compareTextListAgainstFilterFile', 'readline#close')
// Cleanup filter entries, removing empty
filterEntryList = filterEntryList.filter(Boolean)
// DEBUG
// logger.debug('filter entries:')
// filterEntryList.forEach(entry => logger.debug(entry))
// Check if any filter entry matches any text
const isFilterMatch = textList.some(text => matchTextAgainstRegexList(text, filterEntryList))
// Callback
callback(null, isFilterMatch)
})
}
/**
* Show Notification
* @param {Object} notificationOptions - Notification configuration
* @param {Pushbullet.Push|Object=} push - Pushbullet Push
*/
let showNotification = (notificationOptions, push) => {
logger.debug('showNotification')
// Retrieve pushbulletNotificationFilterFilePath
const pushbulletNotificationFilterFilePath = retrievePushbulletNotificationFilterFilePath()
// Create Notification
const notification = notificationProvider.create(notificationOptions)
/** @listens notification#click */
notification.on('click', () => {
logger.debug('notification#click')
// Open url
if (notificationOptions.url) {
opn(notificationOptions.url, { wait: false })
}
// Dismiss within Pushbullet
if (push) {
dismissPush(push)
}
})
/** @listens notification#close */
notification.on('close', () => {
logger.debug('notification#close')
// Dismiss within Pushbullet
if (push) {
dismissPush(push)
}
})
/** @listens notification#reply */
notification.on('reply', (event, message) => {
logger.debug('notification#reply')
if (!!!message) {
logger.warn('reply message was empty')
return
}
// SMS Reply
if (push.type === 'sms_changed') {
pbSms.reply(message, push.source_device_iden, pbSms.getMessageThreadId(push), (target) => {
logger.debug('reply message sent', 'to:', target)
})
}
// Chat Reply
if (push.type === 'note' || push.type === 'link' || push.type === 'file') {
createNotePush(message, push.sender_email, null, (target) => {
logger.debug('reply message sent', 'to:', target)
})
}
})
/** @listens notification#error */
notification.on('error', (error) => {
logger.error('notification#error', error)
})
/** @listens notification#show */
notification.on('show', (event) => {
logger.debug('notification#show')
logger.info('New Notification', notificationOptions.title)
})
// Notification Filter
// Checks if notification title or body contain filtered terms
compareTextListAgainstFilterFile(pushbulletNotificationFilterFilePath, [ notification.title, notification.body ], (error, isFiltered) => {
// Filtered
if (isFiltered) {
logger.warn('Filtered:', notification.title)
// DEBUG
if (isDebug) {
// Prefix Notification
notification.title = `${notificationFilterDebugPrefix} ${notification.title}`
} else {
// Skip Notification
return
}
}
// Not filtered
notificationQueue(() => {
// Play Sound
playSoundFile()
// Show Notification
notification.show()
})
})
}
/**
* Aspect-resize an image and write it to disk
* @param {ArrayBuffer|Array|*} source - Source image (path or buffer)
* @param {String} target - Target image path
* @param {Number} width - Image width
* @param {Function=} callback - Callback
*/
let resizeWriteImage = (source, target, width, callback = () => {}) => {
logger.debug('resizeWriteImage')
jimp.read(source, (error, result) => {
if (error) {
logger.error('resizeWriteImage', 'jimp.read', error)
// Callback
callback(error)
return
}
result.resize(width, jimp.AUTO).write(target, (error) => {
if (error) {
logger.error('resizeWriteImage', 'result.resize', error)
callback(error)
return
}
// Callback
callback(null, target)
})
}).then((result) => {
logger.debug('resizeWriteImage', 'result', result)
})
}
/**
* Create Notification from Push Objects
* @param {Pushbullet.Push|SmsEphemeral|SmsChangeEphemeral|NotificationEphemeral|DismissalEphemeral|ClipboardEphemeral} push - Pushbullet Push
*/
let convertPushToNotification = (push) => {
logger.debug('convertPushToNotification')
// Copy Push Object
const decoratedPush = decoratePush(push)
// Create Options
const notificationOptions = {
body: decoratedPush.body,
icon: decoratedPush.icon,
subtitle: decoratedPush.subtitle,
tag: decoratedPush.iden,
title: decoratedPush.title,
url: decoratedPush.url
}
// SMS Feature Enabled?
if (decoratedPush.type === 'sms_changed') {
const pushbulletSmsEnabled = retrievePushbulletSmsEnabled()
if (!pushbulletSmsEnabled) { return }
}
// Hide Notification Body?
const pushbulletHideNotificationBody = retrievePushbulletHideNotificationBody()
if (pushbulletHideNotificationBody) {
notificationOptions.body = ''
}
// Enable SMS Reply?
if (decoratedPush.type === 'sms_changed') {
notificationOptions.hasReply = true
notificationOptions.replyPlaceholder = 'Your SMS Reply'
}
// Enable Chat Reply?
if ((decoratedPush.type === 'note' || decoratedPush.type === 'link' || decoratedPush.type === 'file') && decoratedPush.direction === 'incoming' && !!decoratedPush.sender_email) {
notificationOptions.hasReply = true
notificationOptions.replyPlaceholder = 'Your Chat Reply'
}
// Image: Create Temporary Path
const imageUrl = notificationOptions.icon || ''
const imageProtocol = url.parse(imageUrl).protocol
const imageFilepathTemporary = path.join(appTemporaryDirectory, `${appName}.push.${shortid.generate()}.png`)
// Image: Skip
if (!imageProtocol) {
showNotification(notificationOptions, decoratedPush)
return
}
// Image: Generate from Data URL
if (imageProtocol === 'data:') {
resizeWriteImage(dataUriToBuffer(imageUrl), imageFilepathTemporary, notificationIconWidth, (error, imageFilepathConverted) => {
if (error) { return }
notificationOptions.icon = imageFilepathConverted
showNotification(notificationOptions, decoratedPush)
})
return
}
// Image: Download from Web
imageDownloader.image({ url: imageUrl, dest: imageFilepathTemporary })
.then((result) => {
const imageFilepathDownloaded = result.filename
const imageBuffer = result.image
const imageType = fileType(imageBuffer)
const isIco = icojs.isICO(imageBuffer)
const isPng = imageType.mime === 'image/png'
const isJpeg = imageType.mime === 'image/jpg' || imageType.mime === 'image/jpeg'
// From .PNG
if (isPng || isJpeg) {
resizeWriteImage(imageBuffer, imageFilepathDownloaded, notificationIconWidth, (error, imageFilepathConverted) => {
if (error) { return }
notificationOptions.icon = imageFilepathConverted
showNotification(notificationOptions, decoratedPush)
})
return
}
// From .ICO
if (isIco) {
icojs.parse(imageBuffer, 'image/png').then(imageList => {
const imageMaximum = imageList[imageList.length - 1]
resizeWriteImage(Buffer.from(imageMaximum.buffer), imageFilepathDownloaded, notificationIconWidth, (error, imageFilepathConverted) => {
if (error) { return }
notificationOptions.icon = imageFilepathConverted
showNotification(notificationOptions, decoratedPush)
})
})
}
})
// Image: Fallback to App Icon
.catch((error) => {
logger.warn('convertPushToNotification', 'imageDownloader', error)
showNotification(notificationOptions, decoratedPush)
})
}
/**
* Test if a Push should be ignored for notification purposes
* @param {Object} push - Push Object
* @returns {Boolean} - Yes / No
*/
let testIfPushIsIgnored = (push) => {
//logger.debug('testIfPushIsIgnored')
// Push inactive?
if (push.hasOwnProperty('active')) {
if (push.active === false) {
return true
}
}
// // Push directed at PB for Desktop and Push dismissed?
// if (push.direction === 'self' && !!push.dismissed) {
// return true
// }
// Push is an SMS without enclosed notification?
if (push.type === 'sms_changed' && !!!push.notifications.length) {
return true
}
}
/**
* Get all Pushbullet Pushes sorted by recency (ascending)
* @param {Number=} queueLimit - Limit result to fixed number
* @returns {Array|undefined} List of Pushes
*/
let getRecentPushes = (queueLimit = 0) => {
logger.debug('getRecentPushes')
// List recent Pushes
const recentPushesList = window.pb.api.pushes.all.filter(push => !testIfPushIsIgnored(push))
// Sort recent Pushes (by date)
recentPushesList.sort((pushA, pushB) => {
const dateA = pushA.created
const dateB = pushB.created
if (dateA < dateB) {
return -1
} else if (dateA > dateB) {
return 1
}
return 0
})
// Return sliced list
return recentPushesList.slice(recentPushesList.length - queueLimit, recentPushesList.length)
}
/**
* Enqueue 1 + N Pushes
* @param {Array|Object} pushes - Pushbullet push objects
* @param {Boolean} ignoreDate - Ignore time of push, always show
* @param {Boolean} updateBadgeCount - Update badge counter
* @param {Function=} callback - Callback
*/
let enqueuePushes = (pushes, ignoreDate = false, updateBadgeCount = true, callback = () => {}) => {
logger.debug('enqueuePushes')
pushes = _.isArray(pushes) ? pushes : [ pushes ]
if (pushes.length === 0) {
logger.warn('enqueuePushes', 'pushes list was empty')
// Callback
callback(null, 0)
return
}
// Retrieve pushbulletLastNotificationTimestamp
const pushbulletLastNotificationTimestamp = retrievePushbulletLastNotificationTimestamp()
// Init pushes variables
let nextPushesList = pushes
let notifyAfterTimestamp = pushbulletLastNotificationTimestamp || 0
// Filter Pushes before notifyAfterTimestamp
if (!!!ignoreDate) {
nextPushesList = pushes.filter(push => push.created > notifyAfterTimestamp)
}
nextPushesList.forEach((push, pushIndex, pushList) => {
// Client Snoozing?
const isSnoozing = (Date.now() < remote.getGlobal('snoozeUntil'))
// Push ignored?
const isIgnoredPush = testIfPushIsIgnored(push)
if (!isSnoozing && !isIgnoredPush) {
convertPushToNotification(push)
}
// Last Iteration?
if (pushIndex !== pushList.length - 1) { return }
// Store pushbulletLastNotificationTimestamp
if (push.created > notifyAfterTimestamp) {
storePushbulletLastNotificationTimestamp(push.created)
}
// Update AppIcon Badge
if (updateBadgeCount) {
updateBadge(remote.app.getBadgeCount() + nextPushesList.length)
}
// Callback
callback(null, pushList.length)
})
}
/**
* Get all new pushes and show them (if any)
* @param {Function=} callback - Callback
* @public
*/
let enqueueRecentPushes = (callback = () => {}) => {
logger.debug('enqueueRecentPushes')
const pushesList = getRecentPushes(recentPushesAmount)
enqueuePushes(pushesList, true, false, (error, count) => {
if (error) {
logger.error('enqueueRecentPushes', error)
// Callback
callback(error)
return
}
// Callback
callback(null, count)
})
}
/**
* Init
*/
let init = () => {
logger.debug('init')
// Configure Web Audio
// https://github.com/goldfire/howler.js/issues/593
Howler.autoSuspend = false
}
/**
* @listens ipcRenderer:tray-close
*/
ipcRenderer.on('tray-close', () => {
logger.debug('ipcRenderer#tray-close')
})
/**
* @listens window:UIEvent#load
*/
window.addEventListener('load', () => {
logger.debug('window#load')
init()
})
/**
* @exports
*/
module.exports = {
enqueuePushes: enqueuePushes,
enqueueRecentPushes: enqueueRecentPushes,
updateBadge: updateBadge
}
/**
* @typedef DecoratedPush
* @mixes {Pushbullet.Push}
*/
|
#include "xmlvm.h"
#include "java_lang_String.h"
#include "org_apache_harmony_luni_platform_Endianness.h"
#define XMLVM_CURRENT_CLASS_NAME Endianness
#define XMLVM_CURRENT_PKG_CLASS_NAME org_apache_harmony_luni_platform_Endianness
__TIB_DEFINITION_org_apache_harmony_luni_platform_Endianness __TIB_org_apache_harmony_luni_platform_Endianness = {
0, // classInitializationBegan
0, // classInitialized
-1, // initializerThreadId
__INIT_org_apache_harmony_luni_platform_Endianness, // classInitializer
"org.apache.harmony.luni.platform.Endianness", // className
"org.apache.harmony.luni.platform", // package
JAVA_NULL, // enclosingClassName
JAVA_NULL, // enclosingMethodName
JAVA_NULL, // signature
(__TIB_DEFINITION_TEMPLATE*) &__TIB_java_lang_Object, // extends
sizeof(org_apache_harmony_luni_platform_Endianness), // sizeInstance
XMLVM_TYPE_CLASS};
JAVA_OBJECT __CLASS_org_apache_harmony_luni_platform_Endianness;
JAVA_OBJECT __CLASS_org_apache_harmony_luni_platform_Endianness_1ARRAY;
JAVA_OBJECT __CLASS_org_apache_harmony_luni_platform_Endianness_2ARRAY;
JAVA_OBJECT __CLASS_org_apache_harmony_luni_platform_Endianness_3ARRAY;
//XMLVM_BEGIN_IMPLEMENTATION
//XMLVM_END_IMPLEMENTATION
static JAVA_OBJECT _STATIC_org_apache_harmony_luni_platform_Endianness_BIG_ENDIAN;
static JAVA_OBJECT _STATIC_org_apache_harmony_luni_platform_Endianness_LITTLE_ENDIAN;
#include "xmlvm-reflection.h"
static XMLVM_FIELD_REFLECTION_DATA __field_reflection_data[] = {
};
static XMLVM_CONSTRUCTOR_REFLECTION_DATA __constructor_reflection_data[] = {
};
static JAVA_OBJECT constructor_dispatcher(JAVA_OBJECT constructor, JAVA_OBJECT arguments)
{
XMLVM_NOT_IMPLEMENTED();
}
static XMLVM_METHOD_REFLECTION_DATA __method_reflection_data[] = {
};
static JAVA_OBJECT method_dispatcher(JAVA_OBJECT method, JAVA_OBJECT receiver, JAVA_OBJECT arguments)
{
XMLVM_NOT_IMPLEMENTED();
}
void __INIT_org_apache_harmony_luni_platform_Endianness()
{
staticInitializerLock(&__TIB_org_apache_harmony_luni_platform_Endianness);
// While the static initializer mutex is locked, locally store the value of
// whether class initialization began or not
int initBegan = __TIB_org_apache_harmony_luni_platform_Endianness.classInitializationBegan;
// Whether or not class initialization had already began, it has begun now
__TIB_org_apache_harmony_luni_platform_Endianness.classInitializationBegan = 1;
staticInitializerUnlock(&__TIB_org_apache_harmony_luni_platform_Endianness);
JAVA_LONG curThreadId = (JAVA_LONG)pthread_self();
if (initBegan) {
if (__TIB_org_apache_harmony_luni_platform_Endianness.initializerThreadId != curThreadId) {
// Busy wait until the other thread finishes initializing this class
while (!__TIB_org_apache_harmony_luni_platform_Endianness.classInitialized) {
// do nothing
}
}
} else {
__TIB_org_apache_harmony_luni_platform_Endianness.initializerThreadId = curThreadId;
XMLVM_CLASS_USED("org.apache.harmony.luni.platform.Endianness")
__INIT_IMPL_org_apache_harmony_luni_platform_Endianness();
}
}
void __INIT_IMPL_org_apache_harmony_luni_platform_Endianness()
{
// Initialize base class if necessary
XMLVM_CLASS_INIT(java_lang_Object)
__TIB_org_apache_harmony_luni_platform_Endianness.newInstanceFunc = __NEW_INSTANCE_org_apache_harmony_luni_platform_Endianness;
// Copy vtable from base class
XMLVM_MEMCPY(__TIB_org_apache_harmony_luni_platform_Endianness.vtable, __TIB_java_lang_Object.vtable, sizeof(__TIB_java_lang_Object.vtable));
// Initialize vtable for this class
__TIB_org_apache_harmony_luni_platform_Endianness.vtable[5] = (VTABLE_PTR) &org_apache_harmony_luni_platform_Endianness_toString__;
// Initialize interface information
__TIB_org_apache_harmony_luni_platform_Endianness.numImplementedInterfaces = 0;
__TIB_org_apache_harmony_luni_platform_Endianness.implementedInterfaces = (__TIB_DEFINITION_TEMPLATE* (*)[1]) XMLVM_MALLOC(sizeof(__TIB_DEFINITION_TEMPLATE*) * 0);
// Initialize interfaces if necessary and assign tib to implementedInterfaces
_STATIC_org_apache_harmony_luni_platform_Endianness_BIG_ENDIAN = (org_apache_harmony_luni_platform_Endianness*) JAVA_NULL;
_STATIC_org_apache_harmony_luni_platform_Endianness_LITTLE_ENDIAN = (org_apache_harmony_luni_platform_Endianness*) JAVA_NULL;
__TIB_org_apache_harmony_luni_platform_Endianness.declaredFields = &__field_reflection_data[0];
__TIB_org_apache_harmony_luni_platform_Endianness.numDeclaredFields = sizeof(__field_reflection_data) / sizeof(XMLVM_FIELD_REFLECTION_DATA);
__TIB_org_apache_harmony_luni_platform_Endianness.constructorDispatcherFunc = constructor_dispatcher;
__TIB_org_apache_harmony_luni_platform_Endianness.declaredConstructors = &__constructor_reflection_data[0];
__TIB_org_apache_harmony_luni_platform_Endianness.numDeclaredConstructors = sizeof(__constructor_reflection_data) / sizeof(XMLVM_CONSTRUCTOR_REFLECTION_DATA);
__TIB_org_apache_harmony_luni_platform_Endianness.methodDispatcherFunc = method_dispatcher;
__TIB_org_apache_harmony_luni_platform_Endianness.declaredMethods = &__method_reflection_data[0];
__TIB_org_apache_harmony_luni_platform_Endianness.numDeclaredMethods = sizeof(__method_reflection_data) / sizeof(XMLVM_METHOD_REFLECTION_DATA);
__CLASS_org_apache_harmony_luni_platform_Endianness = XMLVM_CREATE_CLASS_OBJECT(&__TIB_org_apache_harmony_luni_platform_Endianness);
__TIB_org_apache_harmony_luni_platform_Endianness.clazz = __CLASS_org_apache_harmony_luni_platform_Endianness;
__TIB_org_apache_harmony_luni_platform_Endianness.baseType = JAVA_NULL;
__CLASS_org_apache_harmony_luni_platform_Endianness_1ARRAY = XMLVM_CREATE_ARRAY_CLASS_OBJECT(__CLASS_org_apache_harmony_luni_platform_Endianness);
__CLASS_org_apache_harmony_luni_platform_Endianness_2ARRAY = XMLVM_CREATE_ARRAY_CLASS_OBJECT(__CLASS_org_apache_harmony_luni_platform_Endianness_1ARRAY);
__CLASS_org_apache_harmony_luni_platform_Endianness_3ARRAY = XMLVM_CREATE_ARRAY_CLASS_OBJECT(__CLASS_org_apache_harmony_luni_platform_Endianness_2ARRAY);
org_apache_harmony_luni_platform_Endianness___CLINIT_();
//XMLVM_BEGIN_WRAPPER[__INIT_org_apache_harmony_luni_platform_Endianness]
//XMLVM_END_WRAPPER
__TIB_org_apache_harmony_luni_platform_Endianness.classInitialized = 1;
}
void __DELETE_org_apache_harmony_luni_platform_Endianness(void* me, void* client_data)
{
//XMLVM_BEGIN_WRAPPER[__DELETE_org_apache_harmony_luni_platform_Endianness]
//XMLVM_END_WRAPPER
}
void __INIT_INSTANCE_MEMBERS_org_apache_harmony_luni_platform_Endianness(JAVA_OBJECT me, int derivedClassWillRegisterFinalizer)
{
__INIT_INSTANCE_MEMBERS_java_lang_Object(me, 0 || derivedClassWillRegisterFinalizer);
((org_apache_harmony_luni_platform_Endianness*) me)->fields.org_apache_harmony_luni_platform_Endianness.displayName_ = (java_lang_String*) JAVA_NULL;
//XMLVM_BEGIN_WRAPPER[__INIT_INSTANCE_MEMBERS_org_apache_harmony_luni_platform_Endianness]
//XMLVM_END_WRAPPER
}
JAVA_OBJECT __NEW_org_apache_harmony_luni_platform_Endianness()
{ XMLVM_CLASS_INIT(org_apache_harmony_luni_platform_Endianness)
org_apache_harmony_luni_platform_Endianness* me = (org_apache_harmony_luni_platform_Endianness*) XMLVM_MALLOC(sizeof(org_apache_harmony_luni_platform_Endianness));
me->tib = &__TIB_org_apache_harmony_luni_platform_Endianness;
__INIT_INSTANCE_MEMBERS_org_apache_harmony_luni_platform_Endianness(me, 0);
//XMLVM_BEGIN_WRAPPER[__NEW_org_apache_harmony_luni_platform_Endianness]
//XMLVM_END_WRAPPER
return me;
}
JAVA_OBJECT __NEW_INSTANCE_org_apache_harmony_luni_platform_Endianness()
{
JAVA_OBJECT me = JAVA_NULL;
return me;
}
JAVA_OBJECT org_apache_harmony_luni_platform_Endianness_GET_BIG_ENDIAN()
{
XMLVM_CLASS_INIT(org_apache_harmony_luni_platform_Endianness)
return _STATIC_org_apache_harmony_luni_platform_Endianness_BIG_ENDIAN;
}
void org_apache_harmony_luni_platform_Endianness_PUT_BIG_ENDIAN(JAVA_OBJECT v)
{
XMLVM_CLASS_INIT(org_apache_harmony_luni_platform_Endianness)
_STATIC_org_apache_harmony_luni_platform_Endianness_BIG_ENDIAN = v;
}
JAVA_OBJECT org_apache_harmony_luni_platform_Endianness_GET_LITTLE_ENDIAN()
{
XMLVM_CLASS_INIT(org_apache_harmony_luni_platform_Endianness)
return _STATIC_org_apache_harmony_luni_platform_Endianness_LITTLE_ENDIAN;
}
void org_apache_harmony_luni_platform_Endianness_PUT_LITTLE_ENDIAN(JAVA_OBJECT v)
{
XMLVM_CLASS_INIT(org_apache_harmony_luni_platform_Endianness)
_STATIC_org_apache_harmony_luni_platform_Endianness_LITTLE_ENDIAN = v;
}
void org_apache_harmony_luni_platform_Endianness___CLINIT_()
{
//XMLVM_BEGIN_WRAPPER[org_apache_harmony_luni_platform_Endianness___CLINIT___]
XMLVM_ENTER_METHOD("org.apache.harmony.luni.platform.Endianness", "<clinit>", "?")
XMLVMElem _r0;
XMLVMElem _r1;
XMLVM_SOURCE_POSITION("Endianness.java", 28)
_r0.o = __NEW_org_apache_harmony_luni_platform_Endianness();
// "BIG_ENDIAN"
_r1.o = xmlvm_create_java_string_from_pool(727);
XMLVM_CHECK_NPE(0)
org_apache_harmony_luni_platform_Endianness___INIT____java_lang_String(_r0.o, _r1.o);
org_apache_harmony_luni_platform_Endianness_PUT_BIG_ENDIAN( _r0.o);
XMLVM_SOURCE_POSITION("Endianness.java", 33)
_r0.o = __NEW_org_apache_harmony_luni_platform_Endianness();
XMLVM_SOURCE_POSITION("Endianness.java", 34)
// "LITTLE_ENDIAN"
_r1.o = xmlvm_create_java_string_from_pool(728);
XMLVM_CHECK_NPE(0)
org_apache_harmony_luni_platform_Endianness___INIT____java_lang_String(_r0.o, _r1.o);
org_apache_harmony_luni_platform_Endianness_PUT_LITTLE_ENDIAN( _r0.o);
XMLVM_SOURCE_POSITION("Endianness.java", 24)
XMLVM_EXIT_METHOD()
return;
//XMLVM_END_WRAPPER
}
void org_apache_harmony_luni_platform_Endianness___INIT____java_lang_String(JAVA_OBJECT me, JAVA_OBJECT n1)
{
//XMLVM_BEGIN_WRAPPER[org_apache_harmony_luni_platform_Endianness___INIT____java_lang_String]
XMLVM_ENTER_METHOD("org.apache.harmony.luni.platform.Endianness", "<init>", "?")
XMLVMElem _r0;
XMLVMElem _r1;
_r0.o = me;
_r1.o = n1;
XMLVM_SOURCE_POSITION("Endianness.java", 43)
XMLVM_CHECK_NPE(0)
java_lang_Object___INIT___(_r0.o);
XMLVM_SOURCE_POSITION("Endianness.java", 44)
XMLVM_CHECK_NPE(0)
((org_apache_harmony_luni_platform_Endianness*) _r0.o)->fields.org_apache_harmony_luni_platform_Endianness.displayName_ = _r1.o;
XMLVM_SOURCE_POSITION("Endianness.java", 45)
XMLVM_EXIT_METHOD()
return;
//XMLVM_END_WRAPPER
}
JAVA_OBJECT org_apache_harmony_luni_platform_Endianness_toString__(JAVA_OBJECT me)
{
//XMLVM_BEGIN_WRAPPER[org_apache_harmony_luni_platform_Endianness_toString__]
XMLVM_ENTER_METHOD("org.apache.harmony.luni.platform.Endianness", "toString", "?")
XMLVMElem _r0;
XMLVMElem _r1;
_r1.o = me;
XMLVM_SOURCE_POSITION("Endianness.java", 53)
XMLVM_CHECK_NPE(1)
_r0.o = ((org_apache_harmony_luni_platform_Endianness*) _r1.o)->fields.org_apache_harmony_luni_platform_Endianness.displayName_;
XMLVM_EXIT_METHOD()
return _r0.o;
//XMLVM_END_WRAPPER
}
|
import {
BrowserRouter as Router,
Switch,
Route
} from 'react-router-dom'
import Home from './pages'
import Blog from './pages/blog'
import Resume from './pages/resume'
import { ThemeProvider } from 'styled-components'
import {
// Nature,
// Traditional,
Dark,
} from './constants/colors'
const theme = {
main: Dark,
}
function App() {
  return (
    <ThemeProvider theme={theme}>
      <Router>
        <Switch>
          <Route path="/" component={Home} exact></Route>
          <Route path="/resume" component={Resume} exact></Route>
          <Route path="/blog" component={Blog} exact></Route>
        </Switch>
      </Router>
    </ThemeProvider>
  );
}
export default App;
|
const express = require('express');
const cookieParser = require('cookie-parser');
const bodyParser = require('body-parser');
const ejs = require('ejs');
const mysql = require('mysql');
var randomString = require("randomstring");
//Express
const app = express();
app.use(express.static('public'));
app.use(cookieParser());
app.use(bodyParser.urlencoded({ extended: true }));
app.set('views', './views');
app.set('view engine', 'ejs');
app.set('trust proxy', true)
//Database
var db = mysql.createConnection({
host: "localhost",
user: "nodeServer",
password: "password1234",
database: "4S"
});
db.connect(function(err) {
if (err) throw err;
});
//Requests
app.get('/', (req, res) => {
res.redirect('/login')
});
app.get('/login', (req, res) => {
res.render("login");
});
app.post('/login', (req, res) => {
    // Use parameterized queries (`?` placeholders) so user input cannot inject SQL
    db.query("SELECT * FROM users WHERE username = ?", [req.body.username], function (err, users) {
        if (err) throw err;
        if (users.length > 0) {
            if (req.body.password == users[0].password) {
                var newKey = randomString.generate(32);
                db.query("UPDATE users SET `activeKey` = ? WHERE `userid` = ?", [newKey, users[0].userid], function (err, result) {
                    if (err) throw err;
                    res.cookie("key", newKey);
                    res.redirect("/console");
                });
            } else {
                res.status(403).send('Incorrect Password');
            }
        } else {
            res.status(403).send('Incorrect Username');
        }
    });
});
app.get('/console', (req, res) => {
res.render("console", {status: "Safe"});
});
app.get('/info', (req, res) => {
var ip = req.ip;
var cookies = JSON.stringify(req.cookies);
res.render("info", {ip: ip, cookies: cookies});
});
//Start listening
const server = app.listen(80, () => {
console.log(`Express running → PORT ${server.address().port}`);
});
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Delete command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.api_lib.app import service_util
from googlecloudsdk.api_lib.app import version_util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
from googlecloudsdk.core.util import text
class VersionsDeleteError(exceptions.Error):
"""Errors occurring when deleting versions."""
pass
class Delete(base.DeleteCommand):
"""Delete a specified version.
You cannot delete a version of a service that is currently receiving traffic.
"""
detailed_help = {
'EXAMPLES': """\
To delete a specific version of a specific service, run:
$ {command} --service myService v1
To delete a named version across all services, run:
$ {command} v1
To delete multiple versions of a specific service, run:
$ {command} --service myService v1 v2
To delete multiple named versions across all services, run:
$ {command} v1 v2
""",
}
@staticmethod
def Args(parser):
parser.add_argument('versions', nargs='+', help=(
'The versions to delete (optionally filtered by the --service flag).'))
parser.add_argument('--service', '-s',
help=('If specified, only delete versions from the '
'given service.'))
def Run(self, args):
client = appengine_api_client.GetApiClientForTrack(self.ReleaseTrack())
services = client.ListServices()
# If a service is supplied, only list versions for that service
if args.service:
services = [s for s in services if s.id == args.service]
all_versions = client.ListVersions(services)
# Sort versions to make behavior deterministic enough for unit testing.
versions = sorted(version_util.GetMatchingVersions(all_versions,
args.versions,
args.service), key=str)
services_to_delete = []
for service in sorted(services):
service_versions = len(
[v for v in all_versions if v.service == service.id])
versions_to_delete = len([v for v in versions if v.service == service.id])
if service_versions == versions_to_delete and service_versions > 0:
if service.id == 'default':
raise VersionsDeleteError(
'The default service (module) may not be deleted, and must '
'comprise at least one version.'
)
else:
services_to_delete.append(service)
for version in copy.copy(versions):
if version.service == service.id:
versions.remove(version)
for version in versions:
if version.traffic_split:
# TODO(b/32869800): collect info on all versions before raising.
raise VersionsDeleteError(
'Version [{version}] is currently serving {allocation:.2f}% of '
'traffic for service [{service}].\n\n'
'Please move all traffic away via one of the following methods:\n'
' - deploying a new version with the `--promote` argument\n'
' - running `gcloud app services set-traffic`\n'
' - running `gcloud app versions migrate`'.format(
version=version.id,
allocation=version.traffic_split * 100,
service=version.service))
if services_to_delete:
word = text.Pluralize(len(services_to_delete), 'service')
log.warning(
'Requested deletion of all existing versions for the following {0}:'
.format(word))
resource_printer.Print(services_to_delete, 'list', out=log.status)
console_io.PromptContinue(prompt_string=(
'\nYou cannot delete all versions of a service. Would you like to '
'delete the entire {0} instead?').format(word), cancel_on_no=True)
service_util.DeleteServices(client, services_to_delete)
if versions:
fmt = 'list[title="Deleting the following versions:"]'
resource_printer.Print(versions, fmt, out=log.status)
console_io.PromptContinue(cancel_on_no=True)
else:
if not services_to_delete:
log.warning('No matching versions found.')
version_util.DeleteVersions(client, versions)
|
describe("List", function () {
"use strict";
var List;
beforeEach(module('angularSideBySideSelect'));
beforeEach(function () {
inject(function ($injector) {
List = $injector.get("List");
});
});
it("should be instantiable", function () {
return new List();
});
describe("data", function () {
var list,
testData;
function simpleComparator(a, b) {
return a.name === b.name;
}
beforeEach(function () {
testData = [
{
name: "test"
},
{
name: "test2"
}
];
list = new List(simpleComparator);
});
it("should allow to set data", function () {
list.setData(testData);
expect(list.getData()).toEqual(testData);
});
it("should clean data if undefined is passed as an argument to set data", function () {
list.setData(undefined);
expect(list.getData()).toEqual([]);
});
it("should clean data if null is passed as an argument to set data", function () {
list.setData(null);
expect(list.getData()).toEqual([]);
});
it("should allow to add items", function () {
list.add({name: "test"});
expect(list.getData()).toEqual([{name: "test"}]);
});
it("should allow to add the same item twice", function () {
var item = {name: "test"};
list.add(item);
list.add(item);
expect(list.getData()).toEqual([item, item]);
});
it("should remove data", function () {
list.setData(testData);
list.remove(testData[0]);
expect(list.getData()).toEqual([testData[1]]);
});
it("should ignore removing if not existent item", function () {
list.remove(testData[0]);
expect(list.getData()).toEqual([]);
});
it("should not contain item that is not in data", function () {
list.setData(testData);
expect(list.contains({name: "test3"})).toBeFalsy();
});
it("should contain items from data", function () {
list.setData(testData);
expect(list.contains({name: "test2"})).toBeTruthy();
});
it("should set data safely, without reference to the source", function () {
list.setData(testData);
testData.splice(0, 1);
expect(list.getData().length).toEqual(2);
});
it("should get data safely, without reference to the source", function () {
list.setData(testData);
list.getData().splice(0, 1);
expect(list.getData().length).toEqual(2);
});
describe("selection", function () {
beforeEach(function () {
list.setData(testData);
});
it("should have empty selection at the start", function () {
expect(list.getSelection()).toEqual([]);
});
it("should add item to selection", function () {
list.select(testData[1]);
expect(list.getSelection()).toEqual([testData[1]]);
});
it("should not add item twice to select twice called", function () {
list.select(testData[1]);
list.select(testData[1]);
expect(list.getSelection()).toEqual([testData[1]]);
});
it("should not add item twice to select twice called using comparator", function () {
list.select(testData[1]);
list.select({name: "test2"});
expect(list.getSelection()).toEqual([testData[1]]);
});
it("should allow to find out if item is selected", function () {
list.select(testData[1]);
expect(list.isSelected(testData[1])).toBeTruthy();
});
it("should allow to find out if item is selected comparing items with comparator", function () {
list.select(testData[0]);
expect(list.isSelected({name: "test"})).toBeTruthy();
});
it("should not show not selected items as selected", function () {
expect(list.isSelected(testData[1])).toBeFalsy();
});
it("should remove item from selection", function () {
list.select(testData[1]);
list.deselect(testData[1]);
expect(list.getSelection()).toEqual([]);
});
it("should ignore deselection of not selected items", function () {
list.deselect(testData[1]);
expect(list.getSelection()).toEqual([]);
});
it("should clear selection", function () {
list.select(testData[0]);
list.select(testData[1]);
list.clearSelection();
expect(list.getSelection()).toEqual([]);
});
it("should select items with toggle", function () {
list.toggle(testData[0]);
expect(list.getSelection()).toEqual([testData[0]]);
});
it("should deselect items with toggle", function () {
list.toggle(testData[0]);
list.toggle(testData[0]);
expect(list.getSelection()).toEqual([]);
});
it("should not select items that data does not contain", function () {
list.select({name: "test3"});
expect(list.getSelection()).toEqual([]);
});
});
});
});
|
# Copyright 2020 TestProject (https://testproject.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.webdriver import FirefoxOptions
from src.testproject.enums.report_type import ReportType
from src.testproject.sdk.drivers.webdriver.base import BaseDriver
class Firefox(BaseDriver):
"""Used to create a new Firefox browser instance
Args:
firefox_options (FirefoxOptions): Firefox automation session options and desired capabilities
desired_capabilities (dict): Dictionary object containing desired capabilities for Firefox automation session
token (str): The developer token used to communicate with the agent
project_name (str): Project name to report
job_name (str): Job name to report
disable_reports (bool): set to True to disable all reporting (no report will be created on TestProject)
report_type (ReportType): Type of report to produce - cloud, local or both.
"""
def __init__(
self,
firefox_options: FirefoxOptions = None,
desired_capabilities: dict = None,
token: str = None,
project_name: str = None,
job_name: str = None,
disable_reports: bool = False,
report_type: ReportType = ReportType.CLOUD_AND_LOCAL,
agent_url: str = None,
report_name: str = None,
report_path: str = None,
):
# If no options or capabilities are specified at all, use default FirefoxOptions
if firefox_options is None and desired_capabilities is None:
caps = FirefoxOptions().to_capabilities()
else:
# Specified FirefoxOptions take precedence over desired capabilities but either can be used
caps = firefox_options.to_capabilities() if firefox_options is not None else desired_capabilities
super().__init__(
capabilities=caps,
token=token,
project_name=project_name,
job_name=job_name,
disable_reports=disable_reports,
report_type=report_type,
agent_url=agent_url,
report_name=report_name,
report_path=report_path,
)
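# Minimal usage sketch (assumptions: a TestProject agent is running locally and a valid
# developer token is available; the project/job names and URL below are illustrative only):
#
#   driver = Firefox(token="<dev-token>", project_name="Examples", job_name="Firefox smoke test")
#   driver.get("https://example.org")
#   driver.quit()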
|
# Author: Jean-Remi King, <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises, assert_true, assert_equal
from ...utils import requires_sklearn
from ..search_light import SearchLight, GeneralizationLight
from .. import Vectorizer
def make_data():
n_epochs, n_chan, n_time = 50, 32, 10
X = np.random.rand(n_epochs, n_chan, n_time)
y = np.arange(n_epochs) % 2
for ii in range(n_time):
coef = np.random.randn(n_chan)
X[y == 0, :, ii] += coef
X[y == 1, :, ii] -= coef
return X, y
@requires_sklearn
def test_searchlight():
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.pipeline import make_pipeline
X, y = make_data()
n_epochs, _, n_time = X.shape
# init
assert_raises(ValueError, SearchLight, 'foo')
sl = SearchLight(Ridge())
sl = SearchLight(LogisticRegression())
# fit
assert_equal(sl.__repr__()[:13], '<SearchLight(')
sl.fit(X, y)
assert_equal(sl.__repr__()[-28:], ', fitted with 10 estimators>')
assert_raises(ValueError, sl.fit, X[1:], y)
assert_raises(ValueError, sl.fit, X[:, :, 0], y)
# transforms
assert_raises(ValueError, sl.predict, X[:, :, :2])
y_pred = sl.predict(X)
assert_true(y_pred.dtype == int)
assert_array_equal(y_pred.shape, [n_epochs, n_time])
y_proba = sl.predict_proba(X)
assert_true(y_proba.dtype == float)
assert_array_equal(y_proba.shape, [n_epochs, n_time, 2])
# score
score = sl.score(X, y)
assert_array_equal(score.shape, [n_time])
assert_true(np.sum(np.abs(score)) != 0)
assert_true(score.dtype == float)
# n_jobs
sl = SearchLight(LogisticRegression(), n_jobs=2)
sl.fit(X, y)
sl.predict(X)
sl.score(X, y)
# n_jobs > n_estimators
sl.fit(X[..., [0]], y)
sl.predict(X[..., [0]])
# pipeline
class _LogRegTransformer(LogisticRegression):
# XXX needs transformer in pipeline to get first proba only
def transform(self, X):
return super(_LogRegTransformer, self).predict_proba(X)[..., 1]
pipe = make_pipeline(SearchLight(_LogRegTransformer()),
LogisticRegression())
pipe.fit(X, y)
pipe.predict(X)
# n-dimensional feature space
X = np.random.rand(10, 3, 4, 2)
y = np.arange(10) % 2
y_preds = list()
for n_jobs in [1, 2]:
pipe = SearchLight(make_pipeline(Vectorizer(), LogisticRegression()),
n_jobs=n_jobs)
y_preds.append(pipe.fit(X, y).predict(X))
features_shape = pipe.estimators_[0].steps[0][1].features_shape_
assert_array_equal(features_shape, [3, 4])
assert_array_equal(y_preds[0], y_preds[1])
@requires_sklearn
def test_generalizationlight():
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
X, y = make_data()
n_epochs, _, n_time = X.shape
# fit
gl = GeneralizationLight(LogisticRegression())
assert_equal(gl.__repr__()[:21], '<GeneralizationLight(')
gl.fit(X, y)
assert_equal(gl.__repr__()[-28:], ', fitted with 10 estimators>')
# transforms
y_pred = gl.predict(X)
assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time])
assert_true(y_pred.dtype == int)
y_proba = gl.predict_proba(X)
assert_true(y_proba.dtype == float)
assert_array_equal(y_proba.shape, [n_epochs, n_time, n_time, 2])
# transform to different datasize
y_pred = gl.predict(X[:, :, :2])
assert_array_equal(y_pred.shape, [n_epochs, n_time, 2])
# score
score = gl.score(X[:, :, :3], y)
assert_array_equal(score.shape, [n_time, 3])
assert_true(np.sum(np.abs(score)) != 0)
assert_true(score.dtype == float)
# n_jobs
gl = GeneralizationLight(LogisticRegression(), n_jobs=2)
gl.fit(X, y)
y_pred = gl.predict(X)
assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time])
score = gl.score(X, y)
assert_array_equal(score.shape, [n_time, n_time])
# n_jobs > n_estimators
gl.fit(X[..., [0]], y)
gl.predict(X[..., [0]])
# n-dimensional feature space
X = np.random.rand(10, 3, 4, 2)
y = np.arange(10) % 2
y_preds = list()
for n_jobs in [1, 2]:
pipe = GeneralizationLight(
make_pipeline(Vectorizer(), LogisticRegression()), n_jobs=n_jobs)
y_preds.append(pipe.fit(X, y).predict(X))
features_shape = pipe.estimators_[0].steps[0][1].features_shape_
assert_array_equal(features_shape, [3, 4])
assert_array_equal(y_preds[0], y_preds[1])
|
function calcCircleArea(radius) {
let area = Math.PI * (radius ** 2)
console.log(area)
console.log(area.toFixed(2))
}
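// Worked example: calcCircleArea(3) logs 28.274333882308138 and then "28.27"
// (area = Math.PI * 3 ** 2 ≈ 28.2743).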
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _sync = _interopRequireDefault(require("react-icons/lib/md/sync"));
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var _default = _sync.default;
exports.default = _default;
|
/*!
=========================================================
* Paper Kit React - v1.2.0
=========================================================
* Product Page: https://www.creative-tim.com/product/paper-kit-react
* Copyright 2020 Creative Tim (https://www.creative-tim.com)
* Licensed under MIT (https://github.com/creativetimofficial/paper-kit-react/blob/master/LICENSE.md)
* Coded by Creative Tim
=========================================================
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*/
import React from "react";
// reactstrap components
// core components
function ProfilePageHeader() {
let pageHeader = React.createRef();
React.useEffect(() => {
if (window.innerWidth < 991) {
const updateScroll = () => {
let windowScrollTop = window.pageYOffset / 3;
pageHeader.current.style.transform =
"translate3d(0," + windowScrollTop + "px,0)";
};
window.addEventListener("scroll", updateScroll);
return function cleanup() {
window.removeEventListener("scroll", updateScroll);
};
}
});
return (
<>
<div
style={{
backgroundImage:
"url(" + require("assets/img/fabio-mangione.jpg") + ")",
}}
className="page-header page-header-xs"
data-parallax={true}
ref={pageHeader}
>
<div className="filter" />
</div>
</>
);
}
export default ProfilePageHeader;
|
from django.db import models
from django.urls import reverse
import uuid
# Create your models here.
class student(models.Model):
stud_no = models.CharField(max_length = 10, help_text = "Enter student number", default = None)
first_name = models.CharField(max_length = 20, help_text = "Enter first name", default = None)
last_name = models.CharField(max_length = 20, help_text = "Enter last name")
class_no = models.CharField(max_length = 10, help_text = "Enter class number")
class Meta:
ordering = ['last_name', 'first_name']
def __str__(self):
"""String for representing the Model object."""
return f'{self.last_name}, {self.first_name}'
class Admin:
pass
class teacher(models.Model):
id = models.UUIDField(primary_key = True, default= uuid.uuid4, help_text ='Unique id for this teacher', editable = False )
staff_no = models.CharField(max_length = 10, help_text = "Enter staff number" )
full_name = models.CharField(max_length = 30, help_text = "Enter teacher full name", default = None)
level = models.CharField(max_length = 10, help_text = "Enter teacher level")
class_held = models.CharField(max_length = 10, help_text = "Enter class number", blank = True)
def __str__(self):
return self.full_name
class teacher_student(teacher):
assigned_student = models.ManyToManyField(student, help_text = 'who is/are the students assigned to this teacher', default = None)
    def __str__(self):
        return self.staff_no
def get_absolute_url(self):
"""Returns the url to access a detail record for this teacher."""
return reverse('teacher-detail', args=[str(self.id)])
def display_assigned_student(self):
"""Create a string for the student. This is required to display student in Admin."""
return ', '.join(assigned_student.first_name for assigned_student in self.assigned_student.all()[:3])
display_assigned_student.short_description = 'assigned_student'
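# Hypothetical admin registration sketch (an illustrative admin.py, not part of this module),
# showing where display_assigned_student would typically appear:
#
#   from django.contrib import admin
#   from .models import student, teacher, teacher_student
#
#   @admin.register(teacher_student)
#   class TeacherStudentAdmin(admin.ModelAdmin):
#       list_display = ('staff_no', 'full_name', 'display_assigned_student')
#
#   admin.site.register(student)
#   admin.site.register(teacher)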
|
const uuid = require('uuid');
const Router = require('koa-router');
const userService = require('../service/user');
const Redis = require('../common/redis');
let router = new Router({
prefix: '/user'
});
// Test endpoint: check that the service is up
router.get("/test", async (ctx) => {
    ctx.data = 'Service is running';
});
// Admin user login
router.post("/login", async (ctx) => {
let sessionId = uuid.v1();
ctx.data = await userService.login(ctx.request.body);
    await Redis.set(`admin:${sessionId}`, JSON.stringify(ctx.data));
ctx.data.token = sessionId;
});
module.exports = router.routes();
|
const namespacedRoutePath = require('../../../lib/provider-registration/helpers/namespaced-route-path')
const should = require('should') // eslint-disable-line
describe('Tests for namespacedRoutePath', function () {
it('create route with :host and :id parameter', function () {
const params = { hosts: true, namespace: 'test', path: 'FeatureServer/:layer/:method' }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/test/:host/:id/FeatureServer/:layer/:method')
})
it('create route with :host parameter and without :id parameter', function () {
const params = { hosts: true, disableIdParam: true, namespace: 'test', path: 'FeatureServer/:layer/:method' }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/test/:host/FeatureServer/:layer/:method')
})
it('create route without :host parameter', function () {
const params = { namespace: 'test', path: 'FeatureServer/:layer/:method' }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/test/:id/FeatureServer/:layer/:method')
})
it('create route without :host and :id parameter', function () {
const params = { namespace: 'test', disableIdParam: true, path: 'FeatureServer/:layer/:method' }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/test/FeatureServer/:layer/:method')
})
it('create route with templated $namespace$ substring', function () {
const params = { namespace: 'test', disableIdParam: true, path: '$namespace/rest/services/FeatureServer/:layer/:method' }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/test/rest/services/FeatureServer/:layer/:method')
})
it('create route with templated $namespace$ and $providerParams$ substrings', function () {
const params = { namespace: 'test', hosts: true, path: '$namespace/rest/services/$providerParams/FeatureServer/:layer/:method' }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/test/rest/services/:host/:id/FeatureServer/:layer/:method')
})
it('create route without path construction', function () {
const params = { namespace: 'tests', path: 'rest/info', absolutePath: true }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/rest/info')
})
it('create route with prefix, :host, :id parameters', function () {
const params = { namespace: 'test', routePrefix: 'api/v1', path: 'FeatureServer/:layer/:method' }
const fullRoute = namespacedRoutePath(params)
fullRoute.should.equal('/api/v1/test/:id/FeatureServer/:layer/:method')
})
})
|
function patchIconToNamedIcon({ j, path, nameAttr, nameAttrIndex, iconName, consequentIconName, alternateIconName }) {
if (isSimpleIcon(nameAttr)) {
path.node.name.name = `${iconName}Icon`;
path.node.attributes.splice(nameAttrIndex, 1);
} else if (isTernaryIcon(nameAttr)) {
path.node.attributes.splice(nameAttrIndex, 1);
const conditionalExpression = j.conditionalExpression(
nameAttr.value.expression.test,
j.jsxElement(j.jsxOpeningElement(j.jsxIdentifier(`${consequentIconName}Icon`), path.node.attributes, true)),
j.jsxElement(j.jsxOpeningElement(j.jsxIdentifier(`${alternateIconName}Icon`), path.node.attributes, true)),
);
if (path.parent.parent.node.type === 'JSXExpressionContainer') {
path.parent.parent.node.expression = conditionalExpression;
} else {
const iconNodeIndex = path.parent.parent.node.children.findIndex(child => child === path.parent.node);
path.parent.parent.node.children[iconNodeIndex] = j.jsxExpressionContainer(conditionalExpression);
}
}
}
function patchIconToNamespace({ j, path, nameAttr, nameAttrIndex, iconName }) {
if (!nameAttr) return;
if (isSimpleIcon(nameAttr)) {
path.node.name = j.jsxMemberExpression(j.jsxIdentifier('Icon'), j.jsxIdentifier(iconName));
path.node.attributes.splice(nameAttrIndex, 1);
}
}
function patchIconPropToNamedIcon({ j, iconAttr, iconName, consequentIconName, alternateIconName }) {
if (isSimpleIcon(iconAttr)) {
iconAttr.value = j.jsxExpressionContainer(
j.jsxElement(j.jsxOpeningElement(j.jsxIdentifier(`${iconName}Icon`), [], true)),
);
} else if (isTernaryIcon(iconAttr)) {
iconAttr.value.expression.consequent = j.jsxElement(
j.jsxOpeningElement(j.jsxIdentifier(`${consequentIconName}Icon`), [], true),
);
iconAttr.value.expression.alternate = j.jsxElement(
j.jsxOpeningElement(j.jsxIdentifier(`${alternateIconName}Icon`), [], true),
);
} else if (
iconAttr.value &&
iconAttr.value.expression &&
iconAttr.value.expression.type === 'ConditionalExpression' &&
iconAttr.value.expression.consequent &&
(iconAttr.value.expression.consequent.type === 'StringLiteral' ||
iconAttr.value.expression.consequent.type === 'Literal')
) {
iconAttr.value.expression.consequent = j.jsxElement(
j.jsxOpeningElement(j.jsxIdentifier(`${iconAttr.value.expression.consequent.value}Icon`), [], true),
);
} else if (
iconAttr.value &&
iconAttr.value.expression &&
iconAttr.value.expression.type === 'ConditionalExpression' &&
iconAttr.value.expression.alternate &&
(iconAttr.value.expression.alternate.type === 'StringLiteral' ||
iconAttr.value.expression.alternate.type === 'Literal')
) {
iconAttr.value.expression.alternate = j.jsxElement(
j.jsxOpeningElement(j.jsxIdentifier(`${iconAttr.value.expression.alternate.value}Icon`), [], true),
);
}
}
function patchIconPropToSimpleIcon({ j, iconAttr, iconName, expressionIconValue }) {
if (isSimpleIcon(iconAttr)) {
iconAttr.value = j.jsxExpressionContainer(
j.jsxElement(
j.jsxOpeningElement(j.jsxMemberExpression(j.jsxIdentifier('Icon'), j.jsxIdentifier(iconName)), [], true),
),
);
} else if (isTernaryIcon(iconAttr)) {
iconAttr.value = j.jsxExpressionContainer(
j.jsxElement(
j.jsxOpeningElement(
j.jsxIdentifier('Icon'),
[j.jsxAttribute(j.jsxIdentifier('name'), expressionIconValue)],
true,
),
),
);
} else if (
iconAttr.value &&
iconAttr.value.expression &&
iconAttr.value.expression.type === 'ConditionalExpression' &&
iconAttr.value.expression.consequent &&
(iconAttr.value.expression.consequent.type === 'StringLiteral' ||
iconAttr.value.expression.consequent.type === 'Literal')
) {
iconAttr.value.expression.consequent = j.jsxElement(
j.jsxOpeningElement(
j.jsxMemberExpression(j.jsxIdentifier('Icon'), j.jsxIdentifier(iconAttr.value.expression.consequent.value)),
[],
true,
),
);
} else if (
iconAttr.value &&
iconAttr.value.expression &&
iconAttr.value.expression.type === 'ConditionalExpression' &&
iconAttr.value.expression.alternate &&
(iconAttr.value.expression.alternate.type === 'StringLiteral' ||
iconAttr.value.expression.alternate.type === 'Literal')
) {
iconAttr.value.expression.alternate = j.jsxElement(
j.jsxOpeningElement(
j.jsxMemberExpression(j.jsxIdentifier('Icon'), j.jsxIdentifier(iconAttr.value.expression.alternate.value)),
[],
true,
),
);
}
}
function isSimpleIcon(attr) {
return (
(attr.value && (attr.value.type === 'StringLiteral' || attr.value.type === 'Literal')) ||
(attr.value &&
attr.value.expression &&
(attr.value.expression.type === 'StringLiteral' || attr.value.expression.type === 'Literal'))
);
}
function isTernaryIcon(attr) {
return (
attr.value &&
attr.value.expression &&
(attr.value.expression.type === 'ConditionalExpression' &&
(attr.value.expression.consequent.type === 'StringLiteral' ||
attr.value.expression.consequent.type === 'Literal') &&
(attr.value.expression.alternate.type === 'StringLiteral' || attr.value.expression.alternate.type === 'Literal'))
);
}
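// Codemod entry point: rewrites <Icon name="..." /> elements and string `icon` props on
// Link/Button/MenuItem. When every icon name is statically known (a string literal or a
// ternary of string literals) it emits named imports from '@skbkontur/react-icons/<Name>';
// otherwise it keeps a generic `Icon` import from '@skbkontur/react-icons' and rewrites
// usages to the Icon.<Name> namespace form instead.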
module.exports = function(file, api) {
const j = api.jscodeshift;
const root = j(file.source);
const componentNames = ['Icon', 'Link', 'Button', 'MenuItem'];
const iconNames = new Set();
const iconComponents = [];
const componentsWithIconProp = [];
let preserveIconImport = false;
root.find(j.JSXOpeningElement, node => componentNames.includes(node.name.name)).forEach(path => {
if (path.node.name.name === 'Icon') {
const nameAttrIndex = path.node.attributes.findIndex(attr => attr.name && attr.name.name === 'name');
const nameAttr = path.node.attributes[nameAttrIndex];
const iconComponent = { j, path, nameAttr, nameAttrIndex };
if (nameAttr && isSimpleIcon(nameAttr)) {
iconComponent.iconName = nameAttr.value.value || nameAttr.value.expression.value;
iconNames.add(iconComponent.iconName);
} else if (nameAttr && isTernaryIcon(nameAttr)) {
iconComponent.consequentIconName = nameAttr.value.expression.consequent.value;
iconComponent.alternateIconName = nameAttr.value.expression.alternate.value;
iconNames.add(iconComponent.consequentIconName);
iconNames.add(iconComponent.alternateIconName);
} else {
preserveIconImport = true;
}
iconComponents.push(iconComponent);
} else {
const iconAttrIndex = path.node.attributes.findIndex(attr => attr.name && attr.name.name === 'icon');
const iconAttr = path.node.attributes[iconAttrIndex];
const component = { j, iconAttr };
if (!iconAttr) return;
if (isSimpleIcon(iconAttr)) {
component.iconName = iconAttr.value.value || iconAttr.value.expression.value;
iconNames.add(component.iconName);
} else if (isTernaryIcon(iconAttr)) {
component.expressionIconValue = iconAttr.value;
component.consequentIconName = iconAttr.value.expression.consequent.value;
component.alternateIconName = iconAttr.value.expression.alternate.value;
iconNames.add(component.consequentIconName);
iconNames.add(component.alternateIconName);
}
componentsWithIconProp.push(component);
}
});
iconComponents.forEach(iconComponent => {
if (preserveIconImport) {
patchIconToNamespace(iconComponent);
} else {
patchIconToNamedIcon(iconComponent);
}
});
componentsWithIconProp.forEach(component => {
if (preserveIconImport) {
patchIconPropToSimpleIcon(component);
} else {
patchIconPropToNamedIcon(component);
}
});
let iconImportsInserted = false;
const imports = root.find(j.ImportDeclaration);
const iconImports = preserveIconImport
? []
: [...iconNames].map(iconName =>
j.importDeclaration(
[j.importDefaultSpecifier(j.identifier(`${iconName}Icon`))],
j.stringLiteral(`@skbkontur/react-icons/${iconName}`),
),
);
imports.replaceWith(path => {
const specifiers = path.node.specifiers;
const filteredSpecifiers = specifiers.filter(spec => spec.local.name !== 'Icon');
if (specifiers.length === filteredSpecifiers.length) return path.node;
if (preserveIconImport && filteredSpecifiers.length === 0) return null;
iconImportsInserted = true;
if (filteredSpecifiers.length === 0) return iconImports;
path.node.specifiers = filteredSpecifiers;
return [path.node, ...iconImports];
});
if (preserveIconImport) {
imports
.at(-1)
.insertAfter(
j.importDeclaration(
[j.importDefaultSpecifier(j.identifier('Icon'))],
j.stringLiteral('@skbkontur/react-icons'),
),
);
}
if (iconNames.size === 0) return root.toSource();
if (!preserveIconImport) {
if (!iconImportsInserted) imports.at(-1).insertAfter(iconImports);
}
return root.toSource();
};
|
const loadPlugin = id => {
localStorage.setItem('plugin', id);
const url = `${location.protocol}//${location.host}${location.pathname}`;
const newURL = `${url}?${$.param({
plugin: id
})}`;
window.location.href = newURL;
};
$(() => {
var storedID = localStorage.getItem('plugin');
if (storedID && storedID !== pid) {
loadPlugin(storedID);
} else {
$('#controls').removeAttr('hidden');
}
$('#search-input').keypress(event => {
if (event.which === 13) {
search();
}
});
$('#plugin-select').val(pid);
$('#plugin-select').change(() => {
const id = $('#plugin-select').val();
loadPlugin(id);
});
});
let mangaTitle = "";
let searching = false;
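// Query the plugin list API for the current search input and render the results table.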
const search = () => {
if (searching)
return;
const query = $.param({
query: $('#search-input').val(),
plugin: pid
});
$.ajax({
type: 'GET',
url: `${base_url}api/admin/plugin/list?${query}`,
contentType: "application/json",
dataType: 'json'
})
.done(data => {
console.log(data);
if (data.error) {
alert('danger', `Search failed. Error: ${data.error}`);
return;
}
mangaTitle = data.title;
$('#title-text').text(data.title);
buildTable(data.chapters);
})
.fail((jqXHR, status) => {
alert('danger', `Search failed. Error: [${jqXHR.status}] ${jqXHR.statusText}`);
})
.always(() => {});
};
const buildTable = (chapters) => {
$('#table').attr('hidden', '');
$('table').empty();
const keys = Object.keys(chapters[0]).map(k => `<th>${k}</th>`).join('');
const thead = `<thead><tr>${keys}</tr></thead>`;
$('table').append(thead);
const rows = chapters.map(ch => {
const tds = Object.values(ch).map(v => {
const maxLength = 40;
const shouldShrink = v && v.length > maxLength;
const content = shouldShrink ? `<span title="${v}">${v.substring(0, maxLength)}...</span><div uk-dropdown><span>${v}</span></div>` : v;
return `<td>${content}</td>`
}).join('');
return `<tr data-id="${ch.id}" data-title="${ch.title}">${tds}</tr>`;
});
const tbody = `<tbody id="selectable">${rows}</tbody>`;
$('table').append(tbody);
$('#selectable').selectable({
filter: 'tr'
});
$('#table table').tablesorter();
$('#table').removeAttr('hidden');
};
const selectAll = () => {
$('tbody > tr').each((i, e) => {
$(e).addClass('ui-selected');
});
};
const unselect = () => {
$('tbody > tr').each((i, e) => {
$(e).removeClass('ui-selected');
});
};
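// Ask for confirmation, then POST the selected chapters to the admin download-queue API.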
const download = () => {
const selected = $('tbody > tr.ui-selected');
if (selected.length === 0) return;
UIkit.modal.confirm(`Download ${selected.length} selected chapters?`).then(() => {
$('#download-btn').attr('hidden', '');
$('#download-spinner').removeAttr('hidden');
const chapters = selected.map((i, e) => {
return {
id: $(e).attr('data-id'),
title: $(e).attr('data-title')
}
}).get();
console.log(chapters);
$.ajax({
type: 'POST',
url: base_url + 'api/admin/plugin/download',
data: JSON.stringify({
plugin: pid,
chapters: chapters,
title: mangaTitle
}),
contentType: "application/json",
dataType: 'json'
})
.done(data => {
console.log(data);
if (data.error) {
alert('danger', `Failed to add chapters to the download queue. Error: ${data.error}`);
return;
}
const successCount = parseInt(data.success);
const failCount = parseInt(data.fail);
alert('success', `${successCount} of ${successCount + failCount} chapters added to the download queue. You can view and manage your download queue on the <a href="${base_url}admin/downloads">download manager page</a>.`);
})
.fail((jqXHR, status) => {
alert('danger', `Failed to add chapters to the download queue. Error: [${jqXHR.status}] ${jqXHR.statusText}`);
})
.always(() => {
$('#download-spinner').attr('hidden', '');
$('#download-btn').removeAttr('hidden');
});
});
};
|
from flask import render_template,redirect,url_for,flash,request
from flask_login import login_user, current_user, logout_user,login_required
from . import auth
from .. import db,bcrypt
from ..models import User
from .forms import LoginForm,SignUpForm
@auth.route('/signup',methods = ["GET","POST"])
def signup():
form = SignUpForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(email = form.email.data, username = form.username.data,password = hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You are now able to log in', 'success')
return redirect(url_for('auth.login'))
return render_template('auth/signup.html',signup_form = form)
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user and bcrypt.check_password_hash(user.password, login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
else:
flash('Invalid username or password')
title = "One Minute Pitch"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
|
"""
.. _howto_spotfindingresults:
Assessing :py:class:`.SpotFindingResults`
=========================================
Purpose of this tutorial:
* Deciding between spot-based or pixel-based decoding
* Choosing a :py:class:`.FindSpotsAlgorithm`
* Tuning a :py:class:`.FindSpotsAlgorithm`
Although it is not necessary to visualize spots found by the :py:class:`.FindSpotsAlgorithm` before
decoding every field of view in your data, it can be a useful step when building an image
processing pipeline. Visually assessing the detected spots will ensure the spot-based decoding
approach and :py:class:`.FindSpotsAlgorithm` you chose is optimized for your data. To learn more
about how spots can be found and decoded in starfish see :ref:`section_finding_and_decoding`.
There are two methods for viewing spots. The first is to access the :py:class:`.SpotAttributes`
of a selected :term:`ImageSlice` and add it as points to the napari viewer. The second is to use a
``TraceBuilder`` to convert the :py:class:`.SpotFindingResults` to an
:py:class:`.IntensityTable`, which can then be passed to :py:func:`.display`.
.. note::
:py:class:`.DecodedIntensityTable` can also be passed to :py:func:`.display`.
"""
# Load and process ISS images to find spots with BlobDetector
from starfish.image import ApplyTransform, LearnTransform, Filter
from starfish.types import Axes
from starfish import data, display, FieldOfView
from starfish.spots import FindSpots
experiment = data.ISS()
fov = experiment.fov()
imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES) # primary images
dots = fov.get_image("dots") # reference round for image registration
# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filt.run(imgs, in_place=True)
filt.run(dots, in_place=True)
# register primary images to reference round
learn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)
transforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
warp = ApplyTransform.Warp()
warp.run(imgs, transforms_list=transforms_list, in_place=True)
# run blob detector on dots (reference image with every spot)
bd = FindSpots.BlobDetector(
min_sigma=1,
max_sigma=3,
num_sigma=10,
threshold=0.01,
is_volume=False,
measurement_type='mean',
)
spots = bd.run(image_stack=imgs, reference_image=dots)
####################################################################################################
# The first way to visualize detected spots is to access the :py:class:`.SpotAttributes`. Since
# spots were found using a reference image, the :py:class:`.SpotAttributes` for every
# :term:`ImageSlice` in :py:class:`.SpotFindingResults` is the same and it doesn't matter
# which ImageSlice is selected to display. If no reference image were passed to
# :py:meth:`.BlobDetector.run`, then each ImageSlice would contain different
# :py:class:`.SpotAttributes` and it would be best to display each as a different points layer to
# be compared with the :py:class:`.ImageStack`.
# uncomment code to view
# %gui qt
# viewer = display(stack=dots)
# viewer.add_points(data=spots[{Axes.CH:1, Axes.ROUND:0}].spot_attrs.data[['z', 'y',
# 'x']].to_numpy(), size=5)
####################################################################################################
# The other way to visualize detected spots is to convert the :py:class:`.SpotFindingResults` to
# an :py:class:`.IntensityTable`. This can be done by decoding to a
# :py:class:`.DecodedIntensityTable`, which is a subclass of :py:class:`.IntensityTable`.
# However, a :py:class:`.Codebook` independent method is to use a ``TraceBuilder`` to return an
# :py:class:`.IntensityTable`. See :ref:`howto_tracebuildingstrategies` to pick the suitable
# ``TraceBuilder``.
from starfish.core.spots.DecodeSpots.trace_builders import build_spot_traces_exact_match
intensity_table = build_spot_traces_exact_match(spots)
# uncomment code to view
# %gui qt
# viewer = display(stack=dots, spots=intensity_table)
|
const { path, immutablePreset } = require('./index');
describe('pathon', () => {
test('methods', () => {
const pRoot = path('root', { child: true }, immutablePreset);
expect(typeof pRoot.set).toBe('function');
expect(typeof pRoot.del).toBe('function');
expect(typeof pRoot.get).toBe('function');
expect(typeof pRoot.batch).toBe('function');
expect(typeof pRoot.watch).toBe('function');
expect(typeof pRoot.unwatch).toBe('function');
expect(typeof pRoot.getPath).toBe('function');
expect(typeof pRoot.path).toBe('function');
const pChild = pRoot.path('child');
expect(typeof pChild.set).toBe('function');
expect(typeof pChild.del).toBe('function');
expect(typeof pChild.get).toBe('function');
expect(typeof pChild.batch).toBe('function');
expect(typeof pChild.watch).toBe('function');
expect(typeof pChild.unwatch).toBe('function');
expect(typeof pChild.getPath).toBe('function');
expect(typeof pChild.path).toBe('function');
});
// TODO: reset
// TODO: delete
const testsGet = preset => () => {
test('root default', () => {
const pRoot = path('root', undefined, preset);
expect(pRoot.get()).toEqual({});
});
test('root default literal', () => {
const initialState = 0;
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
});
describe('object', () => {
test('root', () => {
const initialState = {};
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
});
test('child', () => {
const initialState = { child: true };
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
expect(pRoot.path('child').get()).toBe(initialState.child);
});
test('child default', () => {
const initialStateRoot = {};
const initialStateChild = true;
const pRoot = path('root', initialStateRoot, preset);
const pChild = pRoot.path('child', initialStateChild);
expect(pRoot.get()).toEqual({ child: true });
expect(pChild.get()).toBe(initialStateChild);
});
test('children', () => {
const initialState = { child1: true, child2: true };
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
expect(pRoot.path('child1').get()).toBe(initialState.child1);
expect(pRoot.path('child2').get()).toBe(initialState.child2);
});
test('children deep', () => {
const initialState = {
child1: true,
child2: { child: { child: true } },
};
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
expect(pRoot.path('child1').get()).toBe(initialState.child1);
expect(
pRoot
.path('child2')
.path('child')
.path('child')
.get(),
).toBe(initialState.child2.child.child);
});
test('children deep default', () => {
const initialStateRoot = { child1: true };
const initialStateChild2 = { child: { child: true } };
const pRoot = path('root', initialStateRoot, preset);
const pChild1 = pRoot.path('child2', initialStateChild2);
expect(pRoot.get()).toEqual({
child1: true,
child2: { child: { child: true } },
});
expect(pRoot.path('child1').get()).toBe(initialStateRoot.child1);
expect(pChild1.get()).toBe(initialStateChild2);
expect(
pChild1
.path('child')
.path('child')
.get(),
).toBe(initialStateChild2.child.child);
});
});
describe('array', () => {
test('root', () => {
const initialState = [];
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
});
test('child', () => {
const initialState = [1];
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
expect(pRoot.path(0).get()).toBe(initialState[0]);
});
test('child default', () => {
const initialStateRoot = [];
const initialStateChild = true;
const pRoot = path('root', initialStateRoot, preset);
const pChild = pRoot.path(0, initialStateChild);
expect(pRoot.get()).toEqual([true]);
expect(pChild.get()).toBe(initialStateChild);
});
test('children', () => {
const initialState = [1, 2];
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
expect(pRoot.path(0).get()).toBe(initialState[0]);
expect(pRoot.path(1).get()).toBe(initialState[1]);
});
test('children deep', () => {
const initialState = [1, { child: { child: true } }];
const pRoot = path('root', initialState, preset);
expect(pRoot.get()).toBe(initialState);
expect(pRoot.path(0).get()).toBe(initialState[0]);
expect(
pRoot
.path(1)
.path('child')
.path('child')
.get(),
).toBe(initialState[1].child.child);
});
test('children deep default', () => {
const initialStateRoot = [1];
const initialStateChild1 = { child: { child: true } };
const pRoot = path('root', initialStateRoot, preset);
const pChild1 = pRoot.path(1, initialStateChild1);
expect(pRoot.get()).toEqual([1, { child: { child: true } }]);
expect(pRoot.path(0).get()).toBe(initialStateRoot[0]);
expect(pChild1.get()).toBe(initialStateChild1);
expect(
pChild1
.path('child')
.path('child')
.get(),
).toBe(initialStateChild1.child.child);
});
});
};
const testMemorizedChild = preset => () => {
test('memorize first child', () => {
const pRoot = path('root', { child: true }, preset);
expect(pRoot.path('child')).toBe(pRoot.path('child'));
});
};
const testBatch = preset => () => {
test('batch itself for loop', () => {
const iterations = 5;
const subscriptionToRoot = jest.fn();
const subscriptionToСounter = jest.fn();
const pRoot = path('root', { counter: 0 }, preset);
const pCounter = pRoot.path('counter');
pRoot.watch(subscriptionToRoot);
pCounter.watch(subscriptionToСounter);
pCounter.batch(path => {
for (let i = 1; i <= iterations; ++i) {
path.set(i);
}
});
expect(subscriptionToRoot.mock.calls.length).toBe(1);
expect(subscriptionToСounter.mock.calls.length).toBe(1);
expect(pCounter.get()).toBe(iterations);
});
test('batch inner path from outside', () => {
const iterations = 5;
const subscriptionToRoot = jest.fn();
const subscriptionToСounter = jest.fn();
const pRoot = path('root', { counter: 0 }, preset);
const pCounter = pRoot.path('counter');
pRoot.watch(subscriptionToRoot);
pCounter.watch(subscriptionToСounter);
pCounter.batch(() => {
for (let i = 1; i <= iterations; ++i) {
pCounter.set(i);
}
});
expect(subscriptionToRoot.mock.calls.length).toBe(1);
expect(subscriptionToСounter.mock.calls.length).toBe(1);
expect(pCounter.get()).toBe(iterations);
});
test('batch outer path from outside', () => {
const iterations = 5;
const subscriptionToRoot = jest.fn();
const subscriptionToСounter = jest.fn();
const pRoot = path('root', { counter: 0 }, preset);
const pCounter = pRoot.path('counter');
pRoot.watch(subscriptionToRoot);
pCounter.watch(subscriptionToСounter);
pCounter.batch(() => {
for (let i = 1; i <= iterations; ++i) {
pRoot.set({ counter: i });
}
});
expect(subscriptionToRoot.mock.calls.length).toBe(1);
expect(subscriptionToСounter.mock.calls.length).toBe(1);
expect(pCounter.get()).toBe(iterations);
});
};
const testWatch = preset => () => {
const pRoot = path('root', { child1: false, child2: false }, preset);
const subscriptionToRoot = jest.fn();
const subscriptionToRootRepeat = jest.fn();
const subscriptionToChild1 = jest.fn();
const subscriptionToChild2 = jest.fn();
const unwatchSubscriptionToRootRepeat = pRoot.watch(
subscriptionToRootRepeat,
);
pRoot.watch(subscriptionToRoot);
// test double subscription
pRoot.watch(subscriptionToRootRepeat);
pRoot.path('child1').watch(subscriptionToChild1);
pRoot.path('child2').watch(subscriptionToChild2);
test('watch', () => {
pRoot.path('child1').set(true);
expect(subscriptionToRoot.mock.calls.length).toBe(1);
expect(subscriptionToRootRepeat.mock.calls.length).toBe(1);
});
test('watch only touched paths', () => {
expect(subscriptionToChild1.mock.calls.length).toBe(1);
expect(subscriptionToChild2.mock.calls.length).toBe(0);
});
test('unwatch', () => {
unwatchSubscriptionToRootRepeat();
pRoot.path('child2').set(true);
expect(subscriptionToRoot.mock.calls.length).toBe(2);
expect(subscriptionToRootRepeat.mock.calls.length).toBe(1);
});
test('watch only touched paths', () => {
expect(subscriptionToChild1.mock.calls.length).toBe(1);
expect(subscriptionToChild2.mock.calls.length).toBe(1);
});
};
const testDelete = preset => () => {
test('object', () => {
const pRoot = path('root', { 1: 1, 2: 2 }, preset);
pRoot.del('1');
expect(pRoot.get()).toEqual({ 2: 2 });
});
test('array', () => {
const pRoot = path('root', [0, 1, 2, 3, 4], preset);
pRoot.del(-1);
expect(pRoot.get()).toEqual([0, 1, 2, 3, 4]);
pRoot.del('4');
expect(pRoot.get()).toEqual([0, 1, 2, 3]);
pRoot.del('2');
expect(pRoot.get()).toEqual([0, 1, 3]);
pRoot.del('0');
expect(pRoot.get()).toEqual([1, 3]);
pRoot.del(pRoot.get().length);
expect(pRoot.get()).toEqual([1, 3]);
pRoot.del(pRoot.get().length - 1);
expect(pRoot.get()).toEqual([1]);
pRoot.del(0);
expect(pRoot.get()).toEqual([]);
});
test('nested', () => {
const pRoot = path(
'root',
{ 1: 1, 2: [{ 1: 1, 2: 2 }, { 1: 1, 2: 2 }, { 1: 1, 2: 2 }] },
preset,
);
pRoot
.path('2')
.path('1')
.del('1');
expect(pRoot.get()).toEqual({
1: 1,
2: [{ 1: 1, 2: 2 }, { 2: 2 }, { 1: 1, 2: 2 }],
});
});
test('subscription', () => {
const pRoot = path('root', { 1: 1, 2: 2 }, preset);
const subscriptionToRoot = jest.fn();
pRoot.watch(subscriptionToRoot);
pRoot.del('1');
expect(subscriptionToRoot.mock.calls.length).toBe(1);
});
};
describe('immutablePreset', () => {
describe('get', testsGet(immutablePreset));
describe('memorized child', testMemorizedChild(immutablePreset));
describe('batch', testBatch(immutablePreset));
describe('watchers', testWatch(immutablePreset));
describe('delete', testDelete(immutablePreset));
test('immutable update parent', () => {
const initialState = { counter: 0 };
const pRoot = path('root', initialState, immutablePreset);
const pCounter1 = pRoot.path('counter');
pCounter1.set(1);
expect(pRoot.get() !== initialState).toBe(true);
});
test('set and update children', () => {
const initialState = {
counter1: 0,
counter2: 0,
counterDeep: { counter: 0 },
};
const pRoot = path('root', initialState, immutablePreset);
const pCounter1 = pRoot.path('counter1');
const pCounter2 = pRoot.path('counter2');
const pCounterDeepCounter = pRoot.path('counterDeep').path('counter');
let trackingCounter1 = false;
let trackingCounter2 = false;
let trackingCounterDeepCounter = false;
pCounter1.watch(() => (trackingCounter1 = true));
pCounter2.watch(() => (trackingCounter2 = true));
pCounterDeepCounter.watch(() => (trackingCounterDeepCounter = true));
pRoot.set({ counter1: 1 });
pRoot.set({ counterDeep: { counter: 1 } });
expect(pCounter1.get()).toBe(1);
expect(trackingCounter1).toBe(true);
expect(trackingCounter2).toBe(false);
expect(trackingCounterDeepCounter).toBe(true);
expect(pCounterDeepCounter.get()).toBe(1);
});
// TODO: rewrite
test('root path state', () => {
const initialState = {};
const pRoot = path('root', initialState, immutablePreset);
expect(pRoot.get()).toBe(initialState);
let tracking;
const watcher = newState => (tracking = newState);
const deepField = {};
pRoot.watch(watcher);
pRoot.set({ deepField });
expect(pRoot.get()).toBe(tracking);
expect(pRoot.get().deepField).toBe(deepField);
});
});
describe('defaultPreset', () => {
describe('get', testsGet());
describe('memorized child', testMemorizedChild());
describe('batch', testBatch());
describe('watchers', testWatch());
describe('delete', testDelete());
});
});
|
"""Polynomial contrast coding"""
import numpy as np
import pandas as pd
from patsy.contrasts import Poly
from category_encoders.ordinal import OrdinalEncoder
from sklearn.base import BaseEstimator, TransformerMixin
import category_encoders.utils as util
__author__ = 'willmcginnis'
class PolynomialEncoder(BaseEstimator, TransformerMixin):
"""Polynomial contrast coding for the encoding of categorical features.
Parameters
----------
verbose: int
integer indicating verbosity of output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
impute_missing: bool
boolean for whether or not to apply the logic for handle_unknown, will be deprecated in the future.
handle_unknown: str
options are 'error', 'ignore' and 'impute', defaults to 'impute', which will impute the category -1. Warning: if
impute is used, an extra column will be added in if the transform matrix has unknown categories. This can cause
unexpected changes in the dimension in some cases.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = PolynomialEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 21 columns):
intercept 506 non-null int64
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS_0 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD_0 506 non-null float64
RAD_1 506 non-null float64
RAD_2 506 non-null float64
RAD_3 506 non-null float64
RAD_4 506 non-null float64
RAD_5 506 non-null float64
RAD_6 506 non-null float64
RAD_7 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(20), int64(1)
memory usage: 83.1 KB
None
References
----------
.. [1] Contrast Coding Systems for categorical variables. UCLA: Statistical Consulting Group. from
https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/.
.. [2] Gregory Carey (2003). Coding Categorical Variables, from
http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf
"""
def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, impute_missing=True, handle_unknown='impute'):
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.mapping = mapping
self.impute_missing = impute_missing
self.handle_unknown = handle_unknown
self.cols = cols
self.ordinal_encoder = None
self._dim = None
self.feature_names = None
def fit(self, X, y=None, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# if the input dataset isn't already a dataframe, convert it to one (using default column names)
# first check the type
X = util.convert_input(X)
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.cols is None:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
# train an ordinal pre-encoder
self.ordinal_encoder = OrdinalEncoder(
verbose=self.verbose,
cols=self.cols,
impute_missing=self.impute_missing,
handle_unknown=self.handle_unknown
)
self.ordinal_encoder = self.ordinal_encoder.fit(X)
ordinal_mapping = self.ordinal_encoder.category_mapping
mappings_out = []
for switch in ordinal_mapping:
values = switch.get('mapping').get_values()
column_mapping = self.fit_polynomial_coding(values)
mappings_out.append({'col': switch.get('col'), 'mapping': column_mapping, })
self.mapping = mappings_out
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
# drop all output columns with 0 variance.
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
            except ValueError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# first check the type
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim, ))
if not self.cols:
return X
X = self.ordinal_encoder.transform(X)
X = self.polynomial_coding(X, self.mapping)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
@staticmethod
def fit_polynomial_coding(values):
if len(values) < 2:
return pd.DataFrame()
polynomial_contrast_matrix = Poly().code_without_intercept(values)
df = pd.DataFrame(data=polynomial_contrast_matrix.matrix, columns=polynomial_contrast_matrix.column_suffixes)
df.index += 1
df.loc[0] = np.zeros(len(values) - 1)
return df
@staticmethod
def polynomial_coding(X_in, mapping):
"""
"""
X = X_in.copy(deep=True)
cols = X.columns.values.tolist()
X['intercept'] = pd.Series([1] * X.shape[0], index=X.index)
for switch in mapping:
col = switch.get('col')
mod = switch.get('mapping')
new_columns = []
for i in range(len(mod.columns)):
c = mod.columns[i]
new_col = str(col) + '_%d' % (i, )
X[new_col] = mod[c].loc[X[col]].values
new_columns.append(new_col)
old_column_index = cols.index(col)
cols[old_column_index: old_column_index + 1] = new_columns
cols = ['intercept'] + cols
X = X.reindex(columns=cols)
return X
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns:
--------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError("Estimator has to be fitted to return feature names.")
else:
return self.feature_names
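# --- illustrative sketch (not part of the original module) ---
# A hypothetical look at the contrast table built by fit_polynomial_coding for a
# three-level ordinal column (codes 1..3); assumes patsy is installed:
#     PolynomialEncoder.fit_polynomial_coding([1, 2, 3])
# returns a DataFrame with columns ['.Linear', '.Quadratic'], rows indexed 1, 2, 3,
# plus an all-zero row at index 0 that acts as a neutral fallback for codes outside
# the fitted levels.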
|
from layers import MLP
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class MMOE(keras.Model):
def __init__(self, num_tasks, num_experts, expert_hidden_units, task_hidden_units, feat_vocab,
embedding_size):
super().__init__()
self.embedding_layer = {feat: keras.layers.Embedding(vocab_size, embedding_size) for feat, vocab_size in
feat_vocab.items()}
self.num_tasks = num_tasks
self.experts = [MLP(units=expert_hidden_units, last_activation='relu') for _ in range(num_experts)]
self.gates = [layers.Dense(num_experts, activation='softmax', use_bias=True) for _ in range(num_tasks)]
self.task_towers = [MLP(units=task_hidden_units, last_activation='sigmoid') for _ in range(num_tasks)]
def compute_embedding(self, inputs):
embedding = [self.embedding_layer[feat](inputs[feat]) for feat in inputs]
embedding = tf.concat(embedding, axis=-1)
embedding = tf.squeeze(embedding, axis=1)
return embedding
    def _compute_task_outputs(self, embedding):
        # Shared mixture-of-experts computation used by call() and the compute_* helpers:
        # run every expert, then combine them per task with that task's softmax gate.
        experts_outputs = []
        for expert in self.experts:
            expert_output = expert(embedding)
            expert_output = tf.expand_dims(expert_output, axis=0)
            experts_outputs.append(expert_output)
        experts_outputs = tf.concat(experts_outputs, axis=0)  # num_experts,None,expert_hidden_units[-1]
        experts_outputs = tf.transpose(experts_outputs, perm=(1, 0, 2))  # None,num_experts,expert_hidden_units[-1]
        outputs = []
        for i in range(self.num_tasks):
            gate = self.gates[i]
            task_tower = self.task_towers[i]
            gate_weights = gate(embedding)  # None,num_experts
            gate_weights = tf.expand_dims(gate_weights, axis=1)  # None,1,num_experts
            weighted_outputs = tf.matmul(gate_weights, experts_outputs)  # None,1,expert_hidden_units[-1]
            weighted_outputs = tf.squeeze(weighted_outputs, axis=1)
            task_output = task_tower(weighted_outputs)
            outputs.append(task_output)
        return outputs
    def call(self, inputs, training=None, mask=None):
        inputs = self.compute_embedding(inputs)
        outputs = self._compute_task_outputs(inputs)
        # Task 1 is trained on the product of the two tower outputs (pCTR * pCVR),
        # i.e. the click-through-and-conversion probability.
        outputs[1] = outputs[0] * outputs[1]
        outputs = tf.concat(outputs, axis=1)
        return outputs
    def compute_cvr(self, inputs):
        inputs = self.compute_embedding(inputs)
        return self._compute_task_outputs(inputs)[1]
    def compute_ctr(self, inputs):
        inputs = self.compute_embedding(inputs)
        return self._compute_task_outputs(inputs)[0]
    def compute_ctcvr(self, inputs):
        inputs = self.compute_embedding(inputs)
        outputs = self._compute_task_outputs(inputs)
        return outputs[0] * outputs[1]
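# --- illustrative usage sketch (not part of the original module) ---
# Assumes MLP(units=..., last_activation=...) from the local `layers` module behaves
# like a small dense stack whose output width is the last entry of `units`; the
# feature names and vocabulary sizes below are made up.
if __name__ == "__main__":
    feat_vocab = {"user_id": 1000, "item_id": 5000}
    model = MMOE(num_tasks=2, num_experts=4,
                 expert_hidden_units=[64, 32], task_hidden_units=[32, 1],
                 feat_vocab=feat_vocab, embedding_size=8)
    batch = {feat: tf.zeros((16, 1), dtype=tf.int32) for feat in feat_vocab}
    print(model(batch).shape)  # expected (16, 2): [pCTR, pCTR*pCVR] per example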
|
import React, { Component } from 'react';
class TestForm extends Component {
constructor(){
super();
this.state = {
currentInput : "",
currentOutput : ""
}
}
clickHandler(e){
this.props.addTest(this.state.currentInput, this.state.currentOutput);
this.setState({'currentOutput' : "", 'currentInput' : ""});
}
onInputChange(e){
e.preventDefault();
this.setState({
'currentInput' : e.target.value
});
}
onOutputChange(e){
e.preventDefault();
this.setState({
'currentOutput' : e.target.value
});
}
render() {
return (
<div className="TestForm">
<label>Tests</label>
<br/>
<label>Input</label>:<label>Output</label>
        {this.props.tests.map( (test, index) =>{
          return (<div key={index}>
            <input type='text' readOnly value={test.input}/> :
            <input type='text' readOnly value={test.output}/>
          </div>);
        })
        }
<input type='text' value={this.state.currentInput} onChange={this.onInputChange.bind(this)}/>
<input type='text' value={this.state.currentOutput} onChange={this.onOutputChange.bind(this)}/>
<input type='button' onClick={this.clickHandler.bind(this)} value="Add Test"/>
</div>
);
}
}
export default TestForm;
|
# -*- coding: utf-8 -*-
# Author: Jiajun Ren <jiajunren0522@gmail.com>
import logging
from renormalizer.mps import Mpo, Mps, MpDm, gs, ThermalProp
from renormalizer.spectra.base import SpectraTdMpsJobBase
from renormalizer.mps.mps import BraKetPair
from renormalizer.utils import Quantity, OptimizeConfig
logger = logging.getLogger(__name__)
class SpectraExact(SpectraTdMpsJobBase):
"""
0T emission spectra exact propagator
    the bra part e^iEt is neglected to reduce the oscillation
and
for single molecule, the EX space propagator e^iHt is local, and so exact
GS/EXshift is the ground/excited state space energy shift
    the aim is to reduce the oscillation of the correlation function
support:
all cases: 0Temi
1mol case: 0Temi, TTemi, 0Tabs, TTabs
"""
def __init__(
self,
model,
spectratype,
temperature=Quantity(0, "K"),
optimize_config=None,
offset=Quantity(0),
ex_shift=0,
gs_shift=0,
):
# != 0 cases not tested
assert ex_shift == gs_shift == 0
assert temperature == 0
if spectratype == "emi":
self.space1 = "EX"
self.space2 = "GS"
self.shift1 = ex_shift
self.shift2 = gs_shift
if temperature != 0:
assert len(model) == 1
else:
assert len(model) == 1
self.space1 = "GS"
self.space2 = "EX"
self.shift1 = gs_shift
self.shift2 = ex_shift
if optimize_config is None:
optimize_config = OptimizeConfig()
self.optimize_config = optimize_config
super(SpectraExact, self).__init__(
model, spectratype, temperature, offset=offset
)
self.i_mps = self.latest_mps.ket_mps
self.e_mean = self.i_mps.expectation(self.h_mpo)
def init_mps(self):
mmax = self.optimize_config.procedure[0][0]
i_mps = Mps.random(self.h_mpo.model, self.nexciton, mmax, 1)
i_mps.optimize_config = self.optimize_config
energy, i_mps = gs.optimize_mps(i_mps, self.h_mpo)
if self.spectratype == "emi":
operator = "a"
else:
operator = r"a^\dagger"
dipole_mpo = Mpo.onsite(self.model, operator, dipole=True)
if self.temperature != 0:
beta = self.temperature.to_beta()
# print "beta=", beta
# thermal_mpo = Mpo.exact_propagator(self.model, -beta / 2.0, space=self.space1, shift=self.shift1)
# ket_mps = thermal_mpo.apply(i_mps)
# ket_mps.normalize()
# no test, don't know work or not
i_mpdm = MpDm.from_mps(i_mps)
tp = ThermalProp(i_mpdm, exact=True, space=self.space1)
tp.evolve(None, 1, beta / 2j)
ket_mps = tp.latest_mps
else:
ket_mps = i_mps
a_ket_mps = dipole_mpo.apply(ket_mps, canonicalise=True)
a_ket_mps.canonical_normalize()
if self.temperature != 0:
a_bra_mps = ket_mps.copy()
else:
a_bra_mps = a_ket_mps.copy()
return BraKetPair(a_bra_mps, a_ket_mps)
def evolve_single_step(self, evolve_dt):
latest_bra_mps, latest_ket_mps = self.latest_mps
latest_ket_mps = latest_ket_mps.evolve_exact(self.h_mpo, evolve_dt, self.space2)
if self.temperature != 0:
latest_bra_mps = latest_bra_mps.evolve_exact(
self.h_mpo, evolve_dt, self.space1
)
return BraKetPair(latest_bra_mps, latest_ket_mps)
|
import React from 'react';
import { Modal } from '../src/Overlay';
import { Bounce } from '../src/Animation';
class ModalDemo extends React.Component {
constructor(props, context) {
super(props, context);
this.handleOpenModal = this.handleOpenModal.bind(this);
this.handleHide = this.handleHide.bind(this);
this.handleChangeBackdrop = this.handleChangeBackdrop.bind(this);
this.state = {
show: false,
backdrop: true
};
}
handleOpenModal() {
this.setState({
show: true
});
}
handleChangeBackdrop(event) {
const backdropValues = [true, false, 'static'];
this.setState({
backdrop: backdropValues[event.target.value]
});
}
handleHide(e) {
if (e && e.target !== e.currentTarget) {
return;
}
if (this.state.backdrop !== true) {
return;
}
this.setState({
show: false
});
}
render() {
const { backdrop, show } = this.state;
return (
<div className="row">
<h2>Modal</h2>
<button onClick={this.handleOpenModal}>open modal</button>
<select onChange={this.handleChangeBackdrop} defaultValue={0}>
<option value={0}>true</option>
<option value={1}>false</option>
<option value={2}>static</option>
</select>
<Modal
backdrop={backdrop}
transition={Bounce}
backdropClassName={'modal-backdrop'}
containerClassName={'modal-open'}
onHide={this.handleHide}
show={show}
>
<div
className="modal"
style={{
display: this.state.show ? 'block' : 'none'
}}
onClick={this.handleHide}
>
<div className="modal-dialog">
<div className="modal-content">
<div className="modal-header">Header</div>
<div className="modal-body">Body</div>
<div className="modal-footer">Footer</div>
</div>
</div>
</div>
</Modal>
</div>
);
}
}
export default ModalDemo;
|
from collections import namedtuple
class Error(namedtuple('Error', ['maybe_id', 'maybe_links', 'maybe_status',
'maybe_code', 'maybe_title', 'maybe_detail',
'maybe_source', 'maybe_meta'])):
"""Representation of a single error from a list of errors in "errors"."""
__slots__ = ()
def __new__(cls, maybe_id=None, maybe_links=None, maybe_status=None,
maybe_code=None, maybe_title=None, maybe_detail=None,
maybe_source=None, maybe_meta=None):
return super(Error, cls).__new__(cls, maybe_id, maybe_links,
maybe_status, maybe_code, maybe_title,
maybe_detail, maybe_source, maybe_meta)
class Errors(namedtuple('Errors', ['list_errors'])):
"""Representation of the top-level "errors" section of a response."""
__slots__ = ()
def __new__(cls, list_errors):
return super(Errors, cls).__new__(cls, list_errors)
def mk_single(obj, config):
maybe_id = obj.get( 'id', None)
maybe_links = obj.get( 'links', None)
maybe_status = obj.get('status', None)
maybe_code = obj.get( 'code', None)
maybe_title = obj.get( 'title', None)
maybe_detail = obj.get('detail', None)
maybe_source = obj.get('source', None)
maybe_meta = obj.get( 'meta', None)
return Error(maybe_id, maybe_links, maybe_status, maybe_code,
maybe_title, maybe_detail, maybe_source, maybe_meta)
def mk(obj, config):
list_errors = [mk_single(obj_error, config) for obj_error in obj]
return Errors(list_errors)
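# --- illustrative usage sketch (not part of the original module) ---
# Hypothetical "errors" payload in the JSON:API style; `config` is unused by
# mk/mk_single, so None is passed here.
#     errors = mk([{"status": "404", "title": "Not Found"}], None)
#     errors.list_errors[0].maybe_status  # -> "404"
#     errors.list_errors[0].maybe_detail  # -> None (absent keys default to None)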
|
/**
* EasyUI for jQuery 1.8.0
*
* Copyright (c) 2009-2019 www.jeasyui.com. All rights reserved.
*
* Licensed under the freeware license: http://www.jeasyui.com/license_freeware.php
* To use it on other terms please contact us: info@jeasyui.com
*
*/
(function($){
var _1=1;
function _2(_3){
$(_3).addClass("sidemenu");
};
function _4(_5,_6){
var _7=$(_5).sidemenu("options");
if(_6){
$.extend(_7,{width:_6.width,height:_6.height});
}
$(_5)._size(_7);
$(_5).find(".accordion").accordion("resize");
};
function _8(_9,_a,_b){
var _c=$(_9).sidemenu("options");
var tt=$("<ul class=\"sidemenu-tree\"></ul>").appendTo(_a);
tt.tree({data:_b,animate:_c.animate,onBeforeSelect:function(_d){
if(_d.children){
return false;
}
},onSelect:function(_e){
_12(_9,_e.id,true);
},onExpand:function(_f){
_25(_9,_f);
},onCollapse:function(_10){
_25(_9,_10);
},onClick:function(_11){
if(_11.children){
if(_11.state=="open"){
$(_11.target).addClass("tree-node-nonleaf-collapsed");
}else{
$(_11.target).removeClass("tree-node-nonleaf-collapsed");
}
$(this).tree("toggle",_11.target);
}
}});
tt.unbind(".sidemenu").bind("mouseleave.sidemenu",function(){
$(_a).trigger("mouseleave");
});
_12(_9,_c.selectedItemId);
};
function _13(_14,_15,_16){
var _17=$(_14).sidemenu("options");
$(_15).tooltip({content:$("<div></div>"),position:_17.floatMenuPosition,valign:"top",data:_16,onUpdate:function(_18){
var _19=$(this).tooltip("options");
var _1a=_19.data;
_18.accordion({width:_17.floatMenuWidth,multiple:false}).accordion("add",{title:_1a.text,collapsed:false,collapsible:false});
_8(_14,_18.accordion("panels")[0],_1a.children);
},onShow:function(){
var t=$(this);
var tip=t.tooltip("tip").addClass("sidemenu-tooltip");
tip.children(".tooltip-content").addClass("sidemenu");
tip.find(".accordion").accordion("resize");
tip.add(tip.find("ul.tree")).unbind(".sidemenu").bind("mouseover.sidemenu",function(){
t.tooltip("show");
}).bind("mouseleave.sidemenu",function(){
t.tooltip("hide");
});
t.tooltip("reposition");
},onPosition:function(_1b,top){
var tip=$(this).tooltip("tip");
if(!_17.collapsed){
tip.css({left:-999999});
}else{
if(top+tip.outerHeight()>$(window)._outerHeight()+$(document).scrollTop()){
top=$(window)._outerHeight()+$(document).scrollTop()-tip.outerHeight();
tip.css("top",top);
}
}
}});
};
function _1c(_1d,_1e){
$(_1d).find(".sidemenu-tree").each(function(){
_1e($(this));
});
$(_1d).find(".tooltip-f").each(function(){
var tip=$(this).tooltip("tip");
if(tip){
tip.find(".sidemenu-tree").each(function(){
_1e($(this));
});
$(this).tooltip("reposition");
}
});
};
function _12(_1f,_20,_21){
var _22=null;
var _23=$(_1f).sidemenu("options");
_1c(_1f,function(t){
t.find("div.tree-node-selected").removeClass("tree-node-selected");
var _24=t.tree("find",_20);
if(_24){
$(_24.target).addClass("tree-node-selected");
_23.selectedItemId=_24.id;
t.trigger("mouseleave.sidemenu");
_22=_24;
}
});
if(_21&&_22){
_23.onSelect.call(_1f,_22);
}
};
function _25(_26,_27){
_1c(_26,function(t){
var _28=t.tree("find",_27.id);
if(_28){
var _29=t.tree("options");
var _2a=_29.animate;
_29.animate=false;
t.tree(_27.state=="open"?"expand":"collapse",_28.target);
_29.animate=_2a;
}
});
};
function _2b(_2c){
var _2d=$(_2c).sidemenu("options");
$(_2c).empty();
if(_2d.data){
$.easyui.forEach(_2d.data,true,function(_2e){
if(!_2e.id){
_2e.id="_easyui_sidemenu_"+(_1++);
}
if(!_2e.iconCls){
_2e.iconCls="sidemenu-default-icon";
}
if(_2e.children){
_2e.nodeCls="tree-node-nonleaf";
if(!_2e.state){
_2e.state="closed";
}
if(_2e.state=="open"){
_2e.nodeCls="tree-node-nonleaf";
}else{
_2e.nodeCls="tree-node-nonleaf tree-node-nonleaf-collapsed";
}
}
});
var acc=$("<div></div>").appendTo(_2c);
acc.accordion({fit:_2d.height=="auto"?false:true,border:_2d.border,multiple:_2d.multiple});
var _2f=_2d.data;
for(var i=0;i<_2f.length;i++){
acc.accordion("add",{title:_2f[i].text,selected:_2f[i].state=="open",iconCls:_2f[i].iconCls,onBeforeExpand:function(){
return !_2d.collapsed;
}});
var ap=acc.accordion("panels")[i];
_8(_2c,ap,_2f[i].children);
_13(_2c,ap.panel("header"),_2f[i]);
}
}
};
function _30(_31,_32){
var _33=$(_31).sidemenu("options");
_33.collapsed=_32;
var acc=$(_31).find(".accordion");
var _34=acc.accordion("panels");
acc.accordion("options").animate=false;
if(_33.collapsed){
$(_31).addClass("sidemenu-collapsed");
for(var i=0;i<_34.length;i++){
var _35=_34[i];
if(_35.panel("options").collapsed){
_33.data[i].state="closed";
}else{
_33.data[i].state="open";
acc.accordion("unselect",i);
}
var _36=_35.panel("header");
_36.find(".panel-title").html("");
_36.find(".panel-tool").hide();
}
}else{
$(_31).removeClass("sidemenu-collapsed");
for(var i=0;i<_34.length;i++){
var _35=_34[i];
if(_33.data[i].state=="open"){
acc.accordion("select",i);
}
var _36=_35.panel("header");
_36.find(".panel-title").html(_35.panel("options").title);
_36.find(".panel-tool").show();
}
}
acc.accordion("options").animate=_33.animate;
};
function _37(_38){
$(_38).find(".tooltip-f").each(function(){
$(this).tooltip("destroy");
});
$(_38).remove();
};
$.fn.sidemenu=function(_39,_3a){
if(typeof _39=="string"){
var _3b=$.fn.sidemenu.methods[_39];
return _3b(this,_3a);
}
_39=_39||{};
return this.each(function(){
var _3c=$.data(this,"sidemenu");
if(_3c){
$.extend(_3c.options,_39);
}else{
_3c=$.data(this,"sidemenu",{options:$.extend({},$.fn.sidemenu.defaults,$.fn.sidemenu.parseOptions(this),_39)});
_2(this);
}
_4(this);
_2b(this);
_30(this,_3c.options.collapsed);
});
};
$.fn.sidemenu.methods={options:function(jq){
return jq.data("sidemenu").options;
},resize:function(jq,_3d){
return jq.each(function(){
_4(this,_3d);
});
},collapse:function(jq){
return jq.each(function(){
_30(this,true);
});
},expand:function(jq){
return jq.each(function(){
_30(this,false);
});
},destroy:function(jq){
return jq.each(function(){
_37(this);
});
}};
$.fn.sidemenu.parseOptions=function(_3e){
var t=$(_3e);
return $.extend({},$.parser.parseOptions(_3e,["width","height"]));
};
$.fn.sidemenu.defaults={width:200,height:"auto",border:true,animate:true,multiple:true,collapsed:false,data:null,floatMenuWidth:200,floatMenuPosition:"right",onSelect:function(_3f){
}};
})(jQuery);
|
import connectToggle from '../connectors/connectToggle.js';
import ToggleComponent from '../components/Toggle.js';
/**
* The Toggle provides an on/off filtering feature based on an attribute value. Note that if you provide an “off” option, it will be refined at initialization.
* @name Toggle
* @kind widget
* @requirements To use this widget, you'll need an attribute to toggle on.
*
* You can't toggle on null or not-null values. If you want to address this particular use-case you'll need to compute an
* extra boolean attribute saying if the value exists or not. See this [thread](https://discourse.algolia.com/t/how-to-create-a-toggle-for-the-absence-of-a-string-attribute/2460) for more details.
*
* @propType {string} attributeName - Name of the attribute on which to apply the `value` refinement. Required when `value` is present.
* @propType {string} label - Label for the toggle.
* @propType {any} value - Value of the refinement to apply on `attributeName` when checked.
* @propType {boolean} [defaultRefinement=false] - Default state of the widget. Should the toggle be checked by default?
* @themeKey ais-Toggle__root - the root of the component
* @themeKey ais-Toggle__checkbox - the toggle checkbox
* @themeKey ais-Toggle__label - the toggle label
* @example
* import React from 'react';
*
* import { Toggle, InstantSearch } from 'react-instantsearch/dom';
*
* export default function App() {
* return (
* <InstantSearch
* appId="latency"
* apiKey="6be0576ff61c053d5f9a3225e2a90f76"
* indexName="ikea"
* >
* <Toggle
* attributeName="materials"
* label="Made with solid pine"
* value={'Solid pine'}
* />
* </InstantSearch>
* );
* }
*/
export default connectToggle(ToggleComponent);
|
# Generated by Django 2.1.4 on 2019-03-18 14:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0092_auto_20190305_1358'),
]
operations = [
migrations.AlterField(
model_name='message',
name='message_type',
field=models.CharField(choices=[('ERROR', 'Error'), ('WARNING', 'Warning'), ('OTHER', 'Other'), ('DEBUG', 'Debug')], default='OTHER', max_length=7),
),
]
|
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import validators
from neutron.api.v2 import attributes as attrs
from neutron.common import utils
from neutron.db import _resource_extend as resource_extend
from neutron.db import portsecurity_db_common
from neutron.extensions import portsecurity as psec
class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon):
resource_extend.register_funcs(
attrs.NETWORKS, ['_extend_port_security_dict'])
resource_extend.register_funcs(
attrs.PORTS, ['_extend_port_security_dict'])
def _extend_port_security_dict(self, response_data, db_data):
if ('port-security' in
getattr(self, 'supported_extension_aliases', [])):
super(PortSecurityDbMixin, self)._extend_port_security_dict(
response_data, db_data)
def _determine_port_security_and_has_ip(self, context, port):
"""Returns a tuple of booleans (port_security_enabled, has_ip).
Port_security is the value associated with the port if one is present
otherwise the value associated with the network is returned. has_ip is
if the port is associated with an ip or not.
"""
has_ip = self._ip_on_port(port)
# we don't apply security groups for dhcp, router
if port.get('device_owner') and utils.is_port_trusted(port):
return (False, has_ip)
if validators.is_attr_set(port.get(psec.PORTSECURITY)):
port_security_enabled = port[psec.PORTSECURITY]
# If port has an ip and security_groups are passed in
# conveniently set port_security_enabled to true this way
# user doesn't also have to pass in port_security_enabled=True
# when creating ports.
elif has_ip and validators.is_attr_set(port.get('security_groups')):
port_security_enabled = True
else:
port_security_enabled = self._get_network_security_binding(
context, port['network_id'])
return (port_security_enabled, has_ip)
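    # Illustrative precedence for _determine_port_security_and_has_ip (a sketch, not
    # part of the original code): a trusted device_owner always yields (False, has_ip);
    # otherwise an explicit port-level psec.PORTSECURITY value wins; otherwise a port
    # that has an IP and explicit 'security_groups' implies True; otherwise the
    # network-level port-security binding decides.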
def _ip_on_port(self, port):
return bool(port.get('fixed_ips'))
|
/********************************************************************
* Copyright (C) 2013-2014 Texas Instruments Incorporated.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* Neither the name of Texas Instruments Incorporated nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CSLR_MMU_H
#define CSLR_MMU_H
#ifdef __cplusplus
extern "C"
{
#endif
#include <ti/csl/cslr.h>
/**************************************************************************
* Register Overlay Structure for MMU_BLOCK
**************************************************************************/
typedef struct {
volatile Uint32 REVISION;
volatile Uint8 RSVD0[12];
volatile Uint32 SYSCONFIG;
volatile Uint32 SYSSTS;
volatile Uint32 IRQSTS;
volatile Uint32 IRQEN;
volatile Uint8 RSVD1[32];
volatile Uint32 WALKING_ST;
volatile Uint32 CNTL;
volatile Uint32 FAULT_AD;
volatile Uint32 TTB;
volatile Uint32 LOCK;
volatile Uint32 LD_TLB;
volatile Uint32 CAM;
volatile Uint32 RAM;
volatile Uint32 GFLUSH;
volatile Uint32 FLUSH_ENTRY;
volatile Uint32 READ_CAM;
volatile Uint32 READ_RAM;
volatile Uint32 EMU_FAULT_AD;
volatile Uint32 TTB_MSB;
volatile Uint32 RAM_MSB;
volatile Uint32 READ_RAM_MSB;
volatile Uint32 FAULT_PC;
volatile Uint32 FAULT_STS;
volatile Uint32 GPR;
volatile Uint8 RSVD2[4];
volatile Uint32 BYPASS_REGION1_ADDR;
volatile Uint32 BYPASS_REGION1_SIZE;
volatile Uint32 BYPASS_REGION2_ADDR;
volatile Uint32 BYPASS_REGION2_SIZE;
volatile Uint32 BYPASS_REGION3_ADDR;
volatile Uint32 BYPASS_REGION3_SIZE;
volatile Uint32 BYPASS_REGION4_ADDR;
volatile Uint32 BYPASS_REGION4_SIZE;
} CSL_mmuRegs;
/**************************************************************************
* Register Macros
**************************************************************************/
#define CSL_MMU_REVISION (0x0U)
#define CSL_MMU_SYSCONFIG (0x10U)
/* CSL Aliased modification for PRCM compatibility */
#define MMU_SYSCONFIG (CSL_MMU_SYSCONFIG)
#define CSL_MMU_SYSSTS (0x14U)
#define CSL_MMU_IRQSTS (0x18U)
#define CSL_MMU_IRQEN (0x1CU)
#define CSL_MMU_WALKING_ST (0x40U)
#define CSL_MMU_CNTL (0x44U)
#define CSL_MMU_FAULT_AD (0x48U)
#define CSL_MMU_TTB (0x4CU)
#define CSL_MMU_LOCK (0x50U)
#define CSL_MMU_LD_TLB (0x54U)
#define CSL_MMU_CAM (0x58U)
#define CSL_MMU_RAM (0x5CU)
#define CSL_MMU_GFLUSH (0x60U)
#define CSL_MMU_FLUSH_ENTRY (0x64U)
#define CSL_MMU_READ_CAM (0x68U)
#define CSL_MMU_READ_RAM (0x6CU)
#define CSL_MMU_EMU_FAULT_AD (0x70U)
#define CSL_MMU_TTB_MSB (0x74U)
#define CSL_MMU_RAM_MSB (0x78U)
#define CSL_MMU_READ_RAM_MSB (0x7CU)
#define CSL_MMU_BYPASS_REGION1_ADDR (0x90U)
#define CSL_MMU_BYPASS_REGION1_SIZE (0x94U)
#define CSL_MMU_BYPASS_REGION2_ADDR (0x98U)
#define CSL_MMU_BYPASS_REGION2_SIZE (0x9CU)
#define CSL_MMU_BYPASS_REGION3_ADDR (0xA0U)
#define CSL_MMU_BYPASS_REGION3_SIZE (0xA4U)
#define CSL_MMU_BYPASS_REGION4_ADDR (0xA8U)
#define CSL_MMU_BYPASS_REGION4_SIZE (0xACU)
#define CSL_MMU_FAULT_PC (0x80U)
#define CSL_MMU_FAULT_STS (0x84U)
#define CSL_MMU_GPR (0x88U)
/**************************************************************************
* Field Definition Macros
**************************************************************************/
/* REVISION */
#define CSL_MMU_REVISION_REV_MAJ_MASK (0x000000F0U)
#define CSL_MMU_REVISION_REV_MAJ_SHIFT (0x00000004U)
#define CSL_MMU_REVISION_REV_MAJ_RESETVAL (0x00000003U)
#define CSL_MMU_REVISION_REV_MAJ_MAX (0x0000000fU)
#define CSL_MMU_REVISION_REV_MIN_MASK (0x0000000FU)
#define CSL_MMU_REVISION_REV_MIN_SHIFT (0x00000000U)
#define CSL_MMU_REVISION_REV_MIN_RESETVAL (0x00000000U)
#define CSL_MMU_REVISION_REV_MIN_MAX (0x0000000fU)
#define CSL_MMU_REVISION_RESETVAL (0x00000030U)
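/* Illustrative sketch (not part of the original header): decoding the major and
 * minor revision fields from a raw REVISION register value with the masks and
 * shifts above; `rev` is a hypothetical value read from the peripheral at offset
 * CSL_MMU_REVISION.
 *     Uint32 major = (rev & CSL_MMU_REVISION_REV_MAJ_MASK) >> CSL_MMU_REVISION_REV_MAJ_SHIFT;
 *     Uint32 minor = (rev & CSL_MMU_REVISION_REV_MIN_MASK) >> CSL_MMU_REVISION_REV_MIN_SHIFT;
 */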
/* SYSCONFIG */
#define CSL_MMU_SYSCONFIG_CLOCKACTIVITY_MASK (0x00000300U)
#define CSL_MMU_SYSCONFIG_CLOCKACTIVITY_SHIFT (0x00000008U)
#define CSL_MMU_SYSCONFIG_CLOCKACTIVITY_RESETVAL (0x00000000U)
#define CSL_MMU_SYSCONFIG_CLOCKACTIVITY_MAX (0x00000003U)
/* CSL Aliased modification for PRCM compatibility */
#define MMU_SYSCONFIG_CLOCKACTIVITY_SHIFT (CSL_MMU_SYSCONFIG_CLOCKACTIVITY_SHIFT)
#define MMU_SYSCONFIG_CLOCKACTIVITY_MASK (CSL_MMU_SYSCONFIG_CLOCKACTIVITY_MASK)
#define CSL_MMU_SYSCONFIG_IDLEMODE_MASK (0x00000018U)
#define CSL_MMU_SYSCONFIG_IDLEMODE_SHIFT (0x00000003U)
#define CSL_MMU_SYSCONFIG_IDLEMODE_RESETVAL (0x00000000U)
#define CSL_MMU_SYSCONFIG_IDLEMODE_SFIDLE (0x00000000U)
#define CSL_MMU_SYSCONFIG_IDLEMODE_SNIDLE (0x00000001U)
#define CSL_MMU_SYSCONFIG_IDLEMODE_SSIDLE (0x00000002U)
#define CSL_MMU_SYSCONFIG_IDLEMODE_RES (0x00000003U)
/* CSL Aliased modification for PRCM compatibility */
#define MMU_SYSCONFIG_IDLEMODE_SHIFT (CSL_MMU_SYSCONFIG_IDLEMODE_SHIFT)
#define MMU_SYSCONFIG_IDLEMODE_MASK (CSL_MMU_SYSCONFIG_IDLEMODE_MASK)
#define MMU_SYSCONFIG_IDLEMODE_SFIDLE (CSL_MMU_SYSCONFIG_IDLEMODE_SFIDLE)
#define MMU_SYSCONFIG_IDLEMODE_SNIDLE (CSL_MMU_SYSCONFIG_IDLEMODE_SNIDLE)
#define MMU_SYSCONFIG_IDLEMODE_SSIDLE (CSL_MMU_SYSCONFIG_IDLEMODE_SSIDLE)
#define MMU_SYSCONFIG_IDLEMODE_RES (CSL_MMU_SYSCONFIG_IDLEMODE_RES)
#define CSL_MMU_SYSCONFIG_SOFTRESET_MASK (0x00000002U)
#define CSL_MMU_SYSCONFIG_SOFTRESET_SHIFT (0x00000001U)
#define CSL_MMU_SYSCONFIG_SOFTRESET_RESETVAL (0x00000000U)
#define CSL_MMU_SYSCONFIG_SOFTRESET_READ0 (0x00000000U)
#define CSL_MMU_SYSCONFIG_SOFTRESET_READ1 (0x00000001U)
#define CSL_MMU_SYSCONFIG_SOFTRESET_WRITE0 (0x00000000U)
#define CSL_MMU_SYSCONFIG_SOFTRESET_WRITE1 (0x00000001U)
#define CSL_MMU_SYSCONFIG_AUTOIDLE_MASK (0x00000001U)
#define CSL_MMU_SYSCONFIG_AUTOIDLE_SHIFT (0x00000000U)
#define CSL_MMU_SYSCONFIG_AUTOIDLE_RESETVAL (0x00000000U)
#define CSL_MMU_SYSCONFIG_AUTOIDLE_CLKFREE (0x00000000U)
#define CSL_MMU_SYSCONFIG_AUTOIDLE_AUTOCLKGATE (0x00000001U)
/* CSL Aliased modification for PRCM compatibility */
#define MMU_SYSCONFIG_AUTOIDLE_SHIFT (CSL_MMU_SYSCONFIG_AUTOIDLE_SHIFT)
#define MMU_SYSCONFIG_AUTOIDLE_MASK (CSL_MMU_SYSCONFIG_AUTOIDLE_MASK)
#define MMU_SYSCONFIG_AUTOIDLE_CLKFREE (CSL_MMU_SYSCONFIG_AUTOIDLE_CLKFREE)
#define MMU_SYSCONFIG_AUTOIDLE_AUTOCLKGATE (CSL_MMU_SYSCONFIG_AUTOIDLE_AUTOCLKGATE)
#define CSL_MMU_SYSCONFIG_RESETVAL (0x00000000U)
/* SYSSTS */
#define CSL_MMU_SYSSTS_RESETDONE_MASK (0x00000001U)
#define CSL_MMU_SYSSTS_RESETDONE_SHIFT (0x00000000U)
#define CSL_MMU_SYSSTS_RESETDONE_RESETVAL (0x00000000U)
#define CSL_MMU_SYSSTS_RESETDONE_RSTONGOING (0x00000000U)
#define CSL_MMU_SYSSTS_RESETDONE_RSTCOMP (0x00000001U)
#define CSL_MMU_SYSSTS_RESETVAL (0x00000000U)
/* IRQSTS */
#define CSL_MMU_IRQSTS_MULTIHITFAULT_MASK (0x00000010U)
#define CSL_MMU_IRQSTS_MULTIHITFAULT_SHIFT (0x00000004U)
#define CSL_MMU_IRQSTS_MULTIHITFAULT_RESETVAL (0x00000000U)
#define CSL_MMU_IRQSTS_MULTIHITFAULT_READ0 (0x00000000U)
#define CSL_MMU_IRQSTS_MULTIHITFAULT_READ1 (0x00000001U)
#define CSL_MMU_IRQSTS_MULTIHITFAULT_WRITE0 (0x00000000U)
#define CSL_MMU_IRQSTS_MULTIHITFAULT_WRITE1 (0x00000001U)
#define CSL_MMU_IRQSTS_TBLWALKFAULT_MASK (0x00000008U)
#define CSL_MMU_IRQSTS_TBLWALKFAULT_SHIFT (0x00000003U)
#define CSL_MMU_IRQSTS_TBLWALKFAULT_RESETVAL (0x00000000U)
#define CSL_MMU_IRQSTS_TBLWALKFAULT_READ0 (0x00000000U)
#define CSL_MMU_IRQSTS_TBLWALKFAULT_READ1 (0x00000001U)
#define CSL_MMU_IRQSTS_TBLWALKFAULT_WRITE0 (0x00000000U)
#define CSL_MMU_IRQSTS_TBLWALKFAULT_WRITE1 (0x00000001U)
#define CSL_MMU_IRQSTS_EMUMISS_MASK (0x00000004U)
#define CSL_MMU_IRQSTS_EMUMISS_SHIFT (0x00000002U)
#define CSL_MMU_IRQSTS_EMUMISS_RESETVAL (0x00000000U)
#define CSL_MMU_IRQSTS_EMUMISS_READ0 (0x00000000U)
#define CSL_MMU_IRQSTS_EMUMISS_READ1 (0x00000001U)
#define CSL_MMU_IRQSTS_EMUMISS_WRITE0 (0x00000000U)
#define CSL_MMU_IRQSTS_EMUMISS_WRITE1 (0x00000001U)
#define CSL_MMU_IRQSTS_TRANSLATIONFAULT_MASK (0x00000002U)
#define CSL_MMU_IRQSTS_TRANSLATIONFAULT_SHIFT (0x00000001U)
#define CSL_MMU_IRQSTS_TRANSLATIONFAULT_RESETVAL (0x00000000U)
#define CSL_MMU_IRQSTS_TRANSLATIONFAULT_READ0 (0x00000000U)
#define CSL_MMU_IRQSTS_TRANSLATIONFAULT_READ1 (0x00000001U)
#define CSL_MMU_IRQSTS_TRANSLATIONFAULT_WRITE0 (0x00000000U)
#define CSL_MMU_IRQSTS_TRANSLATIONFAULT_WRITE1 (0x00000001U)
#define CSL_MMU_IRQSTS_TLBMISS_MASK (0x00000001U)
#define CSL_MMU_IRQSTS_TLBMISS_SHIFT (0x00000000U)
#define CSL_MMU_IRQSTS_TLBMISS_RESETVAL (0x00000000U)
#define CSL_MMU_IRQSTS_TLBMISS_READ0 (0x00000000U)
#define CSL_MMU_IRQSTS_TLBMISS_READ1 (0x00000001U)
#define CSL_MMU_IRQSTS_TLBMISS_WRITE0 (0x00000000U)
#define CSL_MMU_IRQSTS_TLBMISS_WRITE1 (0x00000001U)
#define CSL_MMU_IRQSTS_RESETVAL (0x00000000U)
/* IRQEN */
#define CSL_MMU_IRQEN_MULTIHITFAULT_MASK (0x00000010U)
#define CSL_MMU_IRQEN_MULTIHITFAULT_SHIFT (0x00000004U)
#define CSL_MMU_IRQEN_MULTIHITFAULT_RESETVAL (0x00000000U)
#define CSL_MMU_IRQEN_MULTIHITFAULT_MHFLTMASK (0x00000000U)
#define CSL_MMU_IRQEN_MULTIHITFAULT_MHFLTGINT (0x00000001U)
#define CSL_MMU_IRQEN_TBLWALKFAULT_MASK (0x00000008U)
#define CSL_MMU_IRQEN_TBLWALKFAULT_SHIFT (0x00000003U)
#define CSL_MMU_IRQEN_TBLWALKFAULT_RESETVAL (0x00000000U)
#define CSL_MMU_IRQEN_TBLWALKFAULT_TWLFLTMASK (0x00000000U)
#define CSL_MMU_IRQEN_TBLWALKFAULT_TWLFLTGINT (0x00000001U)
#define CSL_MMU_IRQEN_EMUMISS_MASK (0x00000004U)
#define CSL_MMU_IRQEN_EMUMISS_SHIFT (0x00000002U)
#define CSL_MMU_IRQEN_EMUMISS_RESETVAL (0x00000000U)
#define CSL_MMU_IRQEN_EMUMISS_EMUMFLTMASK (0x00000000U)
#define CSL_MMU_IRQEN_EMUMISS_EMUMFLTGINT (0x00000001U)
#define CSL_MMU_IRQEN_TRANSLATIONFAULT_MASK (0x00000002U)
#define CSL_MMU_IRQEN_TRANSLATIONFAULT_SHIFT (0x00000001U)
#define CSL_MMU_IRQEN_TRANSLATIONFAULT_RESETVAL (0x00000000U)
#define CSL_MMU_IRQEN_TRANSLATIONFAULT_TRANFLTMASK (0x00000000U)
#define CSL_MMU_IRQEN_TRANSLATIONFAULT_TRANFLTGINT (0x00000001U)
#define CSL_MMU_IRQEN_TLBMISS_MASK (0x00000001U)
#define CSL_MMU_IRQEN_TLBMISS_SHIFT (0x00000000U)
#define CSL_MMU_IRQEN_TLBMISS_RESETVAL (0x00000000U)
#define CSL_MMU_IRQEN_TLBMISS_TRMISSINTM (0x00000000U)
#define CSL_MMU_IRQEN_TLBMISS_TRMISSGINT (0x00000001U)
#define CSL_MMU_IRQEN_RESETVAL (0x00000000U)
/* WALKING_ST */
#define CSL_MMU_WALKING_ST_TWLRUNNING_MASK (0x00000001U)
#define CSL_MMU_WALKING_ST_TWLRUNNING_SHIFT (0x00000000U)
#define CSL_MMU_WALKING_ST_TWLRUNNING_RESETVAL (0x00000000U)
#define CSL_MMU_WALKING_ST_TWLRUNNING_READ0 (0x00000000U)
#define CSL_MMU_WALKING_ST_TWLRUNNING_READ1 (0x00000001U)
#define CSL_MMU_WALKING_ST_RESETVAL (0x00000000U)
/* CNTL */
#define CSL_MMU_CNTL_EMUTLBUPDATE_MASK (0x00000008U)
#define CSL_MMU_CNTL_EMUTLBUPDATE_SHIFT (0x00000003U)
#define CSL_MMU_CNTL_EMUTLBUPDATE_RESETVAL (0x00000000U)
#define CSL_MMU_CNTL_EMUTLBUPDATE_EMUDIS (0x00000000U)
#define CSL_MMU_CNTL_EMUTLBUPDATE_EMUEN (0x00000001U)
#define CSL_MMU_CNTL_TWLEN_MASK (0x00000004U)
#define CSL_MMU_CNTL_TWLEN_SHIFT (0x00000002U)
#define CSL_MMU_CNTL_TWLEN_RESETVAL (0x00000000U)
#define CSL_MMU_CNTL_TWLEN_TWLEN (0x00000000U)
#define CSL_MMU_CNTL_TWLEN_MMUEN (0x00000001U)
#define CSL_MMU_CNTL_MMUEN_MASK (0x00000002U)
#define CSL_MMU_CNTL_MMUEN_SHIFT (0x00000001U)
#define CSL_MMU_CNTL_MMUEN_RESETVAL (0x00000000U)
#define CSL_MMU_CNTL_MMUEN_MMUDIS (0x00000000U)
#define CSL_MMU_CNTL_MMUEN_MMUEN (0x00000001U)
#define CSL_MMU_CNTL_RESETVAL (0x00000000U)
/* FAULT_AD */
#define CSL_MMU_FAULT_AD_FAULTADDR_MASK (0xFFFFFFFFU)
#define CSL_MMU_FAULT_AD_FAULTADDR_SHIFT (0x00000000U)
#define CSL_MMU_FAULT_AD_FAULTADDR_RESETVAL (0x00000000U)
#define CSL_MMU_FAULT_AD_FAULTADDR_MAX (0xffffffffU)
#define CSL_MMU_FAULT_AD_RESETVAL (0x00000000U)
/* TTB */
#define CSL_MMU_TTB_TTBADDR_MASK (0xFFFFFF80U)
#define CSL_MMU_TTB_TTBADDR_SHIFT (0x00000007U)
#define CSL_MMU_TTB_TTBADDR_RESETVAL (0x00000000U)
#define CSL_MMU_TTB_TTBADDR_MAX (0x01ffffffU)
#define CSL_MMU_TTB_RESETVAL (0x00000000U)
/* LOCK */
#define CSL_MMU_LOCK_BASEVALUE_MASK (0x00007C00U)
#define CSL_MMU_LOCK_BASEVALUE_SHIFT (0x0000000AU)
#define CSL_MMU_LOCK_BASEVALUE_RESETVAL (0x00000000U)
#define CSL_MMU_LOCK_BASEVALUE_MAX (0x0000001fU)
#define CSL_MMU_LOCK_CURRENTVICTIM_MASK (0x000001F0U)
#define CSL_MMU_LOCK_CURRENTVICTIM_SHIFT (0x00000004U)
#define CSL_MMU_LOCK_CURRENTVICTIM_RESETVAL (0x00000000U)
#define CSL_MMU_LOCK_CURRENTVICTIM_MAX (0x0000001fU)
#define CSL_MMU_LOCK_RESETVAL (0x00000000U)
/* LD_TLB */
#define CSL_MMU_LD_TLB_LDTLBITEM_MASK (0x00000001U)
#define CSL_MMU_LD_TLB_LDTLBITEM_SHIFT (0x00000000U)
#define CSL_MMU_LD_TLB_LDTLBITEM_RESETVAL (0x00000000U)
#define CSL_MMU_LD_TLB_LDTLBITEM_READ0 (0x00000000U)
#define CSL_MMU_LD_TLB_LDTLBITEM_READ1 (0x00000001U)
#define CSL_MMU_LD_TLB_LDTLBITEM_WRITE0 (0x00000000U)
#define CSL_MMU_LD_TLB_LDTLBITEM_WRITE1 (0x00000001U)
#define CSL_MMU_LD_TLB_RESETVAL (0x00000000U)
/* CAM */
#define CSL_MMU_CAM_VATAG_MASK (0xFFFFF000U)
#define CSL_MMU_CAM_VATAG_SHIFT (0x0000000CU)
#define CSL_MMU_CAM_VATAG_RESETVAL (0x00000000U)
#define CSL_MMU_CAM_VATAG_MAX (0x000fffffU)
#define CSL_MMU_CAM_P_MASK (0x00000008U)
#define CSL_MMU_CAM_P_SHIFT (0x00000003U)
#define CSL_MMU_CAM_P_RESETVAL (0x00000000U)
#define CSL_MMU_CAM_P_CANFLUSH (0x00000000U)
#define CSL_MMU_CAM_P_NOFLUSH (0x00000001U)
#define CSL_MMU_CAM_V_MASK (0x00000004U)
#define CSL_MMU_CAM_V_SHIFT (0x00000002U)
#define CSL_MMU_CAM_V_RESETVAL (0x00000000U)
#define CSL_MMU_CAM_V_INVALID (0x00000000U)
#define CSL_MMU_CAM_V_VALID (0x00000001U)
#define CSL_MMU_CAM_PAGESIZE_MASK (0x00000003U)
#define CSL_MMU_CAM_PAGESIZE_SHIFT (0x00000000U)
#define CSL_MMU_CAM_PAGESIZE_RESETVAL (0x00000000U)
#define CSL_MMU_CAM_PAGESIZE_SECTION (0x00000000U)
#define CSL_MMU_CAM_PAGESIZE_LARGE (0x00000001U)
#define CSL_MMU_CAM_PAGESIZE_SMALL (0x00000002U)
#define CSL_MMU_CAM_PAGESIZE_SUPER (0x00000003U)
#define CSL_MMU_CAM_RESETVAL (0x00000000U)
/* RAM */
#define CSL_MMU_RAM_PHYSICALADDR_MASK (0xFFFFF000U)
#define CSL_MMU_RAM_PHYSICALADDR_SHIFT (0x0000000CU)
#define CSL_MMU_RAM_PHYSICALADDR_RESETVAL (0x00000000U)
#define CSL_MMU_RAM_PHYSICALADDR_MAX (0x000fffffU)
#define CSL_MMU_RAM_RESETVAL (0x00000000U)
/* GFLUSH */
#define CSL_MMU_GFLUSH_GLOBALFLUSH_MASK (0x00000001U)
#define CSL_MMU_GFLUSH_GLOBALFLUSH_SHIFT (0x00000000U)
#define CSL_MMU_GFLUSH_GLOBALFLUSH_RESETVAL (0x00000000U)
#define CSL_MMU_GFLUSH_GLOBALFLUSH_READ0 (0x00000000U)
#define CSL_MMU_GFLUSH_GLOBALFLUSH_READ1 (0x00000001U)
#define CSL_MMU_GFLUSH_GLOBALFLUSH_WRITE0 (0x00000000U)
#define CSL_MMU_GFLUSH_GLOBALFLUSH_WRITE1 (0x00000001U)
#define CSL_MMU_GFLUSH_RESETVAL (0x00000000U)
/* FLUSH_ENTRY */
#define CSL_MMU_FLUSH_ENTRY_FLUSHENTRY_MASK (0x00000001U)
#define CSL_MMU_FLUSH_ENTRY_FLUSHENTRY_SHIFT (0x00000000U)
#define CSL_MMU_FLUSH_ENTRY_FLUSHENTRY_RESETVAL (0x00000000U)
#define CSL_MMU_FLUSH_ENTRY_FLUSHENTRY_READ0 (0x00000000U)
#define CSL_MMU_FLUSH_ENTRY_FLUSHENTRY_READ1 (0x00000001U)
#define CSL_MMU_FLUSH_ENTRY_FLUSHENTRY_WRITE0 (0x00000000U)
#define CSL_MMU_FLUSH_ENTRY_FLUSHENTRY_WRITE1 (0x00000001U)
#define CSL_MMU_FLUSH_ENTRY_RESETVAL (0x00000000U)
/* READ_CAM */
#define CSL_MMU_READ_CAM_VATAG_MASK (0xFFFFF000U)
#define CSL_MMU_READ_CAM_VATAG_SHIFT (0x0000000CU)
#define CSL_MMU_READ_CAM_VATAG_RESETVAL (0x00000000U)
#define CSL_MMU_READ_CAM_VATAG_MAX (0x000fffffU)
#define CSL_MMU_READ_CAM_P_MASK (0x00000008U)
#define CSL_MMU_READ_CAM_P_SHIFT (0x00000003U)
#define CSL_MMU_READ_CAM_P_RESETVAL (0x00000000U)
#define CSL_MMU_READ_CAM_P_CANFLUSH (0x00000000U)
#define CSL_MMU_READ_CAM_P_NOFLUSH (0x00000001U)
#define CSL_MMU_READ_CAM_V_MASK (0x00000004U)
#define CSL_MMU_READ_CAM_V_SHIFT (0x00000002U)
#define CSL_MMU_READ_CAM_V_RESETVAL (0x00000000U)
#define CSL_MMU_READ_CAM_V_INVALID (0x00000000U)
#define CSL_MMU_READ_CAM_V_VALID (0x00000001U)
#define CSL_MMU_READ_CAM_PAGESIZE_MASK (0x00000003U)
#define CSL_MMU_READ_CAM_PAGESIZE_SHIFT (0x00000000U)
#define CSL_MMU_READ_CAM_PAGESIZE_RESETVAL (0x00000000U)
#define CSL_MMU_READ_CAM_PAGESIZE_SECTION (0x00000000U)
#define CSL_MMU_READ_CAM_PAGESIZE_LARGE (0x00000001U)
#define CSL_MMU_READ_CAM_PAGESIZE_SMALL (0x00000002U)
#define CSL_MMU_READ_CAM_PAGESIZE_SUPER (0x00000003U)
#define CSL_MMU_READ_CAM_RESETVAL (0x00000000U)
/* READ_RAM */
#define CSL_MMU_READ_RAM_PHYSICALADDR_MASK (0xFFFFF000U)
#define CSL_MMU_READ_RAM_PHYSICALADDR_SHIFT (0x0000000CU)
#define CSL_MMU_READ_RAM_PHYSICALADDR_RESETVAL (0x00000000U)
#define CSL_MMU_READ_RAM_PHYSICALADDR_MAX (0x000fffffU)
#define CSL_MMU_READ_RAM_RESETVAL (0x00000000U)
/* EMU_FAULT_AD */
#define CSL_MMU_EMU_FAULT_AD_EMUFAULTADDR_MASK (0xFFFFFFFFU)
#define CSL_MMU_EMU_FAULT_AD_EMUFAULTADDR_SHIFT (0x00000000U)
#define CSL_MMU_EMU_FAULT_AD_EMUFAULTADDR_RESETVAL (0x00000000U)
#define CSL_MMU_EMU_FAULT_AD_EMUFAULTADDR_MAX (0xffffffffU)
#define CSL_MMU_EMU_FAULT_AD_RESETVAL (0x00000000U)
/* TTB_MSB */
#define CSL_MMU_TTB_MSB_TTB_MSB_MASK (0x0000000FU)
#define CSL_MMU_TTB_MSB_TTB_MSB_SHIFT (0x00000000U)
#define CSL_MMU_TTB_MSB_TTB_MSB_RESETVAL (0x00000000U)
#define CSL_MMU_TTB_MSB_TTB_MSB_MAX (0x0000000fU)
#define CSL_MMU_TTB_MSB_RESETVAL (0x00000000U)
/* RAM_MSB */
#define CSL_MMU_RAM_MSB_PA_MSB_MASK (0x0000000FU)
#define CSL_MMU_RAM_MSB_PA_MSB_SHIFT (0x00000000U)
#define CSL_MMU_RAM_MSB_PA_MSB_RESETVAL (0x00000000U)
#define CSL_MMU_RAM_MSB_PA_MSB_MAX (0x0000000fU)
#define CSL_MMU_RAM_MSB_RESETVAL (0x00000000U)
/* READ_RAM_MSB */
#define CSL_MMU_READ_RAM_MSB_PA_MSB_MASK (0x0000000FU)
#define CSL_MMU_READ_RAM_MSB_PA_MSB_SHIFT (0x00000000U)
#define CSL_MMU_READ_RAM_MSB_PA_MSB_RESETVAL (0x00000000U)
#define CSL_MMU_READ_RAM_MSB_PA_MSB_MAX (0x0000000fU)
#define CSL_MMU_READ_RAM_MSB_RESETVAL (0x00000000U)
/* BYPASS_REGION1_ADDR */
#define CSL_MMU_BYPASS_REGION1_ADDR_START_ADDR_MASK (0xFFFF0000U)
#define CSL_MMU_BYPASS_REGION1_ADDR_START_ADDR_SHIFT (0x00000010U)
#define CSL_MMU_BYPASS_REGION1_ADDR_START_ADDR_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION1_ADDR_START_ADDR_MAX (0x0000ffffU)
#define CSL_MMU_BYPASS_REGION1_ADDR_RESETVAL (0x00000000U)
/* BYPASS_REGION1_SIZE */
#define CSL_MMU_BYPASS_REGION1_SIZE_SIZE_MASK (0x0000000FU)
#define CSL_MMU_BYPASS_REGION1_SIZE_SIZE_SHIFT (0x00000000U)
#define CSL_MMU_BYPASS_REGION1_SIZE_SIZE_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION1_SIZE_SIZE_MAX (0x0000000fU)
#define CSL_MMU_BYPASS_REGION1_SIZE_RESETVAL (0x00000000U)
/* BYPASS_REGION2_ADDR */
#define CSL_MMU_BYPASS_REGION2_ADDR_START_ADDR_MASK (0xFFFF0000U)
#define CSL_MMU_BYPASS_REGION2_ADDR_START_ADDR_SHIFT (0x00000010U)
#define CSL_MMU_BYPASS_REGION2_ADDR_START_ADDR_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION2_ADDR_START_ADDR_MAX (0x0000ffffU)
#define CSL_MMU_BYPASS_REGION2_ADDR_RESETVAL (0x00000000U)
/* BYPASS_REGION2_SIZE */
#define CSL_MMU_BYPASS_REGION2_SIZE_SIZE_MASK (0x0000000FU)
#define CSL_MMU_BYPASS_REGION2_SIZE_SIZE_SHIFT (0x00000000U)
#define CSL_MMU_BYPASS_REGION2_SIZE_SIZE_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION2_SIZE_SIZE_MAX (0x0000000fU)
#define CSL_MMU_BYPASS_REGION2_SIZE_RESETVAL (0x00000000U)
/* BYPASS_REGION3_ADDR */
#define CSL_MMU_BYPASS_REGION3_ADDR_START_ADDR_MASK (0xFFFF0000U)
#define CSL_MMU_BYPASS_REGION3_ADDR_START_ADDR_SHIFT (0x00000010U)
#define CSL_MMU_BYPASS_REGION3_ADDR_START_ADDR_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION3_ADDR_START_ADDR_MAX (0x0000ffffU)
#define CSL_MMU_BYPASS_REGION3_ADDR_RESETVAL (0x00000000U)
/* BYPASS_REGION3_SIZE */
#define CSL_MMU_BYPASS_REGION3_SIZE_SIZE_MASK (0x0000000FU)
#define CSL_MMU_BYPASS_REGION3_SIZE_SIZE_SHIFT (0x00000000U)
#define CSL_MMU_BYPASS_REGION3_SIZE_SIZE_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION3_SIZE_SIZE_MAX (0x0000000fU)
#define CSL_MMU_BYPASS_REGION3_SIZE_RESETVAL (0x00000000U)
/* BYPASS_REGION4_ADDR */
#define CSL_MMU_BYPASS_REGION4_ADDR_START_ADDR_MASK (0xFFFF0000U)
#define CSL_MMU_BYPASS_REGION4_ADDR_START_ADDR_SHIFT (0x00000010U)
#define CSL_MMU_BYPASS_REGION4_ADDR_START_ADDR_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION4_ADDR_START_ADDR_MAX (0x0000ffffU)
#define CSL_MMU_BYPASS_REGION4_ADDR_RESETVAL (0x00000000U)
/* BYPASS_REGION4_SIZE */
#define CSL_MMU_BYPASS_REGION4_SIZE_SIZE_MASK (0x0000000FU)
#define CSL_MMU_BYPASS_REGION4_SIZE_SIZE_SHIFT (0x00000000U)
#define CSL_MMU_BYPASS_REGION4_SIZE_SIZE_RESETVAL (0x00000000U)
#define CSL_MMU_BYPASS_REGION4_SIZE_SIZE_MAX (0x0000000fU)
#define CSL_MMU_BYPASS_REGION4_SIZE_RESETVAL (0x00000000U)
/* FAULT_PC */
#define CSL_MMU_FAULT_PC_PC_MASK (0xFFFFFFFFU)
#define CSL_MMU_FAULT_PC_PC_SHIFT (0x00000000U)
#define CSL_MMU_FAULT_PC_PC_RESETVAL (0x00000000U)
#define CSL_MMU_FAULT_PC_PC_MAX (0xffffffffU)
#define CSL_MMU_FAULT_PC_RESETVAL (0x00000000U)
/* FAULT_STS */
#define CSL_MMU_FAULT_STS_MMU_FAULT_TRANS_ID_MASK (0x000001F0U)
#define CSL_MMU_FAULT_STS_MMU_FAULT_TRANS_ID_SHIFT (0x00000004U)
#define CSL_MMU_FAULT_STS_MMU_FAULT_TRANS_ID_RESETVAL (0x00000000U)
#define CSL_MMU_FAULT_STS_MMU_FAULT_TRANS_ID_MAX (0x0000001fU)
#define CSL_MMU_FAULT_STS_RD_WR_MASK (0x00000008U)
#define CSL_MMU_FAULT_STS_RD_WR_SHIFT (0x00000003U)
#define CSL_MMU_FAULT_STS_RD_WR_RESETVAL (0x00000000U)
#define CSL_MMU_FAULT_STS_RD_WR_MAX (0x00000001U)
#define CSL_MMU_FAULT_STS_MMU_FAULT_TYPE_MASK (0x00000006U)
#define CSL_MMU_FAULT_STS_MMU_FAULT_TYPE_SHIFT (0x00000001U)
#define CSL_MMU_FAULT_STS_MMU_FAULT_TYPE_RESETVAL (0x00000000U)
#define CSL_MMU_FAULT_STS_MMU_FAULT_TYPE_MAX (0x00000003U)
#define CSL_MMU_FAULT_STS_FAULTINDICATION_MASK (0x00000001U)
#define CSL_MMU_FAULT_STS_FAULTINDICATION_SHIFT (0x00000000U)
#define CSL_MMU_FAULT_STS_FAULTINDICATION_RESETVAL (0x00000000U)
#define CSL_MMU_FAULT_STS_FAULTINDICATION_MAX (0x00000001U)
#define CSL_MMU_FAULT_STS_RESETVAL (0x00000000U)
/* GPR */
#define CSL_MMU_GPR_GPO_MASK (0xFFFF0000U)
#define CSL_MMU_GPR_GPO_SHIFT (0x00000010U)
#define CSL_MMU_GPR_GPO_RESETVAL (0x00000000U)
#define CSL_MMU_GPR_GPO_MAX (0x0000ffffU)
#define CSL_MMU_GPR_FAULT_INTR_DIS_MASK (0x00000001U)
#define CSL_MMU_GPR_FAULT_INTR_DIS_SHIFT (0x00000000U)
#define CSL_MMU_GPR_FAULT_INTR_DIS_RESETVAL (0x00000000U)
#define CSL_MMU_GPR_FAULT_INTR_DIS_MAX (0x00000001U)
#define CSL_MMU_GPR_RESETVAL (0x00000000U)
#ifdef __cplusplus
}
#endif
#endif
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TestPlan(Model):
"""TestPlan.
:param area:
:type area: :class:`ShallowReference <test.v4_0.models.ShallowReference>`
:param automated_test_environment:
:type automated_test_environment: :class:`TestEnvironment <test.v4_0.models.TestEnvironment>`
:param automated_test_settings:
:type automated_test_settings: :class:`TestSettings <test.v4_0.models.TestSettings>`
:param build:
:type build: :class:`ShallowReference <test.v4_0.models.ShallowReference>`
:param build_definition:
:type build_definition: :class:`ShallowReference <test.v4_0.models.ShallowReference>`
:param client_url:
:type client_url: str
:param description:
:type description: str
:param end_date:
:type end_date: datetime
:param id:
:type id: int
:param iteration:
:type iteration: str
:param manual_test_environment:
:type manual_test_environment: :class:`TestEnvironment <test.v4_0.models.TestEnvironment>`
:param manual_test_settings:
:type manual_test_settings: :class:`TestSettings <test.v4_0.models.TestSettings>`
:param name:
:type name: str
:param owner:
:type owner: :class:`IdentityRef <test.v4_0.models.IdentityRef>`
:param previous_build:
:type previous_build: :class:`ShallowReference <test.v4_0.models.ShallowReference>`
:param project:
:type project: :class:`ShallowReference <test.v4_0.models.ShallowReference>`
:param release_environment_definition:
:type release_environment_definition: :class:`ReleaseEnvironmentDefinitionReference <test.v4_0.models.ReleaseEnvironmentDefinitionReference>`
:param revision:
:type revision: int
:param root_suite:
:type root_suite: :class:`ShallowReference <test.v4_0.models.ShallowReference>`
:param start_date:
:type start_date: datetime
:param state:
:type state: str
:param updated_by:
:type updated_by: :class:`IdentityRef <test.v4_0.models.IdentityRef>`
:param updated_date:
:type updated_date: datetime
:param url:
:type url: str
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'ShallowReference'},
'automated_test_environment': {'key': 'automatedTestEnvironment', 'type': 'TestEnvironment'},
'automated_test_settings': {'key': 'automatedTestSettings', 'type': 'TestSettings'},
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_definition': {'key': 'buildDefinition', 'type': 'ShallowReference'},
'client_url': {'key': 'clientUrl', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'end_date': {'key': 'endDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'int'},
'iteration': {'key': 'iteration', 'type': 'str'},
'manual_test_environment': {'key': 'manualTestEnvironment', 'type': 'TestEnvironment'},
'manual_test_settings': {'key': 'manualTestSettings', 'type': 'TestSettings'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'previous_build': {'key': 'previousBuild', 'type': 'ShallowReference'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'release_environment_definition': {'key': 'releaseEnvironmentDefinition', 'type': 'ReleaseEnvironmentDefinitionReference'},
'revision': {'key': 'revision', 'type': 'int'},
'root_suite': {'key': 'rootSuite', 'type': 'ShallowReference'},
'start_date': {'key': 'startDate', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'str'},
'updated_by': {'key': 'updatedBy', 'type': 'IdentityRef'},
'updated_date': {'key': 'updatedDate', 'type': 'iso-8601'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, area=None, automated_test_environment=None, automated_test_settings=None, build=None, build_definition=None, client_url=None, description=None, end_date=None, id=None, iteration=None, manual_test_environment=None, manual_test_settings=None, name=None, owner=None, previous_build=None, project=None, release_environment_definition=None, revision=None, root_suite=None, start_date=None, state=None, updated_by=None, updated_date=None, url=None):
super(TestPlan, self).__init__()
self.area = area
self.automated_test_environment = automated_test_environment
self.automated_test_settings = automated_test_settings
self.build = build
self.build_definition = build_definition
self.client_url = client_url
self.description = description
self.end_date = end_date
self.id = id
self.iteration = iteration
self.manual_test_environment = manual_test_environment
self.manual_test_settings = manual_test_settings
self.name = name
self.owner = owner
self.previous_build = previous_build
self.project = project
self.release_environment_definition = release_environment_definition
self.revision = revision
self.root_suite = root_suite
self.start_date = start_date
self.state = state
self.updated_by = updated_by
self.updated_date = updated_date
self.url = url
|
import React from 'react';
import PropTypes from 'prop-types';
import Text from '../Text';
import Wrapper from './Wrapper';
const Tag = ({ label }) => {
return (
<Wrapper>
<Text color="grey" fontWeight="bold" fontSize="xs" textTransform="uppercase">
{label}
</Text>
</Wrapper>
);
};
Tag.defaultProps = {
label: null,
};
Tag.propTypes = {
label: PropTypes.string,
};
export default Tag;
|
import * as React from 'react'
import { GatsbyImage } from "gatsby-plugin-image"
import { graphql, useStaticQuery } from 'gatsby'
import { Carousel } from 'react-responsive-carousel'
import 'react-responsive-carousel/lib/styles/carousel.min.css'
import '../../styles/carousel.css';
const VeniceHotel2018SlideShow = () => {
const { allFile } = useStaticQuery(
graphql`
query {
allFile(
filter: {relativeDirectory: {eq: "slideshows/venice-hotel-2018"}}
sort: {fields: name, order: ASC}
) {
edges {
node {
childImageSharp {
gatsbyImageData (
placeholder: BLURRED
)
fields {
exif {
raw {
image {
ImageDescription
}
}
}
}
}
}
}
}
}
`,
)
return (
<div>
<Carousel
showArrows={true}
showThumbs={false}
>
        {allFile.edges.map(({ node }, index) => (
          <div key={index}>
<GatsbyImage image={node.childImageSharp.gatsbyImageData} alt="" style={{ marginLeft: "auto", marginRight: "auto", maxHeight: "80vh", maxWidth: `calc(80vh * ((${node.childImageSharp.gatsbyImageData.width}) / (${node.childImageSharp.gatsbyImageData.height})))` }}/>
<p>
{node.childImageSharp.fields.exif.raw.image.ImageDescription}
</p>
</div>
))}
</Carousel>
</div>
)
}
export default VeniceHotel2018SlideShow;
|
import { Meteor } from 'meteor/meteor';
import { withTracker } from 'meteor/react-meteor-data';
import { Chain, ChainStates } from '/imports/api/chain/chain.js';
import ChainStatus from './ChainStatus.jsx';
export default ChainStatusContainer = withTracker((curr) => {
let statusHandle;
let chainStatesHandle;
let loading = true;
if (Meteor.isClient) {
statusHandle = Meteor.subscribe('chain.status');
chainStatesHandle = Meteor.subscribe('chainStates.latest');
        loading = !statusHandle.ready() || !chainStatesHandle.ready(); // stay in loading until both subscriptions are ready
}
let status;
let states;
let statusExist;
if (Meteor.isServer || (!loading)) {
status = Chain.findOne({ chainId: Meteor.settings.public.chainId });
states = ChainStates.findOne({}, { sort: { height: -1 }, limit: 1 });
if (Meteor.isServer) {
// loading = false;
statusExist = !!status && !!states;
} else {
statusExist = !loading && !!status && !!states;
}
}
return {
loading,
statusExist,
status: statusExist ? status : {},
states: statusExist ? states : {},
};
})(ChainStatus);
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 22:22:07 2020
@author: Tom
"""
numbers_dict = {
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
10: "ten",
11: "eleven",
12: "twelve",
13: "thirteen",
14: "fourteen",
15: "fifteen",
16: "sixteen",
17: "seventeen",
18: "eighteen",
19: "nineteen",
20: "twenty",
30: "thirty",
40: "forty",
50: "fifty",
60: "sixty",
70: "seventy",
80: "eighty",
90: "ninety",
100: "hundred"
}
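# Project Euler 17: total letters used when writing the numbers 1 to 1000 in words
# (spaces and hyphens are not counted).
# The word lengths for 1-99 are summed once below; that block appears 10 times in
# 1-999 (once on its own and once inside each "N hundred and ..." range for N = 1..9),
# hence the multiplication by 10. The "N hundred and" overhead for the 9 * 99 compound
# numbers, the nine exact hundreds (written without "and"), and "one thousand" are
# then added separately.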
num_letters = 0
for i in range(1, 20):
num_letters += len(numbers_dict[i])
for i in range(20, 100):
if i % 10 == 0:
num_letters += len(numbers_dict[i])
else:
num_letters += len(numbers_dict[i - i % 10]) + \
len(numbers_dict[i % 10])
num_letters *= 10
num_letters += 99*((3+7+3) + (3+7+3) + (5+7+3) + (4+7+3) + (4+7+3) +
(3+7+3) + (5+7+3) + (5+7+3) + (4+7+3))
num_letters += len("hundred")*9 + (3+3+5+4+4+3+5+5+4)
num_letters += len("onethousand")
print(num_letters)
|
function buildElement(tag, options) {
const element = document.createElement(tag);
return setElement(element, options);
}
function setElement(
element,
{
classList = [], attributes = {},
    events = {}, children = [],
...rest
} = {}
) {
  for (const klass of classList) {
element.classList.add(klass);
}
for(let attr in attributes) {
const value = attributes[attr];
element.setAttribute(attr, value);
}
for(let ev in events) {
const callback = events[ev];
element[ev] = callback;
}
  for (const child of children) {
element.appendChild(child);
}
  for (const prop in rest) {
element[prop] = rest[prop];
}
return element;
}
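// Usage sketch (illustrative only): build a button carrying a class, a label and a click handler.
// const okButton = buildElement('button', {
//   classList: ['apply-button'],
//   innerHTML: 'OK',
//   events: { onclick: () => console.log('clicked') },
// });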
function onDragStart(event) {
setTimeout(() => {
event.target.classList.add('hide');
}, 0);
event.dataTransfer.setData('text/plain', event.target.id);
}
function onDragEnd(event) {
event.target.classList.remove('hide');
}
function onDragOver(event) {
event.preventDefault();
}
function onDrop(event) {
event.preventDefault();
const id = event.dataTransfer.getData('text/plain');
const draggableElement = document.getElementById(id);
const previewCard = event.target.closest('.board').querySelector('.preview-card');
const dropzone = event.target.closest('.board')
.querySelector('.card-container');
if(event.target.classList.contains('card')) {
dropzone.insertBefore(draggableElement, event.target);
} else {
dropzone.appendChild(draggableElement);
}
previewCard.remove();
}
function Placeholder() {
return buildElement(
'div', {
classList: ['card', 'preview-card'],
});
}
const placeholder = Placeholder();
function onDragEnter({target, ..._}, cardContainer) {
if(!target.classList) { return; }
if(target.classList.contains('card')) {
cardContainer.insertBefore(placeholder, target);
} else {
cardContainer.appendChild(placeholder);
}
}
function onDragLeave() {
  // The placeholder lives inside a card container, not directly under `document`;
  // Element.remove() detaches it regardless of its current parent.
  placeholder.remove();
}
function Board(header) {
const cardContainer = CardContainer();
const board = buildElement('div');
return setElement(
board, {
classList: ['board'],
events: {
ondragover: onDragOver,
ondrop: onDrop,
ondragenter: event => { onDragEnter(event, cardContainer) },
        ondragleave: onDragLeave
}, children: [
RemoveElementButton(board),
BoardHeader(header),
cardContainer,
AddCardButton(cardContainer)
],
// attributes: {draggable: true}
}
)
}
function Card(str_text) {
  // Track the instance count on the function itself; `this` is the global object
  // (or undefined in strict mode) when Card is called as a plain function.
  if (!Card.counter) { Card.counter = 0 }
  Card.counter += 1;
const text = document.createTextNode(str_text);
const card = buildElement('div');
return setElement(
card, {
      id: `drag-card-${Card.counter}`,
classList: ['card'],
attributes: {draggable: true},
children: [
text,
RemoveElementButton(card)
], events: {
ondragstart: onDragStart,
ondragend: onDragEnd
},
}
)
}
function CardContainer() {
return buildElement(
'div', {classList:['card-container']}
)
}
function BoardHeader(header) {
return buildElement(
'h3', {
classList: ['board-header'],
innerHTML: header,
}
)
}
function RemoveElementButton(element) {
return buildElement(
'div', {
classList: ['remove-card'],
innerHTML: '<i class="fas fa-trash"></i>',
events: { onclick: () => { element.remove(); } }
}
)
}
function TextField(placeholder) {
return buildElement(
'input', {
placeholder: placeholder,
attributes: { required: true }
}
)
}
function FormApplyButton() {
return buildElement(
'button', {
classList: ['apply-button'],
innerHTML: 'Criar'
}
)
}
function FormCancelButton(addElementButton, elementForm) {
return buildElement(
'button', {
classList: ['cancel-button'],
attributes: { type: 'button' },
innerHTML: 'Cancelar',
events: {
onclick: () => {
addElementButton.hidden = false;
elementForm.remove();
}
}
}
)
}
function CardForm(addCardButton, cardContainer) {
const cardForm = buildElement('form');
const cardFormTextField = TextField("Insira Título do Cartão");
return setElement(
cardForm, {
classList: ['card-form'],
children: [
cardFormTextField,
FormApplyButton(),
FormCancelButton(addCardButton, cardForm)
], onsubmit: event => {
const card = Card(cardFormTextField.value);
addCardButton.hidden = false;
cardContainer.appendChild(card);
event.target.remove();
return false;
}
}
)
}
function BoardForm() {
const boardForm = buildElement('form');
const boardFormTextField = TextField("Insira Título da Lista");
return setElement(
boardForm, {
classList: ['board-form'],
children: [
boardFormTextField,
FormApplyButton(),
FormCancelButton(AddBoardButton, boardForm)
], onsubmit: event => {
const board = Board(boardFormTextField.value);
AddBoardButton.hidden = false;
BoardContainer.insertBefore(board, AddBoardButton);
event.target.remove();
return false;
}
});
}
function addCard(addCardButton, cardContainer, {target, ..._}) {
addCardButton.hidden = true;
const cardForm = CardForm(addCardButton, cardContainer);
cardContainer.appendChild(cardForm);
cardForm.querySelector('input').focus();
}
function AddCardButton(cardContainer) {
const addCardButton = buildElement('div');
return setElement(
addCardButton, {
classList: ['new-card'],
innerHTML: "+ Adicionar outro item",
events: {
onclick: event => { addCard(addCardButton, cardContainer, event); }
}
}
)
}
function addBoard(event) {
AddBoardButton.hidden = true;
const boardForm = BoardForm();
BoardContainer.insertBefore(boardForm, event.target);
boardForm.querySelector('input').focus();
}
const AddBoardButton = buildElement(
'a', {
innerHTML: "+ Adicionar Outra Lista",
classList: ['new-board'],
id: 'new-board-button',
events: { onclick: addBoard }
})
const BoardContainer = buildElement(
'a', {
classList: ['board-container'],
id: 'board-container',
children: [AddBoardButton]
})
window.onload = () => { document.body.appendChild(BoardContainer); };
|
/// <reference types="cypress" />
// ***********************************************************
// This example plugins/index.js can be used to load plugins
//
// You can change the location of this file or turn off loading
// the plugins file with the 'pluginsFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/plugins-guide
// ***********************************************************
// This function is called when a project is opened or re-opened (e.g. due to
// the project's config changing)
/**
* @type {Cypress.PluginConfig}
*/
/* eslint-disable @typescript-eslint/no-var-requires */
const graphql = require("graphql-request");
module.exports = async (on, config) => {
// make env variables visible for cypress
config.env.API_URI = process.env.API_URI;
config.env.APP_MOUNT_URI = process.env.APP_MOUNT_URI;
config.env.mailHogUrl = process.env.CYPRESS_MAILHOG;
config.env.SHOP = await getShopInfo(process.env);
config.env.STRIPE_SECRET_KEY = process.env.STRIPE_SECRET_KEY;
config.env.STRIPE_PUBLIC_KEY = process.env.STRIPE_PUBLIC_KEY;
on("before:browser:launch", (browser = {}, launchOptions) => {
launchOptions.args.push("--proxy-bypass-list=<-loopback>");
return launchOptions;
});
return config;
};
function getShopInfo(envVariables) {
// envVariables.CYPRESS_USER_NAME
const variables = {
email: envVariables.CYPRESS_USER_NAME,
password: envVariables.CYPRESS_USER_PASSWORD
};
const createTokenMutation = graphql.gql`mutation tokenCreate($email: String!, $password: String!){
tokenCreate(email:$email, password:$password){
token
}
}`;
const getShopInfoQuery = graphql.gql`query{
shop{
version
}
}`;
const client = new graphql.GraphQLClient(envVariables.API_URI, {
headers: {}
});
return client.request(createTokenMutation, variables).then(data => {
const token = data.tokenCreate.token;
client.setHeader("Authorization", `JWT ${token}`);
return client
.request(getShopInfoQuery)
.then(shopInfo => shopInfo.shop.version);
});
}
|
import React from 'react'
import PropTypes from 'prop-types'
import classNames from 'classnames'
import BarItem from './BarItem'
import toLower from 'lodash/toLower'
import logo from '../../assets/logo.png'
import style from './index.scss'
const Sidebar = props => {
let {appName, menuCollaps, selectedItem, toggleMenu, changeActiveItem, navigateTo} = props
let siderItems = null
siderItems = <ul className={style.accordionList}>
</ul>
return (
<div className={classNames(style.wrapper, menuCollaps? style.wrapperCollaps: "", 'clearfix')}>
<div className={style.sidebarHeader}>
<img src={logo} alt="tookitaki" title="logo" />
<h5>ALPHA</h5>
</div>
<a href="javascript:void(0)" className={style.menuToggle} onClick={() => toggleMenu()}>
<i className={classNames('fa', menuCollaps? 'fa-chevron-right' : 'fa-chevron-left')}
aria-hidden="true"></i>
</a>
<nav id={style.sidebar}>
{siderItems}
</nav>
</div>
);
}
Sidebar.propTypes = {
appName: PropTypes.string,
toggleMenu: PropTypes.func,
changeActiveItem: PropTypes.func,
menuCollaps: PropTypes.bool,
selectedItem: PropTypes.number,
navigateTo: PropTypes.func,
};
export default Sidebar;
|
import logging
import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.shortcuts import render
from django.urls import reverse
from helusers.utils import uuid_to_username
from auth_backends.adfs.base import BaseADFS
from auth_backends.helsinki_tunnistus_suomifi import HelsinkiTunnistus
from auth_backends.tunnistamo import Tunnistamo
from users.models import LoginMethod
from users.views import AuthenticationErrorView
logger = logging.getLogger(__name__)
def get_user_uuid(details, backend, response, user=None, *args, **kwargs):
"""Add `uuid` argument to the pipeline.
Makes `uuid` argument available to other pipeline entries.
If the backend provides `get_user_uuid` method (as is the case with
the ADFS backends and Keycloak suomi.fi backend), it is used to
generate the UUID. Otherwise, the UUID is generated with `uuid.uuid1`
function.
The argument is named same as the django-helusers `uuid` field.
This allows syncing the helusers `uuid`-field with uuid generated
here using SOCIAL_AUTH_backend_name_USER_FIELDS.
"""
    if user and getattr(user, 'uuid', None):
return {
'uuid': user.uuid,
}
if callable(getattr(backend, 'get_user_uuid', None)):
new_uuid = backend.get_user_uuid(details, response)
else:
new_uuid = uuid.uuid1()
return {
'uuid': new_uuid,
}
def get_username(strategy, user=None, *args, **kwargs):
"""Sets the `username` argument.
If the user exists already, use the existing username. Otherwise
generate username from the `new_uuid` using the
`helusers.utils.uuid_to_username` function.
"""
storage = strategy.storage
if not user:
user_uuid = kwargs.get('uuid')
if not user_uuid:
return
final_username = uuid_to_username(user_uuid)
else:
final_username = storage.user.get_username(user)
return {
'username': final_username
}
def require_email(strategy, details, backend, user=None, *args, **kwargs):
"""Enforce email address.
Stop authentication and render the `email_needed` template if the
`details` received from the social auth doesn't include an email
address.
"""
logger.debug(f"enforcing email; user:{user}; details:{details}, backend: {backend.name}")
if user:
logger.debug(f"user: {user} already exists. Will not check email.")
return
# Some backends do not have email available for all their users, allow config to
# bypass this check. (unused suomi.fi backend is one such)
if backend.name in settings.EMAIL_EXEMPT_AUTH_BACKENDS:
logger.debug(f"backend '{backend.name}' exempt from email checks")
return
if details.get('email'):
return
reauth_uri = reverse('social:begin', kwargs={'backend': backend.name})
if backend.name == 'facebook':
reauth_uri += '?auth_type=rerequest'
return render(strategy.request, 'email_needed.html', {'reauth_uri': reauth_uri}, status=401)
def associate_by_email(strategy, details, user=None, *args, **kwargs):
"""Deny duplicate email addresses for new users except in specific cases
If the incoming email is associated with existing user, authentication
is denied. Exceptions are:
* the existing user does not have associated social login
* the incoming email belongs to a trusted domain
* the duplicate email address check has been disabled in the settings
In the first two cases, the incoming social login is associated with the existing user.
In the third case a separate new user is created.
"""
logger.debug(f"starting association by email; user:{user}; details:{details}")
if user:
return
if settings.ALLOW_DUPLICATE_EMAILS:
return
email = details.get('email')
if not email:
return
backend = kwargs['backend']
User = get_user_model() # noqa
existing_users = User.objects.filter(email__iexact=email).order_by('-date_joined')
if not existing_users:
return
logger.debug(f"found existing users with email '{email}': {existing_users}")
user = existing_users[0]
trusted_email_domains = backend.setting('TRUSTED_EMAIL_DOMAINS', [])
explicitly_trusted = False
if trusted_email_domains:
email_domain = email.split('@')[1]
if email_domain in trusted_email_domains or trusted_email_domains == '*':
explicitly_trusted = True
social_set = user.social_auth.all()
# If the account doesn't have any social logins yet, or if we
# explicitly trust the social media provider, allow the signup.
if explicitly_trusted or not social_set:
return {
'user': user,
}
logger.debug(f"'{email}' already in use by existing user and email domain not trusted")
providers = [a.provider for a in social_set]
strategy.request.other_logins = LoginMethod.objects.filter(provider_id__in=providers)
error_view = AuthenticationErrorView(request=strategy.request)
return error_view.get(strategy.request)
def update_ad_groups(details, backend, user=None, *args, **kwargs):
"""Update users AD groups.
Updates the users `ADGroup`s if the user authenticated through an ADFS
backend.
"""
if not isinstance(backend, (BaseADFS, Tunnistamo)) or not user or 'ad_groups' not in details:
return
user.update_ad_groups(details['ad_groups'])
def check_existing_social_associations(backend, strategy, user=None, social=None, *args, **kwargs):
"""Deny adding additional social auths
social_core.pipeline.social_auth.associate_user would automatically
    add additional social auths for the user, if they successfully
authenticated to another IdP while holding a session with Tunnistamo.
We don't want this to happen, as there is no interface for managing
additional IdPs.
"""
logger.debug(f"starting check for existing social assoc; user:{user}; backend: {backend.name}; social:{social}")
if user and not social:
social_set = user.social_auth.all()
providers = [a.provider for a in social_set]
logger.debug(f"social does not exist; providers: {providers}")
if providers and backend.name not in providers:
strategy.request.other_logins = LoginMethod.objects.filter(provider_id__in=providers)
error_view = AuthenticationErrorView(request=strategy.request)
return error_view.get(strategy.request)
def save_social_auth_backend(backend, user=None, *args, **kwargs):
if user:
user.last_login_backend = backend.name
user.save()
def save_loa_to_session(backend, strategy, user=None, social=None, *args, **kwargs):
if not user or not social or not isinstance(backend, HelsinkiTunnistus) or not backend.id_token:
return
# Save the "loa" claim received from the Helsinki Tunnistus Keycloak to the session.
# This will be in turn added as a "loa" claim to the tokens Tunnistamo supplies.
strategy.request.session["heltunnistussuomifi_loa"] = backend.id_token.get(
"loa",
"low"
)
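
# A minimal sketch of how these steps might be wired into the authentication pipeline.
# The dotted module path ("users.pipeline") and the exact ordering are illustrative
# assumptions, not taken from this project's settings; the social_core entries are the
# stock python-social-auth steps they would typically sit between.
#
# SOCIAL_AUTH_PIPELINE = (
#     'social_core.pipeline.social_auth.social_details',
#     'social_core.pipeline.social_auth.social_uid',
#     'social_core.pipeline.social_auth.social_user',
#     'users.pipeline.require_email',
#     'users.pipeline.associate_by_email',
#     'users.pipeline.check_existing_social_associations',
#     'users.pipeline.get_user_uuid',
#     'users.pipeline.get_username',
#     'social_core.pipeline.user.create_user',
#     'social_core.pipeline.social_auth.associate_user',
#     'social_core.pipeline.social_auth.load_extra_data',
#     'social_core.pipeline.user.user_details',
#     'users.pipeline.update_ad_groups',
#     'users.pipeline.save_social_auth_backend',
#     'users.pipeline.save_loa_to_session',
# )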
|
from pandac.PandaModules import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from toontown.friends import FriendHandle
from otp.avatar import Avatar
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.friends import ToontownFriendSecret
import ToonAvatarDetailPanel
import AvatarPanelBase
import PlayerDetailPanel
from otp.otpbase import OTPGlobals
GAME_LOGO_NAMES = {'Default': 'GameLogo_Unknown',
'Disney XD': 'GameLogo_XD',
'Toontown': 'GameLogo_Toontown',
'Pirates': 'GameLogo_Pirates'}
GAME_LOGO_FILE = 'phase_3/models/misc/game_logo_card'
class PlayerInfoPanel(AvatarPanelBase.AvatarPanelBase):
notify = DirectNotifyGlobal.directNotify.newCategory('PlayerInfoPanel')
def __init__(self, playerId):
from toontown.friends import FriendsListPanel
AvatarPanelBase.AvatarPanelBase.__init__(self, None, FriendsListPanel=FriendsListPanel)
self.setup(playerId)
self.avId = 0
self.avName = None
return
def setup(self, playerId):
from toontown.friends import FriendsListPanel
self.playerId = playerId
self.playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
if not self.playerInfo:
return
avId = None
avatar = None
if playerId:
if self.playerInfo.onlineYesNo:
avId = self.playerInfo.avatarId
avatar = base.cr.playerFriendsManager.identifyFriend(avId)
self.notify.debug('Opening player panel, %s' % self.playerInfo)
self.avatar = avatar
self.noAv = 0
if not avatar:
self.noAv = 1
self.accountText = None
self.listName = ' '
world = self.playerInfo.location
if self.playerInfo.onlineYesNo == 0:
world = TTLocalizer.AvatarDetailPanelRealLife
self.accountText = self.playerInfo.playerName
if self.noAv:
avButtonState = DGG.DISABLED
else:
avButtonState = DGG.NORMAL
self.online = self.playerInfo.onlineYesNo
if self.online:
onlineButtonState = DGG.NORMAL
else:
onlineButtonState = DGG.DISABLED
base.localAvatar.obscureFriendsListButton(1)
gui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
self.frame = DirectFrame(
image=gui.find('**/avatar_panel'),
relief=None,
pos=(1.1, 100, 0.525))
disabledImageColor = Vec4(1, 1, 1, 0.4)
text0Color = Vec4(1, 1, 1, 1)
text1Color = Vec4(0.5, 1, 0.5, 1)
text2Color = Vec4(1, 1, 0.5, 1)
text3Color = Vec4(0.6, 0.6, 0.6, 1)
if self.playerInfo:
logoImageName = GAME_LOGO_NAMES['Default']
if not self.playerInfo.onlineYesNo:
logoImageName = GAME_LOGO_NAMES['Default']
elif GAME_LOGO_NAMES.has_key(self.playerInfo.location):
logoImageName = GAME_LOGO_NAMES[self.playerInfo.location]
model = loader.loadModel(GAME_LOGO_FILE)
logoImage = model.find('**/' + logoImageName)
del model
self.outsideLogo = DirectLabel(
parent=self.frame,
relief=None,
image=logoImage,
pos=(0.0125, 0.0, 0.25),
image_color=(1.0, 1.0, 1.0, 1),
scale=(0.175, 1, 0.175))
font = ToontownGlobals.getInterfaceFont()
textScale = 0.047
textWrap = 7.5
textAlign = TextNode.ACenter
textPos = (0, 0)
self.nameLabel = DirectLabel(
parent=self.frame,
pos=(0.0125, 0, 0.385),
relief=None,
text=self.listName,
text_font=font,
text_fg=Vec4(0, 0, 0, 1),
text_pos=textPos,
text_scale=textScale,
text_wordwrap=textWrap,
text_align=textAlign,
text_shadow=(1, 1, 1, 1))
if self.accountText:
self.accountLabel = DirectLabel(
parent=self.frame,
pos=(0.0125, 0, 0.385),
text=self.accountText,
relief=None,
text_font=font,
text_fg=Vec4(0, 0, 0, 1),
text_pos=textPos,
text_scale=textScale,
text_wordwrap=textWrap,
text_align=textAlign,
text_shadow=(1, 1, 1, 1))
self.accountLabel.show()
self.closeButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/CloseBtn_UP'),
gui.find('**/CloseBtn_DN'),
gui.find('**/CloseBtn_Rllvr'),
gui.find('**/CloseBtn_UP')),
relief=None,
pos=(0.157644, 0, -0.379167),
command=self.__handleClose)
self.friendButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/Frnds_Btn_UP'),
gui.find('**/Frnds_Btn_DN'),
gui.find('**/Frnds_Btn_RLVR'),
gui.find('**/Frnds_Btn_UP')),
image3_color=disabledImageColor,
image_scale=0.9,
relief=None,
text=TTLocalizer.AvatarPanelFriends,
text_scale=0.06,
pos=(-0.103, 0, 0.133),
text0_fg=text0Color,
text1_fg=text1Color,
text2_fg=text2Color,
text3_fg=text3Color,
text_pos=(0.06, -0.02),
text_align=TextNode.ALeft,
state=avButtonState,
command=self.__handleFriend)
self.friendButton['state'] = DGG.DISABLED
self.goToButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/Go2_Btn_UP'),
gui.find('**/Go2_Btn_DN'),
gui.find('**/Go2_Btn_RLVR'),
gui.find('**/Go2_Btn_UP')),
image3_color=disabledImageColor,
image_scale=0.9,
relief=None,
pos=(-0.103, 0, 0.045),
text=TTLocalizer.AvatarPanelGoTo,
text0_fg=text0Color,
text1_fg=text1Color,
text2_fg=text2Color,
text3_fg=text3Color,
text_scale=0.06,
text_pos=(0.06, -0.015),
text_align=TextNode.ALeft,
state=avButtonState,
command=self.__handleGoto)
self.goToButton['state'] = DGG.DISABLED
self.whisperButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/ChtBx_ChtBtn_UP'),
gui.find('**/ChtBx_ChtBtn_DN'),
gui.find('**/ChtBx_ChtBtn_RLVR'),
gui.find('**/ChtBx_ChtBtn_UP')),
image3_color=disabledImageColor,
relief=None,
image_scale=0.9,
pos=(-0.103, 0, -0.0375),
text=TTLocalizer.AvatarPanelWhisper,
text0_fg=text0Color,
text1_fg=text1Color,
text2_fg=text2Color,
text3_fg=text3Color,
text_scale=TTLocalizer.PIPwisperButton,
text_pos=(0.06, -0.0125),
text_align=TextNode.ALeft,
state=onlineButtonState,
command=self.__handleWhisper)
self.secretsButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/ChtBx_ChtBtn_UP'),
gui.find('**/ChtBx_ChtBtn_DN'),
gui.find('**/ChtBx_ChtBtn_RLVR'),
gui.find('**/ChtBx_ChtBtn_UP')),
image3_color=disabledImageColor,
image_scale=0.9,
relief=None,
pos=(-0.103, 0, -0.13),
text=TTLocalizer.AvatarPanelSecrets,
text0_fg=text0Color,
text1_fg=text1Color,
text2_fg=text2Color,
text3_fg=text3Color,
text_scale=TTLocalizer.PIPsecretsButton,
text_pos=(0.055, -0.01),
text_align=TextNode.ALeft,
state=avButtonState,
command=self.__handleSecrets)
self.secretsButton['state'] = DGG.DISABLED
if not base.localAvatar.isTeleportAllowed():
self.goToButton['state'] = DGG.DISABLED
ignoreStr, ignoreCmd, ignoreSize = self.getIgnoreButtonInfo()
self.ignoreButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/Ignore_Btn_UP'),
gui.find('**/Ignore_Btn_DN'),
gui.find('**/Ignore_Btn_RLVR'),
gui.find('**/Ignore_Btn_UP')),
image3_color=disabledImageColor,
image_scale=0.9,
relief=None,
pos=(-0.103697, 0, -0.21),
text=ignoreStr,
text0_fg=text0Color,
text1_fg=text1Color,
text2_fg=text2Color,
text3_fg=text3Color,
text_scale=ignoreSize,
text_pos=(0.06, -0.015),
text_align=TextNode.ALeft,
state=avButtonState,
command=ignoreCmd)
if base.cr.productName not in ['JP', 'DE', 'BR', 'FR']:
self.reportButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/report_BtnUP'),
gui.find('**/report_BtnDN'),
gui.find('**/report_BtnRLVR'),
gui.find('**/report_BtnUP')),
image3_color=disabledImageColor,
image_scale=0.65,
relief=None,
pos=(-0.103, 0, -0.29738),
text=TTLocalizer.AvatarPanelReport,
text0_fg=text0Color,
text1_fg=text1Color,
text2_fg=text2Color,
text3_fg=text3Color,
text_scale=0.06,
text_pos=(0.06, -0.015),
text_align=TextNode.ALeft,
command=self.handleReport)
self.detailButton = DirectButton(
parent=self.frame,
image=(
gui.find('**/ChtBx_BackBtn_UP'),
gui.find('**/ChtBx_BackBtn_DN'),
gui.find('**/ChtBx_BackBtn_Rllvr'),
gui.find('**/ChtBx_BackBtn_UP')),
relief=None,
text=('', TTLocalizer.PlayerPanelDetail,
TTLocalizer.PlayerPanelDetail, ''),
text_fg=text2Color,
text_shadow=(0, 0, 0, 1),
text_scale=TTLocalizer.PIPdetailButton,
text_pos=(0.085, 0.055),
text_align=TextNode.ACenter,
pos=(-0.133773, 0, -0.387132),
state=DGG.NORMAL,
command=self.__handleDetails)
gui.removeNode()
menuX = -0.05
menuScale = 0.064
self.frame.show()
messenger.send('avPanelDone')
self.accept('playerOnline', self.__handlePlayerChanged)
self.accept('playerOffline', self.__handlePlayerChanged)
self.accept(OTPGlobals.PlayerFriendUpdateEvent, self.__handlePlayerChanged)
self.accept(OTPGlobals.PlayerFriendRemoveEvent, self.__handlePlayerUnfriend)
return
def disableAll(self):
self.detailButton['state'] = DGG.DISABLED
self.ignoreButton['state'] = DGG.DISABLED
if base.cr.productName not in ['JP',
'DE',
'BR',
'FR']:
self.reportButton['state'] = DGG.DISABLED
self.goToButton['state'] = DGG.DISABLED
self.secretsButton['state'] = DGG.DISABLED
self.whisperButton['state'] = DGG.DISABLED
self.friendButton['state'] = DGG.DISABLED
self.closeButton['state'] = DGG.DISABLED
def cleanup(self):
self.unsetup()
self.ignore('playerOnline')
self.ignore('playerOffline')
self.ignore(OTPGlobals.PlayerFriendUpdateEvent)
self.ignore(OTPGlobals.PlayerFriendRemoveEvent)
AvatarPanelBase.AvatarPanelBase.cleanup(self)
def unsetup(self):
if not hasattr(self, 'frame') or self.frame == None:
return
PlayerDetailPanel.unloadPlayerDetail()
self.frame.destroy()
del self.frame
self.frame = None
base.localAvatar.obscureFriendsListButton(-1)
self.laffMeter = None
self.ignore('updateLaffMeter')
if hasattr(self.avatar, 'bFake') and self.avatar.bFake:
self.avatar.delete()
return
def __handleGoto(self):
if base.localAvatar.isTeleportAllowed():
base.localAvatar.chatMgr.noWhisper()
messenger.send('gotoAvatar', [self.avId, self.avName, self.avDisableName])
def __handleWhisper(self):
if self.noAv:
base.localAvatar.chatMgr.whisperTo(self.listName, 0, self.playerId)
else:
base.localAvatar.chatMgr.whisperTo(self.avName, self.avId, self.playerId)
def __handleSecrets(self):
base.localAvatar.chatMgr.noWhisper()
ToontownFriendSecret.showFriendSecret(ToontownFriendSecret.BothSecrets)
def __handleFriend(self):
base.localAvatar.chatMgr.noWhisper()
self.__getAvInfo()
messenger.send('friendAvatar', [self.avId, self.avName, self.avDisableName])
def __getAvInfo(self):
if self.playerId:
self.avId = self.playerInfo.avatarId
if self.avId:
avatar = base.cr.playerFriendsManager.identifyFriend(self.avId)
if avatar:
self.avName = avatar.getName()
if not self.avDisableName:
self.avDisableName = avatar.uniqueName('disable')
def __handleDetails(self):
base.localAvatar.chatMgr.noWhisper()
self.__getAvInfo()
messenger.send('playerDetails', [self.avId, self.avName, self.playerId])
def handleDisableAvatar(self):
pass
def __handlePlayerChanged(self, playerId, info = None):
if playerId == self.playerId:
self.unsetup()
self.setup(playerId)
def __handlePlayerUnfriend(self, playerId):
if playerId == self.playerId:
self.__handleClose()
def __handleClose(self):
self.cleanup()
AvatarPanelBase.currentAvatarPanel = None
if self.friendsListShown:
self.FriendsListPanel.showFriendsList()
return
def getAvId(self):
if hasattr(self, 'avatar'):
if self.avatar:
return self.avatar.doId
return None
def getPlayerId(self):
if hasattr(self, 'playerId'):
return self.playerId
return None
def isHidden(self):
if not hasattr(self, 'frame') or not self.frame:
return 1
return self.frame.isHidden()
def getType(self):
return 'player'
|
# -*- coding:utf-8 -*-
from calendar import monthrange
from datetime import datetime, timedelta
__author__ = [
'"liubo" <liubo@hi-wifi.cn>'
]
def get_first_time_of_month(year=None, month=None):
if not year or not month:
now = datetime.now()
if not year:
year = now.year
if not month:
month = now.month
return datetime(year, month, 1, hour=0, minute=0, second=0)
def get_last_time_of_month(year=None, month=None):
if not year or not month:
now = datetime.now()
if not year:
year = now.year
if not month:
month = now.month
    # monthrange() returns (weekday of the first day, number of days in the month)
    _, last_day = monthrange(year, month)
return datetime(year, month, last_day, hour=23, minute=59, second=59)
def get_tomorrow(year=None, month=None, day=None):
if not year or not month or not day:
now = datetime.now()
if not year:
year = now.year
if not month:
month = now.month
if not day:
day = now.day
return datetime(year, month, day) + timedelta(days=1)
def is_same_day(date1, date2):
if date1 and date2:
return date1.year == date2.year and date1.month == date2.month and date1.day == date2.day
return False
def last_time_of_day(date):
return datetime(date.year, date.month, date.day, hour=23, minute=59, second=59)
def get_days(start, end):
    '''
    Return every date between start and end (inclusive).
    '''
    if start > end:
        start, end = end, start
ret = []
while start.year != end.year or start.month != end.month or start.day != end.day:
ret.append(start)
start = start + timedelta(days=1)
ret.append(end)
return ret
def week_start_end(date=None):
    '''
    Return the start and end datetimes of the week containing `date` (defaults to now).
    A `datetime.now()` default in the signature would be evaluated only once, at import time.
    '''
    if date is None:
        date = datetime.now()
    week_day = date.weekday()  # weekday() runs from 0 to 6; 0 is Monday
start_day = date - timedelta(days=week_day)
end_day = date + timedelta(days=(6 - week_day))
start_day = datetime(year=start_day.year, month=start_day.month, day=start_day.day, hour=0, minute=0, second=0)
end_day = datetime(year=end_day.year, month=end_day.month, day=end_day.day, hour=23, minute=59, second=59)
return start_day, end_day
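
if __name__ == '__main__':
    # Usage sketch (illustrative only, not part of the original module).
    start, end = week_start_end(datetime(2020, 5, 14))
    print(start, end)  # Monday 00:00:00 through Sunday 23:59:59 of that week
    for day in get_days(datetime(2020, 5, 1), datetime(2020, 5, 3)):
        print(day)  # 2020-05-01, 2020-05-02, 2020-05-03 (midnight datetimes)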
|
/**
* Created by Chen on 2016/3/7.
*/
describe('Test of sub structure search', function(){
var MB = TestMolBuilder;
var allMols = {};
var allMolNames = [
'benzene', 'PhEt', 'hexane',
'BnBr', 'iPrPh', 'diene',
'cisButene', 'transButene',
'cisDiene', 'transDiene',
'cisDiMeHexane', 'transDiMeHexane1', 'transDiMeHexane2',
'cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2',
'subgroup1',
'github112_qry', 'github112_tgt'
];
beforeAll(function(done){ // load all essential molecules
var allUrls = [];
allMolNames.forEach(function(name){
var url = 'searchTest/' + name + '.mol';
allUrls.push(url);
});
MB.loadExternalFiles(allUrls, function(mols){
mols.forEach(function(mol, index)
{
var name = allMolNames[index];
allMols[name] = mol;
});
//console.log(allMols);
done();
});
});
var testSearch = function(title, targetMolName, srcMolNames, options, expectResults)
{
it(title, function(){
var targetMol = allMols[targetMolName];
expect(targetMol).not.toBeNull();
for (var i = 0, l = srcMolNames.length; i < l; ++i)
{
var srcMol = allMols[srcMolNames[i]];
expect(srcMol).not.toBeNull();
try
{
//console.log(srcMolNames[i], srcMol.getClassName());
var result = !!srcMol.search(targetMol, options);
}
catch(e)
{
//console.log('fail', srcMolNames[i]);
throw e;
}
expect(result).toEqual(expectResults[i]);
}
});
};
  testSearch('Search based on skeletal 1',
'benzene',
['BnBr', 'iPrPh', 'cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.SKELETAL},
[true, true, true, true, true]
);
  testSearch('Search based on skeletal 2',
'hexane',
['BnBr', 'iPrPh', 'cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.SKELETAL},
[true, true, true, true, true]
);
  testSearch('Search based on skeletal 3',
'PhEt',
['BnBr', 'iPrPh', 'cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.SKELETAL},
[true, true, true, true, true]
);
  testSearch('Search based on constitution 1',
'benzene',
['BnBr', 'iPrPh', 'cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.CONSTITUTION},
[true, true, false, false, false]
);
  testSearch('Search based on constitution 2',
'hexane',
['BnBr', 'iPrPh', 'cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.CONSTITUTION},
[false, false, true, true, true]
);
  testSearch('Search based on constitution 3',
'PhEt',
['BnBr', 'iPrPh', 'cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.CONSTITUTION},
[false, true, false, false, false]
);
  testSearch('Search based on constitution 4',
'transButene',
['transDiene', 'cisDiene'],
{'level': Kekule.StructureComparationLevel.CONSTITUTION},
[true, true]
);
  testSearch('Search based on constitution 5',
'cisButene',
['transDiene', 'cisDiene'],
{'level': Kekule.StructureComparationLevel.CONSTITUTION},
[true, true]
);
  testSearch('Search based on constitution 6',
'github112_qry',
['github112_tgt'],
{'level': Kekule.StructureComparationLevel.CONSTITUTION},
    [true]
);
  testSearch('Search based on configuration 1',
'cisDiMeHexane',
['cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[true, false, false]
);
  testSearch('Search based on configuration 2',
'transDiMeHexane1',
['cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[false, true, false]
);
  testSearch('Search based on configuration 3',
'transDiMeHexane2',
['cisDiEtHexane', 'transDiEtHexane1', 'transDiEtHexane2'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[false, false, true]
);
  testSearch('Search based on configuration 4',
'transButene',
['transDiene', 'cisDiene'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[true, false]
);
  testSearch('Search based on configuration 5',
'cisButene',
['transDiene', 'cisDiene'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[false, true]
);
testSearch('Search on molecule with subgroup-1',
'benzene',
['subgroup1'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[true]
);
testSearch('Search on molecule with subgroup-2',
'PhEt',
['subgroup1'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[true]
);
testSearch('Search on molecule with subgroup-3',
'iPrPh',
['subgroup1'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[true]
);
  testSearch('Search on molecule with subgroup-4',
'transDiene',
['subgroup1'],
{'level': Kekule.StructureComparationLevel.CONFIGURATION},
[false]
);
});
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable
from paddle.fluid.framework import ParamBase, EagerParamBase
from paddle.jit import ProgramTranslator
from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode
class L1(fluid.Layer):
def __init__(self):
super(L1, self).__init__()
self._param_attr = fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1))
self.w1 = self.create_parameter(attr=self._param_attr,
shape=[2, 2],
dtype='float32',
is_bias=False)
self.w2 = self.create_parameter(attr=self._param_attr,
shape=[2, 2],
dtype='float32',
is_bias=False)
def forward(self):
return self.w1 + self.w2
class L2(fluid.Layer):
def __init__(self):
super(L2, self).__init__()
self.layer1 = L1()
self.layer2 = L1()
def forward(self):
return self.layer1() + self.layer2()
class L3(fluid.Layer):
def __init__(self):
super(L3, self).__init__()
self.layer1 = L2()
self.layer2 = L2()
def forward(self):
return self.layer1() + self.layer2()
class TestBaseLayer(unittest.TestCase):
def func_test_one_level(self):
with fluid.dygraph.guard():
l = L1()
ret = l()
expected_names = ['l1.w1', 'l1.w2']
idx = 0
for name, _ in l.named_parameters(prefix='l1'):
self.assertEqual(name, expected_names[idx])
idx += 1
self.assertTrue(np.allclose(ret.numpy(), 0.2 * np.ones([2, 2])))
def test_one_level(self):
with _test_eager_guard():
self.func_test_one_level()
self.func_test_one_level()
def func_test_three_level(self):
with fluid.dygraph.guard():
l = L3()
expected_names = [
'l3.layer1.layer1.w1',
'l3.layer1.layer1.w2',
'l3.layer1.layer2.w1',
'l3.layer1.layer2.w2',
'l3.layer2.layer1.w1',
'l3.layer2.layer1.w2',
'l3.layer2.layer2.w1',
'l3.layer2.layer2.w2',
]
idx = 0
for name, _ in l.named_parameters(prefix='l3'):
self.assertEqual(name, expected_names[idx])
idx += 1
ret = l()
self.assertTrue(np.allclose(ret.numpy(), 0.8 * np.ones([2, 2])))
def test_three_level(self):
with _test_eager_guard():
self.func_test_three_level()
self.func_test_three_level()
def func_test_add_parameter_with_error(self):
with fluid.dygraph.guard():
net = fluid.Layer()
param = net.create_parameter(shape=[1])
with self.assertRaises(TypeError):
net.add_parameter(10, param)
with self.assertRaises(KeyError):
net.add_parameter("param.name", param)
with self.assertRaises(KeyError):
net.add_parameter("", param)
with self.assertRaises(KeyError):
net.test_param = 10
net.add_parameter("test_param", param)
with self.assertRaises(TypeError):
net.add_parameter("no_param", 10)
load_param = net.create_parameter(shape=[1])
net._loaddict_holder[load_param.name] = load_param
net.add_parameter("load_param", load_param)
def test_add_parameter_with_error(self):
with _test_eager_guard():
self.func_test_add_parameter_with_error()
self.func_test_add_parameter_with_error()
class BufferLayer(fluid.Layer):
def __init__(self):
super(BufferLayer, self).__init__()
buffer_var = to_variable(np.zeros([2, 4]).astype('int32'))
self.register_buffer("layer_buffer", buffer_var)
def forward(self):
pass
class BufferNet(fluid.Layer):
def __init__(self):
super(BufferNet, self).__init__()
self.buffer_layer = BufferLayer()
self.w1 = self.create_parameter(shape=[2, 2],
dtype='float32',
is_bias=False)
buffer_var = to_variable(np.ones([2, 4]).astype('int32'))
self.register_buffer("net_buffer", buffer_var)
self.new_buffer = to_variable(np.ones([4, 2]).astype('int32'))
def forward(self):
pass
class TestBuffer(unittest.TestCase):
def func_test_buffers_and_named_buffers(self):
def names(named_buffers):
return [name for name, _ in named_buffers]
with fluid.dygraph.guard():
layer = BufferLayer()
net = BufferNet()
self.assertEqual(len(layer.buffers()), 1)
self.assertEqual(names(layer.named_buffers()), ['layer_buffer'])
self.assertEqual(len(net.buffers()), 3)
self.assertEqual(
names(net.named_buffers()),
['net_buffer', 'new_buffer', 'buffer_layer.layer_buffer'])
self.assertEqual(len(net.buffers(include_sublayers=False)), 2)
self.assertEqual(names(net.named_buffers(include_sublayers=False)),
['net_buffer', 'new_buffer'])
def test_buffers_and_named_buffers(self):
with _test_eager_guard():
self.func_test_buffers_and_named_buffers()
self.func_test_buffers_and_named_buffers()
def func_test_register_buffer_with_error(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var = to_variable(np.zeros([1]))
with self.assertRaisesRegexp(TypeError,
"name of buffer should be a string"):
net.register_buffer(12, var)
with self.assertRaisesRegexp(TypeError,
"buffer should be a Paddle.Tensor"):
if in_dygraph_mode():
net.register_buffer("buffer_name",
EagerParamBase([2, 2], 'float32'))
else:
net.register_buffer("buffer_name",
ParamBase([2, 2], 'float32'))
with self.assertRaisesRegexp(KeyError,
"name of buffer can not contain"):
net.register_buffer("buffer.name", var)
with self.assertRaisesRegexp(KeyError,
"name of buffer can not be empty"):
net.register_buffer("", var)
net.attr_name = 10
with self.assertRaisesRegexp(KeyError, "already exists"):
net.register_buffer("attr_name", var)
del net.attr_name
if in_dygraph_mode():
net.attr_name = EagerParamBase([2, 2], 'float32')
else:
net.attr_name = ParamBase([2, 2], 'float32')
with self.assertRaisesRegexp(KeyError, "already exists"):
net.register_buffer("attr_name", var)
def test_register_buffer_with_error(self):
with _test_eager_guard():
self.func_test_register_buffer_with_error()
self.func_test_register_buffer_with_error()
def func_test_register_buffer_same_name(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
var2 = to_variable(np.zeros([2]))
var3 = to_variable(np.zeros([3]))
net.register_buffer("buffer_name", var1)
self.assert_var_base_equal(net.buffer_name, var1)
net.register_buffer("buffer_name", var2)
self.assert_var_base_equal(net.buffer_name, var2)
net.register_buffer("buffer_name", var3)
self.assert_var_base_equal(net.buffer_name, var3)
def test_register_buffer_same_name(self):
with _test_eager_guard():
self.func_test_register_buffer_same_name()
self.func_test_register_buffer_same_name()
def func_test_buffer_not_persistable(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
net.register_buffer("buffer_name", var1, persistable=False)
self.assertEqual(len(net.buffers()), 1)
self.assertEqual(len(net.state_dict()), 0)
def test_buffer_not_persistable(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable()
self.func_test_buffer_not_persistable()
def func_test_buffer_not_persistable_del(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
net.register_buffer("buffer_name", var1, persistable=False)
del net.buffer_name
self.assertEqual(len(net.buffers()), 0)
def test_buffer_not_persistable_del(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_del()
self.func_test_buffer_not_persistable_del()
def func_test_buffer_not_persistable_overwrite(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
var2 = to_variable(np.zeros([2]))
net.register_buffer("buffer_name", var1, persistable=False)
net.register_buffer("buffer_name", var2)
# Allow to overwrite a non-persistable buffer with a persistable var.
self.assertEqual(len(net.buffers()), 1)
self.assertEqual(len(net.state_dict()), 1)
net.register_buffer("buffer_name", var1, persistable=False)
self.assertEqual(len(net.buffers()), 1)
self.assertEqual(len(net.state_dict()), 0)
def test_buffer_not_persistable_overwrite(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_overwrite()
self.func_test_buffer_not_persistable_overwrite()
def func_test_buffer_not_persistable_assign(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
net.register_buffer("buffer_name", var1, persistable=False)
# Assigning Nones will remove the buffer, but allow to re-assign
# to remark it as buffer.
net.buffer_name = None
self.assertEqual(len(net.buffers()), 0)
self.assertEqual(len(net.state_dict()), 0)
net.buffer_name = var1
self.assertEqual(len(net.buffers()), 1)
self.assertEqual(len(net.state_dict()), 0)
# Re-assign a ParamBase will remove the buffer.
if in_dygraph_mode():
net.buffer_name = EagerParamBase([2, 2], 'float32')
else:
net.buffer_name = ParamBase([2, 2], 'float32')
self.assertEqual(len(net.buffers()), 0)
self.assertEqual(len(net.state_dict()), 1)
def test_buffer_not_persistable_assign(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_assign()
self.func_test_buffer_not_persistable_assign()
def func_test_buffer_not_persistable_load(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
net.register_buffer("buffer_name", var1, persistable=False)
net.load_dict({})
def test_buffer_not_persistable_load(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_load()
self.func_test_buffer_not_persistable_load()
def func_test_buffer_state_dict(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([2, 3]))
var2 = to_variable(np.zeros([3, 2]))
net.register_buffer("buffer_var1", var1)
net.register_buffer("buffer_var2", var2, persistable=False)
self.assertEqual(len(net.state_dict()), 1)
self.assertEqual([name for name, _ in net.state_dict().items()],
["buffer_var1"])
# load state_dict
net_load = fluid.Layer()
var = to_variable(np.ones([2, 3]))
net_load.register_buffer("buffer_var1", var)
net_load.load_dict(net.state_dict())
self.assert_var_base_equal(net_load.buffer_var1, var1)
def test_buffer_state_dict(self):
with _test_eager_guard():
self.func_test_buffer_state_dict()
self.func_test_buffer_state_dict()
def assert_var_base_equal(self, var1, var2):
self.assertTrue(np.array_equal(var1.numpy(), var2.numpy()))
class BufferNetWithModification(paddle.nn.Layer):
def __init__(self, shape):
super(BufferNetWithModification, self).__init__()
self.buffer1 = paddle.zeros(shape, 'int32')
self.buffer2 = paddle.zeros(shape, 'int32')
@paddle.jit.to_static
def forward(self, x):
self.buffer1 += x
self.buffer2 = self.buffer1 + x
out = self.buffer1 + self.buffer2
return out
class TestModifiedBuffer(unittest.TestCase):
def funcsetUp(self):
paddle.disable_static()
self.prog_trans = ProgramTranslator()
self.shape = [10, 16]
def _run(self, to_static=False):
self.prog_trans.enable(to_static)
x = paddle.ones([1], 'int32')
net = BufferNetWithModification(self.shape)
out = net(x)
return out, net.buffer1, net.buffer2
def func_test_modified(self):
self.funcsetUp()
dy_outs = self._run(False)
st_outs = self._run(True)
for i in range(len(dy_outs)):
self.assertTrue(
np.array_equal(dy_outs[i].numpy(), st_outs[i].numpy()))
def test_modified(self):
with _test_eager_guard():
self.func_test_modified()
self.func_test_modified()
class TestLayerTo(unittest.TestCase):
def funcsetUp(self):
paddle.disable_static()
self.linear = paddle.nn.Linear(2, 2)
self.new_grad = np.random.random([2, 2])
self.linear.weight._set_grad_ivar(paddle.to_tensor(self.new_grad))
buffer = paddle.to_tensor([0.0], dtype='float32')
self.linear.register_buffer("buf_name", buffer, persistable=True)
sublayer = paddle.nn.Conv1D(3, 2, 3)
self.linear.add_sublayer("1", sublayer)
def func_test_to_api(self):
self.linear.to(dtype='double')
self.assertEqual(self.linear.weight.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertEqual(self.linear.buf_name.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertTrue(
np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
self.assertEqual(self.linear.weight._grad_ivar().dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.linear.to()
self.assertEqual(self.linear.weight.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertEqual(self.linear.buf_name.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertTrue(
np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
self.assertEqual(self.linear.weight._grad_ivar().dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
for p in self.linear.parameters():
if in_dygraph_mode():
self.assertTrue(
isinstance(p, paddle.fluid.framework.EagerParamBase))
else:
self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase))
if paddle.fluid.is_compiled_with_cuda():
self.linear.to(device=paddle.CUDAPlace(0))
self.assertTrue(self.linear.weight.place.is_gpu_place())
self.assertEqual(self.linear.weight.place.gpu_device_id(), 0)
self.assertTrue(self.linear.buf_name.place.is_gpu_place())
self.assertEqual(self.linear.buf_name.place.gpu_device_id(), 0)
self.assertTrue(
self.linear.weight._grad_ivar().place.is_gpu_place())
self.assertEqual(
self.linear.weight._grad_ivar().place.gpu_device_id(), 0)
self.linear.to(device='gpu:0')
self.assertTrue(self.linear.weight.place.is_gpu_place())
self.assertEqual(self.linear.weight.place.gpu_device_id(), 0)
self.assertTrue(self.linear.buf_name.place.is_gpu_place())
self.assertEqual(self.linear.buf_name.place.gpu_device_id(), 0)
self.assertTrue(
self.linear.weight._grad_ivar().place.is_gpu_place())
self.assertEqual(
self.linear.weight._grad_ivar().place.gpu_device_id(), 0)
for p in self.linear.parameters():
if in_dygraph_mode():
self.assertTrue(
isinstance(p, paddle.fluid.framework.EagerParamBase))
else:
self.assertTrue(
isinstance(p, paddle.fluid.framework.ParamBase))
self.linear.to(device=paddle.CPUPlace())
self.assertTrue(self.linear.weight.place.is_cpu_place())
self.assertTrue(self.linear.buf_name.place.is_cpu_place())
self.assertTrue(self.linear.weight._grad_ivar().place.is_cpu_place())
self.linear.to(device='cpu')
self.assertTrue(self.linear.weight.place.is_cpu_place())
self.assertTrue(self.linear.buf_name.place.is_cpu_place())
self.assertTrue(self.linear.weight._grad_ivar().place.is_cpu_place())
self.assertRaises(ValueError, self.linear.to, device=1)
self.assertRaises(AssertionError, self.linear.to, blocking=1)
def func_test_to_api_paddle_dtype(self):
self.linear.to(dtype=paddle.float64)
self.assertEqual(self.linear.weight.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertEqual(self.linear.buf_name.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertTrue(
np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
self.assertEqual(self.linear.weight._grad_ivar().dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.linear.to()
self.assertEqual(self.linear.weight.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertEqual(self.linear.buf_name.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertTrue(
np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
self.assertEqual(self.linear.weight._grad_ivar().dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
for p in self.linear.parameters():
if in_dygraph_mode():
self.assertTrue(
isinstance(p, paddle.fluid.framework.EagerParamBase))
else:
self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase))
def func_test_to_api_numpy_dtype(self):
self.linear.to(dtype=np.float64)
self.assertEqual(self.linear.weight.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertEqual(self.linear.buf_name.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertTrue(
np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
self.assertEqual(self.linear.weight._grad_ivar().dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.linear.to()
self.assertEqual(self.linear.weight.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertEqual(self.linear.buf_name.dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
self.assertTrue(
np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
self.assertEqual(self.linear.weight._grad_ivar().dtype,
paddle.fluid.core.VarDesc.VarType.FP64)
for p in self.linear.parameters():
if in_dygraph_mode():
self.assertTrue(
isinstance(p, paddle.fluid.framework.EagerParamBase))
else:
self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase))
def test_main(self):
with _test_eager_guard():
self.funcsetUp()
self.func_test_to_api()
self.func_test_to_api_paddle_dtype()
self.func_test_to_api_numpy_dtype()
self.funcsetUp()
self.func_test_to_api()
self.func_test_to_api_paddle_dtype()
self.func_test_to_api_numpy_dtype()
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
|
from typing import List, Tuple, Optional, Union
import wx
import wx.adv
import wx.lib.masked.numctrl
import wx.svg
from asn1editor.interfaces.BitstringInterface import BitstringInterface
from asn1editor.interfaces.OptionalInterface import OptionalInterface
from asn1editor.interfaces.ValueInterface import ValueInterface
from asn1editor.view.AbstractView import AbstractView, ContainerView, ListView, ChoiceView
from asn1editor.view.AbstractViewFactory import AbstractViewFactory, TypeInfo, Styles
from asn1editor.wxPython import Resources
from asn1editor.wxPython.Labels import Labels
from asn1editor.wxPython.WxPythonComplexViews import WxPythonContainerView, WxPythonListView, WxPythonChoiceView
from asn1editor.wxPython.WxPythonDateTimeViews import WxPythonDateView, WxPythonTimeView, WxPythonDateTimeView
from asn1editor.wxPython.WxPythonViews import WxPythonValueView, WxPythonBooleanView, \
WxPythonBitstringView, WxPythonHexStringView, WxPythonValueSelectionView, ControlList
class WxPythonViewFactory(AbstractViewFactory):
def __init__(self, window: wx.ScrolledWindow, labels: Labels):
self._window = window
self._labels = labels
def get_enumerated_view(self, type_info: TypeInfo, choices: List[str]) -> Tuple[AbstractView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, ':', 'enumerated')
controls['value'] = wx.ComboBox(self._window, choices=choices, style=wx.CB_READONLY)
self._apply_style(controls)
view = WxPythonValueSelectionView(type_info, controls)
return view, view, view if type_info.optional else None
def get_text_view(self, type_info: TypeInfo, text: str) -> AbstractView:
controls = self._get_controls(type_info)
controls['value'] = wx.StaticText(self._window, wx.ID_ANY, text)
self._apply_style(controls)
view = WxPythonValueView(type_info, controls)
return view
def get_container_view(self, type_info: TypeInfo) -> Tuple[ContainerView, OptionalInterface]:
controls = self._get_controls(type_info, icon=WxPythonContainerView.icon)
view = WxPythonContainerView(type_info, controls, self._window)
return view, view if type_info.optional else None
def get_list_view(self, type_info: TypeInfo, minimum: int, maximum: int) -> Tuple[ListView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, icon=WxPythonListView.icon)
num_elements = wx.SpinCtrl(self._window)
if minimum is not None:
num_elements.SetMin(minimum)
else:
minimum = 0
if maximum is not None:
num_elements.SetMax(maximum)
else:
maximum = 'infinite'
num_elements.SetToolTip(f"Minimum elements: {minimum}, maximum elements: {maximum}")
controls['value'] = num_elements
controls['num_elements'] = wx.StaticText(self._window, wx.ID_ANY, "Elements:")
self._apply_style(controls)
view = WxPythonListView(type_info, controls, self._window)
return view, view, view if type_info.optional else None
def get_number_view(self, type_info: TypeInfo, minimum: Optional[Union[int, float]],
maximum: Optional[Union[int, float]], float_: bool) -> Tuple[AbstractView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, ':', 'float' if float_ else 'integer')
edit = wx.lib.masked.numctrl.NumCtrl(self._window)
tool_tip = []
if isinstance(minimum, int) or isinstance(minimum, float):
edit.SetAllowNegative(minimum < 0)
edit.SetMin(minimum)
tool_tip.append(f'Minimum: {minimum}')
if isinstance(maximum, int) or isinstance(maximum, float):
edit.SetMax(maximum)
tool_tip.append(f'Maximum: {maximum}')
if float_:
edit.SetFractionWidth(6)
if len(tool_tip):
edit.SetToolTip(', '.join(tool_tip))
controls['value'] = edit
self._apply_style(controls)
view = WxPythonValueView(type_info, controls)
return view, view, view if type_info.optional else None
def get_boolean_view(self, type_info: TypeInfo) -> Tuple[AbstractView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, ':', 'bool')
controls['value'] = wx.CheckBox(self._window)
self._apply_style(controls)
view = WxPythonBooleanView(type_info, controls)
return view, view, view if type_info.optional else None
def get_string_view(self, type_info: TypeInfo, minimum: Optional[int], maximum: Optional[int]):
controls = self._get_controls(type_info, ':', 'string')
edit = wx.TextCtrl(self._window)
if maximum:
edit.SetMaxLength(maximum)
else:
maximum = 'infinite'
if minimum is None:
minimum = '0'
edit.SetToolTip(f"Minimum characters: {minimum}, maximum characters: {maximum}")
controls['value'] = edit
self._apply_style(controls)
view = WxPythonValueView(type_info, controls)
return view, view, view if type_info.optional else None
def get_hex_string_view(self, type_info: TypeInfo, minimum: Optional[int], maximum: Optional[int]):
controls = self._get_controls(type_info, ':', 'string')
controls['selector'] = wx.CheckBox(self._window, label='Hex')
controls['selector'].SetValue(True)
controls['value'] = wx.TextCtrl(self._window)
self._apply_style(controls)
view = WxPythonHexStringView(type_info, controls, minimum, maximum)
return view, view, view if type_info.optional else None
def get_choice_view(self, type_info: TypeInfo, choices: List[str]) -> Tuple[ChoiceView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, icon=WxPythonChoiceView.icon)
controls['value'] = wx.ComboBox(self._window, choices=choices, style=wx.CB_READONLY)
self._apply_style(controls)
view = WxPythonChoiceView(type_info, controls)
return view, view, view if type_info.optional else None
def get_bitstring_view(self, type_info: TypeInfo, number_of_bits: int, named_bits: List[Tuple[str, int]]) -> \
Tuple[AbstractView, BitstringInterface, OptionalInterface]:
controls = self._get_controls(type_info, icon='bitstring')
checkboxes: List[Tuple[int, wx.CheckBox]] = []
check_all = wx.Button(self._window, label='all_boxes')
style = type_info.style
if named_bits:
for name, bit in named_bits:
bit_checkbox = wx.CheckBox(self._window, label=f"{bit}: {name}")
if style & Styles.READ_ONLY:
bit_checkbox.Enable(False)
checkboxes.append((bit, bit_checkbox))
else:
for bit in range(number_of_bits):
bit_checkbox = wx.CheckBox(self._window, label=str(bit))
if style & Styles.READ_ONLY:
bit_checkbox.Enable(False)
checkboxes.append((bit, bit_checkbox))
controls['checkboxes'] = checkboxes
controls['check_all'] = check_all
view = WxPythonBitstringView(type_info, controls, self._window)
return view, view, view if type_info.optional else None
def get_date_view(self, type_info: TypeInfo) -> Tuple[AbstractView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, ':', 'date')
controls['value'] = wx.adv.DatePickerCtrl(self._window)
self._apply_style(controls)
view = WxPythonDateView(type_info, controls)
return view, view, view if type_info.optional else None
def get_time_view(self, type_info: TypeInfo) -> Tuple[AbstractView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, ':', 'date')
controls['value'] = wx.adv.TimePickerCtrl(self._window)
self._apply_style(controls)
view = WxPythonTimeView(type_info, controls)
return view, view, view if type_info.optional else None
def get_datetime_view(self, type_info: TypeInfo) -> Tuple[AbstractView, ValueInterface, OptionalInterface]:
controls = self._get_controls(type_info, ':', 'date')
controls['value'] = wx.adv.DatePickerCtrl(self._window)
controls['time'] = wx.adv.TimePickerCtrl(self._window)
self._apply_style(controls)
view = WxPythonDateTimeView(type_info, controls)
return view, view, view if type_info.optional else None
def _get_controls(self, type_info: TypeInfo, suffix: str = '', icon: str = None) -> ControlList:
controls = {}
label = self._labels.get_label(type_info, suffix)
tooltip = self._labels.get_tooltip(type_info)
if type_info.optional:
control = wx.CheckBox(self._window, wx.ID_ANY, label)
controls['optional'] = control
else:
control = wx.StaticText(self._window, wx.ID_ANY, label)
controls['name'] = control
control.SetToolTip(tooltip)
if icon is not None:
controls['icon'] = self._get_svg(icon, tooltip)
if type_info.style is not None:
controls['style'] = type_info.style
else:
controls['style'] = 0
return controls
def _get_svg(self, bitmap_name: str, icon_tooltip: str = None) -> wx.StaticBitmap:
bitmap = Resources.image_list.get_bitmap(bitmap_name)
if bitmap is None:
bitmap = Resources.get_bitmap_from_svg(bitmap_name)
static_bitmap = wx.StaticBitmap(self._window, bitmap=bitmap)
static_bitmap.SetToolTip(icon_tooltip)
return static_bitmap
@staticmethod
def _apply_style(controls: ControlList):
if controls.get('style') & Styles.READ_ONLY and 'value' in controls:
controls['value'].Enable(False)
|
'''
img tools
Common image-processing utilities.
function:
1. get_img(getpath, gray=False, scale_percent=100) load an image from a path, with optional grayscale conversion and scaling
2. save_img(savepath, img) save an image
3. plot_line_chart(y1, y2, y3) draw a line chart (still incomplete)
4. cut_pic(img, pattern=0, up=0, down=0, left=0, right=0) crop an image, either by ratio or by pixel count
5. plot_3d_line(x, z, y, over, x_max, y_max, z_max) draw a line in 3D space
6. plot_3d_dot(location, over, x_max, y_max, z_max) draw a point in 3D space
'''
import sys
sys.path.append(r'./') # make locally written helper modules importable
import cv2
import matplotlib.pyplot as plt
import numpy as np
def get_img(getpath, gray=False, scale_percent=100):
'''
Load an image from a path.\n
getpath: path to the image\n
gray: whether to return a grayscale image; default=False\n
scale_percent: scaling percentage; default=100
'''
img = cv2.imread(getpath)
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
if scale_percent != 100:
# percent of original size
dim = (width, height)
# resize image
img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
if gray:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale (cv2.imread returns 3 channels even for grayscale images)
img = img.reshape(height, width, 1) # keep an explicit single channel; note cv2 arrays are indexed (height, width)
return img
def save_img(savepath, img):
'''
Save an image to the given path.
'''
cv2.imwrite(savepath, img)
def plot_line_chart(y1, y2, y3):
'''
Draw a line chart (still incomplete).
'''
plt.figure(figsize=(20,2))
plt.title('太阳风速度预测') # chart title (solar wind speed prediction)
plt.rcParams['font.sans-serif'] = ['SimHei'] # allow Chinese characters to render
plt.xlabel('time') # x-axis label
plt.ylabel('太阳风速 km/s') # y-axis label (solar wind speed, km/s)
plt.plot(y1, color='#800080', label='true', linewidth=1) # draw the ground-truth line
# plt.plot(y2, color='#00a8e1', label='p1', linewidth=1)
plt.plot(y3, color='#99cc00', label='p2', linewidth=1)
plt.legend(['True', 'P2']) # legend labels
# plt.legend(['True', 'P1', 'P2']) # legend labels
plt.show() # display the chart
def cut_pic(img,pattern=0, up = 0, down = 0, left = 0, right = 0):
'''
Crop an image.\n
(img, crop mode (0: by ratio, 1: by pixels), top, bottom, left, right)\n
eg.\n
>>>(img, 1, 50, 50, 50, 50)\n
crop 50 pixels from each of the four edges\n
>>>(img, 0, 0.2, 0.2, 0.2, 0.2)\n
crop 20% from each of the four edges\n
'''
h, w = len(img), len(img[0])
if pattern:
img = img[up:h - down, left:w-right]
else:
img = img[int(h*up):int(h*(1-down)), int(w*left):int(w*(1-right))]
return img
def plot_3d_line(x, z, y, over, x_max, y_max, z_max):
'''
Draw a trajectory line in 3D space.
'''
from matplotlib.font_manager import FontProperties # moving these two lines to the caller would improve efficiency
font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=12)
# if x > x_max:x_max = x
# if y > y_max:y_max = y
# if z > z_max:z_max = z
plt.ion()
ax = plt.axes(projection='3d')
# Axis setup: a right-handed coordinate system cannot be created directly, so it is emulated via the axis limits below; this part is a little messy.
ax.set_xlim(0, x_max)
ax.set_zlim(0, y_max)
ax.set_ylim(z_max, 0)
ax.plot3D(x,z,y,'red') # draw the curve in 3D space
plt.title('导弹发射轨迹', fontproperties=font_set) # title: missile launch trajectory
plt.xlabel('x水平距离(米)', fontproperties=font_set) # x horizontal distance (m)
plt.ylabel('z水平距离(米)', fontproperties=font_set) # z horizontal distance (m)
# plt.zlabel('导弹运行高度(米)', fontproperties=font_set) # (would be: missile altitude (m))
plt.show()
if not over:
plt.pause(0.5)
plt.clf()
else:
plt.pause(1000)
def plot_3d_dot(location, over, x_max, y_max, z_max):
'''
Draw a point in 3D space.
'''
from matplotlib.font_manager import FontProperties
font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=12)
# if x > x_max:x_max = x
# if y > y_max:y_max = y
# if z > z_max:z_max = z
plt.ion()
ax = plt.axes(projection='3d')
# Axis setup: a right-handed coordinate system cannot be created directly, so it is emulated via the axis limits below; this part is a little messy.
ax.set_xlim(0, x_max)
ax.set_zlim(0, y_max)
ax.set_ylim(z_max, 0)
ax.plot(location[0],location[2],location[1],'red', marker='o') # draw the point in 3D space
plt.title('导弹发射轨迹', fontproperties=font_set) # title: missile launch trajectory
plt.xlabel('x水平距离(米)', fontproperties=font_set) # x horizontal distance (m)
plt.ylabel('z水平距离(米)', fontproperties=font_set) # z horizontal distance (m)
# plt.zlabel('导弹运行高度(米)', fontproperties=font_set) # (would be: missile altitude (m))
plt.show()
if not over:
plt.pause(0.5)
plt.clf()
else:
plt.pause(1000)
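# --- Usage sketch added for illustration; the file paths below are assumptions, not from the original module ---
def _example_usage():
    """Load, crop and save an image using the helpers above."""
    img = get_img('./example/test.jpg', gray=True, scale_percent=50)  # 50% size, single channel
    img = cut_pic(img, 0, 0.1, 0.1, 0.1, 0.1)  # ratio mode: trim 10% from each edge
    save_img('./example/test_cut.jpg', img)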
if __name__ == '__main__':
print("Welcome to MyTools!")
from utils.txtTool import *
# l1 = txtReadNumArray("./example/data/true.txt")
# l2 = txtReadNumArray("./example/data/p1.txt")
# l3 = txtReadNumArray("./example/data/p2.txt")
# plot_line_chart(l1[120:8000],l2[0:8000],l3[0:8000])
# imgpath = "./example/test.jpg"
# img = get_img(imgpath, gray=True, scale_percent=25)
# print(img.shape)
# img = img[50:462, 50:462]
# img = cut_pic(img, 1, 50, 50, 50, 50)
# img = cut_pic(img, 1, 14, 14, 14, 14)
# img = 255.-img
# from mathTool import *
# img = standardization(img)
# print(img[0:5,0:5])
# print(img[40:50,40:50])
# from circle import *
# box = box_circle(100, (49,49), 50, 0., 1.)
# img = img*box
# print(img[0:5,0:5])
# print(img[40:50,40:50])
# cv2.imshow("img", img)
# cv2.waitKey (0)
# cv2.destroyAllWindows()
plot_3d_line([1, 2, 3], [1, 2, 3], [1, 2, 3], True, 10, 10, 10)
plot_3d_dot([1, 5, 10], True, 10, 10, 10)
|
class InvalidKeyException(Exception):
def __init__(self):
Exception.__init__(self, "The Caesar cipher key must be in the range [1, 25].")
class InvalidModeException(Exception):
def __init__(self):
Exception.__init__(self, "The mode selected must be:"
"\n1 - Encryption"
"\n2 - Decryption")
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import lsp_admin_group
class lsp_sec_path_config_admin_groups(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-lsp-name-extensive/output/lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-sec-path-info/sec-path/lsp-sec-path-config-admin-groups. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_admin_group',)
_yang_name = 'lsp-sec-path-config-admin-groups'
_rest_name = 'lsp-sec-path-config-admin-groups'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_admin_group = YANGDynClass(base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-lsp-name-extensive', u'output', u'lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-sec-path-info', u'sec-path', u'lsp-sec-path-config-admin-groups']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-lsp-name-extensive', u'output', u'lsp', u'sec-path', u'lsp-sec-path-config-admin-groups']
def _get_lsp_admin_group(self):
"""
Getter method for lsp_admin_group, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group (container)
"""
return self.__lsp_admin_group
def _set_lsp_admin_group(self, v, load=False):
"""
Setter method for lsp_admin_group, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__lsp_admin_group = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group(self):
self.__lsp_admin_group = YANGDynClass(base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
lsp_admin_group = __builtin__.property(_get_lsp_admin_group, _set_lsp_admin_group)
_pyangbind_elements = {'lsp_admin_group': lsp_admin_group, }
|
(function(window){var svgSprite='<svg><symbol id="icon-xinxihuan002" viewBox="0 0 1024 1024"><path d="M883.904 199.36c-49.28-49.152-108.352-73.664-177.536-73.664-38.72 0-74.816 8.192-108.096 24.512C564.352 166.592 535.872 178.496 512 206.656 488.896 178.432 460.416 166.592 426.56 150.208c-33.92-16.384-70.144-24.64-108.992-24.64-69.12 0-128.32 24.64-177.408 73.728C91.008 248.512 66.432 307.648 66.432 376.704c0 38.656 8.064 75.008 24.576 108.928C107.328 519.36 129.728 548.224 158.08 572.096l204.352 170.944 149.632 155.392 154.048-157.44 221.248-187.712c46.72-49.28 70.208-107.904 70.208-176.512C957.504 307.712 932.8 248.512 883.904 199.36z" ></path></symbol><symbol id="icon-changyongdianhua" viewBox="0 0 1024 1024"><path d="M686.22709 925.180344c-55.872518-8.66331-119.837342-41.290405-184.974875-94.353948-60.350506-49.1627-120.426767-114.455775-173.732833-188.82046-52.340065-73.014944-94.687546-149.800771-122.462123-222.056422-29.132504-75.78708-40.404222-142.470829-32.593326-192.843076 4.790096-30.895661 31.215956-64.334237 72.500221-91.743494 34.780131-23.089881 74.985832-37.528732 95.602382-34.332946 16.683984 2.586918 44.715411 42.552142 73.155137 104.298438 26.113751 56.698327 42.265616 109.418039 40.427758 121.270994-2.940982 18.972097-13.419637 31.639604-24.512277 45.050032-11.394515 13.775748-23.175839 28.021194-26.485211 49.366338-3.62762 23.39892 28.808116 78.577636 60.371996 122.416074 29.958312 41.606607 72.677253 91.3925 94.475721 94.77248 18.571984 2.879584 37.016055-3.561106 54.851259-9.788947 18.435884-6.437619 37.500079-13.094226 57.311287-10.022261 16.458857 2.552126 60.839647 36.994566 107.927081 83.757612 54.56064 54.185087 77.435627 89.308025 76.251661 96.940866-5.647627 36.424584-29.277814 71.609944-64.832588 96.53359C756.178255 918.991388 717.568912 930.040025 686.22709 925.180344z" ></path></symbol><symbol id="icon-sousuo" viewBox="0 0 1024 1024"><path d="M441.202561 155.004283c38.164205 0 75.16798 7.467065 109.983927 22.192442 33.646308 14.231119 63.87068 34.611286 89.834005 60.57461 25.963325 25.963325 46.343491 56.187697 60.57461 89.834005 14.7264 34.815947 22.192442 71.819722 22.192442 109.983927 0 38.164205-7.467065 75.169003-22.192442 109.98495-14.231119 33.645284-34.611286 63.869656-60.57461 89.834005-25.963325 25.963325-56.187697 46.343491-89.834005 60.57461-34.815947 14.725376-71.819722 22.192442-109.983927 22.192442s-75.16798-7.467065-109.983927-22.192442c-33.645284-14.231119-63.869656-34.611286-89.834005-60.57461-25.963325-25.963325-46.343491-56.187697-60.57461-89.834005-14.7264-34.815947-22.192442-71.819722-22.192442-109.98495 0-38.164205 7.466042-75.16798 22.192442-109.983927 14.231119-33.646308 34.611286-63.87068 60.57461-89.834005s56.187697-46.344514 89.834005-60.57461C366.034581 162.471348 403.038356 155.004283 441.202561 155.004283M441.202561 119.6818c-175.574784 0-317.90849 142.332682-317.90849 317.90849 0 175.576831 142.332682 317.90849 317.90849 317.90849S759.111051 613.167121 759.111051 437.589266C759.111051 262.014482 616.777345 119.6818 441.202561 119.6818L441.202561 119.6818z" ></path><path d="M718.874651 706.431248c1.669012 0 4.106528 0.448208 6.244214 2.585895l137.374764 137.374764c3.443425 3.443425 3.443425 9.045003 0 12.488429-2.137687 2.137687-4.576225 2.585895-6.245238 2.585895s-4.107551-0.448208-6.244214-2.585895L712.629413 721.504548c-2.137687-2.137687-2.585895-4.574179-2.585895-6.243191 0-1.667989 0.448208-4.105504 2.587941-6.244214C714.768123 706.880479 717.206662 706.431248 718.874651 706.431248M718.874651 671.108764c-11.299347 
0-22.599717 4.311189-31.221072 12.932544-17.244756 17.242709-17.244756 45.199435 0 62.442144l137.373741 137.374764c8.621355 8.621355 19.921725 12.932544 31.222095 12.932544 11.30037 0 22.600741-4.311189 31.222095-12.932544 17.242709-17.242709 17.242709-45.199435 0-62.442144L750.095723 684.041308C741.474368 675.41893 730.175021 671.108764 718.874651 671.108764L718.874651 671.108764z" ></path></symbol><symbol id="icon-caidan" viewBox="0 0 1024 1024"><path d="M377.724 478.795h415.79v66.41h-415.79v-66.41z" ></path><path d="M377.724 266.021h415.79v66.41h-415.79v-66.41z" ></path><path d="M230.486 247.525h103.402v103.402h-103.402v-103.402z" ></path><path d="M230.486 460.3h103.402v103.402h-103.402v-103.402z" ></path><path d="M230.486 673.073h103.402v103.402h-103.402v-103.402z" ></path><path d="M377.724 691.569h415.79v66.41h-415.79v-66.41z" ></path></symbol><symbol id="icon-comment" viewBox="0 0 1024 1024"><path d="M772.892588 181.380547 251.50102 181.380547C179.512603 181.380547 121.150898 239.742251 121.150898 311.730669L121.150898 702.781036C121.150898 774.769453 179.512603 833.131159 251.50102 833.131159L381.851142 833.131159 510.255995 963.481281 642.551386 833.131159 772.892588 833.131159C844.889034 833.131159 903.24271 774.769453 903.24271 702.781036L903.24271 311.73067C903.24271 239.743143 844.889925 181.380547 772.892588 181.380547L772.892588 181.380547ZM512.192343 442.080792C548.186106 442.080792 577.367847 471.261644 577.367847 507.255407 577.367847 543.249169 548.186997 572.430914 512.192343 572.430914 476.197683 572.430914 447.017725 543.250062 447.017725 507.255407 447.017725 471.262536 476.198581 442.080792 512.192343 442.080792L512.192343 442.080792ZM316.67653 572.430914C280.682767 572.430914 251.501911 543.250062 251.501911 507.255407 251.501911 471.261644 280.682767 442.080792 316.67653 442.080792 352.670292 442.080792 381.851142 471.261644 381.851142 507.255407 381.851142 543.250062 352.670292 572.430914 316.67653 572.430914L316.67653 572.430914ZM707.726896 572.430914C671.733134 572.430914 642.552278 543.250062 642.552278 507.255407 642.552278 471.261644 671.733134 442.080792 707.726896 442.080792 743.720659 442.080792 772.901509 471.261644 772.901509 507.255407 772.901509 543.250062 743.720659 572.430914 707.726896 572.430914L707.726896 572.430914Z" ></path></symbol><symbol id="icon-cplay1" viewBox="0 0 1024 1024"><path d="M512 1024C229.2 1024 0 794.8 0 512S229.2 0 512 0s512 229.2 512 512S794.8 1024 512 1024zM512 32C245.9 32 32 245.9 32 512s213.9 480 480 480c266.2 0 480-213.9 480-480C992 245.9 778.1 32 512 32zM768 512 384 736 384 288 768 512z" ></path></symbol><symbol id="icon-github" viewBox="0 0 1024 1024"><path d="M530.01791-7.557299C672.947218-4.362634 790.76646 44.643525 885.072968 139.46118 979.315583 234.342728 1028.002276 354.462128 1031.133048 496.752503 1029.599609 610.546467 996.566773 708.558787 933.759661 795.453672 870.888656 882.412451 789.233021 940.874819 687.13153 977.230106 674.544551 978.827438 665.088342 977.230106 660.424132 972.502002 655.696028 966.176565 652.565256 959.851129 652.565256 953.525692L654.098695 814.429982C654.098695 790.725568 650.967924 771.749259 643.109048 755.967614 636.847505 740.122076 628.988629 729.068536 619.532421 721.20966 677.675322 718.014995 729.55668 699.038685 776.646041 664.280731 822.201963 631.056216 847.375922 566.268412 850.506694 471.450757 850.506694 444.551679 845.77859 419.249933 836.322382 397.142851 826.930067 374.971877 814.343087 354.462128 798.625336 337.083151 801.820001 330.757715 806.484211 314.97607 
808.081544 291.207763 811.212315 267.503349 806.484211 237.473499 793.897232 202.715545 793.897232 201.118213 782.907584 201.118213 760.92829 204.248984 738.948995 207.443649 704.382721 224.822626 655.696028 254.852476 614.86821 243.798936 573.976499 237.473499 530.01791 237.473499 486.059321 237.473499 445.16761 243.798936 404.339793 254.852476 357.186538 223.225294 321.086825 207.443649 299.10753 204.248984 277.064343 201.118213 266.074695 201.118213 266.074695 202.715545 253.551609 237.473499 248.823505 267.503349 251.954276 291.207763 255.085048 314.97607 258.21582 330.757715 261.410484 337.083151 245.692733 354.462128 231.508421 373.438438 223.649545 397.142851 214.25723 419.249933 209.529126 444.551679 209.529126 472.984196 212.659898 567.865744 236.236525 631.056216 281.792447 665.878064 327.348368 700.636018 379.229726 719.612327 437.308734 722.743099 429.449859 729.068536 423.188316 736.991305 418.460211 748.044845 412.198668 759.098386 409.067897 773.346591 405.937125 789.128236 388.622041 798.648337 366.642747 801.779109 336.804576 800.24567 306.966406 798.648337 281.792447 781.26936 259.813152 746.447513 248.823505 729.068536 237.833857 716.417663 223.649545 708.558787 209.529126 700.636018 197.00604 695.907914 181.288289 694.310581 168.701309 692.713249 160.842433 694.310581 157.711662 700.636018 154.58089 705.364122 160.842433 713.286891 176.560184 724.340431 192.277936 735.393972 204.801022 746.447513 212.659898 759.098386 220.518774 771.749259 226.84421 785.997464 233.105753 798.648337 240.964629 819.158086 258.21582 836.600956 286.520551 850.785269 313.227949 865.033474 352.522328 866.630807 402.74246 857.110705L404.339793 950.39492C404.339793 958.317689 401.209021 964.643126 396.480917 969.37123 391.752813 975.696667 382.360498 977.230106 369.773518 974.099334 267.672028 939.34138 185.952499 877.684347 123.145387 792.322901 60.274382 706.961454 28.902772 607.351802 27.30544 495.155171 30.436212 351.267463 79.122905 232.745395 173.429413 137.927741 267.672028 43.046193 387.024709-5.959967 528.420578-9.090738L530.01791-7.557299Z" ></path></symbol><symbol id="icon-changyongdizhi" viewBox="0 0 1024 1024"><path d="M512 64C326.4 64 179.2 211.2 179.2 396.8c0 64 19.2 115.2 44.8 166.4 0 6.4 256 384 256 384C486.4 953.6 499.2 960 512 960c12.8 0 25.6-6.4 32-12.8 0 0 256-377.6 256-377.6 32-51.2 51.2-108.8 51.2-172.8C851.2 211.2 697.6 64 512 64zM736 537.6 736 537.6C736 544 736 544 736 537.6L627.2 704c0 0 0 0 0 0L512 870.4 396.8 704c0 0 0 0 0 0L288 544l0 0C262.4 499.2 243.2 448 243.2 396.8 243.2 249.6 364.8 128 512 128s268.8 115.2 268.8 262.4C780.8 448 761.6 499.2 736 537.6L736 537.6zM512 262.4C435.2 262.4 377.6 320 377.6 396.8S435.2 524.8 512 524.8s134.4-57.6 134.4-134.4S588.8 262.4 512 262.4zM512 460.8c-38.4 0-70.4-32-70.4-64s32-64 70.4-64 70.4 32 70.4 64S550.4 460.8 512 460.8z" ></path></symbol><symbol id="icon-changyonggongneng" viewBox="0 0 1024 1024"><path d="M613.3682 841.981457c-3.40761-0.48607-6.813173-0.989537-10.220782-1.471514-15.431457-2.148943-29.118178-8.251941-41.36613-17.869996-14.44192-11.340279-24.279986-25.750477-29.971615-43.156916-2.849908-8.733918-4.229325-17.704221-4.244674-26.801413-0.107447-67.574025-0.054235-135.156236-0.054235-202.730261 0-1.455141 0-2.910283 0-4.795213 1.723248 0 3.155876 0 4.588505 0 68.563562 0 137.12917-0.211824 205.686592 0.138146 18.685571 0.100284 35.725666 6.385431 50.823526 17.750269 11.294231 8.496511 20.026102 19.075451 26.291806 31.602765 5.058203 10.113335 8.352225 20.835538 8.710382 32.282241 0.021489 0.633427 0.463558 1.243317 
0.709151 1.866511 0 42.928718 0 85.865623 0 128.792295-0.245593 0.747014-0.587378 1.471514-0.725524 2.240017-1.205455 6.509251-1.798972 13.193487-3.64911 19.510356-5.220908 17.786085-15.745612 32.106232-30.217209 43.552935-12.08627 9.555633-25.76685 15.485693-41.051975 17.618263-3.40761 0.473791-6.807033 0.975211-10.212596 1.471514C696.762539 841.981457 655.067928 841.981457 613.3682 841.981457L613.3682 841.981457 613.3682 841.981457z" ></path><path d="M824.32789 403.052682c-0.49528 3.041266-1.004887 6.074346-1.471514 9.114588-2.354628 15.386432-8.490372 29.073152-18.151405 41.275056-7.758708 9.784854-17.178241 17.634636-28.1532 23.387663-9.092076 4.770653-18.807345 8.29185-29.172413 9.054213-8.42795 0.617054-16.910135 0.776689-25.368784 0.784876-63.268976 0.060375-126.537951 0.029676-189.797717 0.029676-1.446955 0-2.88777 0-4.694929 0 0-1.676176 0-2.994194 0-4.320399 0-68.199265-0.205685-136.39853 0.12996-204.596771 0.098237-20.149922 6.951319-38.324864 19.74674-54.168714 14.272052-17.680684 32.881898-27.770484 54.876842-32.191166 1.476631-0.296759 2.963495-0.533143 4.451382-0.792039 46.134737 0 92.27152 0 138.41649 0 0.49528 0.243547 0.967024 0.579191 1.49505 0.723477 7.986905 2.187829 16.247033 3.65832 23.905456 6.6607 15.043624 5.9055 27.107381 15.996322 36.58729 29.089525 8.276501 11.423167 13.703093 24.043602 15.737426 38.057781 0.442068 3.041266 0.975211 6.074346 1.471514 9.107425C824.32789 317.189106 824.32789 360.124987 824.32789 403.052682L824.32789 403.052682 824.32789 403.052682z" ></path><path d="M172.959725 625.108081c0.983397-4.527107 1.829671-9.092076 2.964518-13.580297 3.13234-12.361539 8.824993-23.410176 16.880459-33.333176 11.187807-13.787004 25.210172-23.296589 42.037419-28.760021 8.702196-2.828418 17.657148-4.252861 26.809599-4.252861 67.330478-0.023536 134.660956-0.014326 201.991434-0.014326 1.600451 0 3.200902 0 5.395894 0 0 1.400906 0 2.696412 0 3.985778 0 70.164013 0.151449 140.329049-0.138146 210.501248-0.054235 12.673647-4.359284 24.547069-10.509355 35.773761-4.938476 8.999978-11.149944 16.886599-18.717294 23.647583-11.744485 10.492982-25.330922 17.656125-40.894386 20.635993-4.632507 0.883113-9.403161 1.029446-14.105253 1.570775-0.518816 0.060375-0.998747 0.463558-1.502213 0.707105-41.447994 0-82.898035 0-124.349099 0-0.74599-0.243547-1.485841-0.699941-2.238994-0.707105-12.201903-0.176009-23.655769-3.467985-34.424021-8.918114-10.897188-5.516644-20.491707-12.763698-28.356838-22.344914-8.345062-10.157337-14.479783-21.35947-17.841344-34.033118-1.272993-4.824889-2.018983-9.785877-3.00238-14.678304C172.959725 715.911156 172.959725 670.506037 172.959725 625.108081L172.959725 625.108081zM261.757121 575.550385 261.757121 575.550385z" ></path><path d="M389.833101 190.621479c4.64888 0.983397 9.358135 1.768273 13.94664 2.995217 12.672624 3.391237 23.898293 9.503445 34.048467 17.878183 10.752902 8.871041 18.497283 19.86749 24.106024 32.419364 4.267187 9.549493 7.062859 19.593244 7.071046 30.148647 0.061398 69.311599 0.031722 138.622174 0.031722 207.933773 0 1.447978 0 2.895957 0 4.702092-1.548262 0-2.744507 0-3.941775 0-69.928652 0-139.857305 0.152473-209.778794-0.137123-13.602809-0.053212-26.459628-4.53427-38.065967-11.644201-6.362918-3.901867-12.658298-8.3072-17.908882-13.557784-8.931416-8.93858-16.102746-19.288299-20.515243-31.359218-2.666736-7.30129-4.823865-14.670118-5.219885-22.482037-0.01535-0.26606-0.419556-0.51063-0.647753-0.762363 0-45.396933 0-90.793866 0-136.191822 0.243547-0.49528 0.601704-0.968048 0.716314-1.493004 1.348718-6.348592 2.020006-12.925381 
4.107551-19.022239 5.685489-16.598027 15.851013-30.185486 29.674856-40.961924 8.81783-6.873548 18.473747-12.223393 29.454846-14.875802 5.075599-1.226944 10.166547-2.39249 15.249309-3.589758C298.047652 190.621479 343.939865 190.621479 389.833101 190.621479L389.833101 190.621479zM381.548414 220.396619 381.548414 220.396619zM438.576245 269.856078 438.576245 269.856078z" ></path></symbol><symbol id="icon-youxiang" viewBox="0 0 1024 1024"><path d="M510.160095 422.618297l409.145447-253.15572c1.056052-0.653893 2.14792-1.192152 3.25309-1.679245-1.465374-0.12996-2.945075-0.209778-4.443196-0.209778L112.552425 167.573553 510.160095 422.618297z" ></path><path d="M959.196065 189.120288c0.977257 9.673313-3.450589 19.514449-12.273535 24.973788L523.715833 475.948923c-4.232394 2.618641-9.021467 3.926426-13.807471 3.926426-4.929266 0-9.853415-1.38658-14.167674-4.1536L63.478753 198.448748c-1.020236-0.654916-1.956561-1.38658-2.854001-2.152013-3.084245 6.486738-4.817726 13.725606-4.817726 21.35333l0 588.698846c0 27.542286 22.534226 50.076512 50.076512 50.076512l805.03908 0c31.629371 0 57.270355-25.640983 57.270355-57.270355L968.192973 217.650065C968.192973 207.067033 964.855972 197.22999 959.196065 189.120288z" ></path></symbol><symbol id="icon-zanting" viewBox="0 0 1024 1024"><path d="M510.979 64.899c-245.849 0-445.151 199.302-445.151 445.156 0 245.849 199.302 445.151 445.151 445.151 245.853 0 445.156-199.302 445.156-445.151 0-245.854-199.303-445.156-445.156-445.156z m-58.554 687.007H294.582V268.194h157.843v483.712z m274.951 0H569.534V268.194h157.843v483.712z" ></path></symbol><symbol id="icon-changyong" viewBox="0 0 1024 1024"><path d="M785.408 393.92l-62.016-61.952c-4.736-5.696-47.616-55.552-115.52-55.552h-30.912a23.232 23.232 0 0 0-21.44 14.208c-9.536 22.336-34.24 24.576-45.44 24.576-20.544 0-40.128-21.248-46.592-29.568a23.232 23.232 0 0 0-18.496-9.152h-30.976c-67.904 0-110.784 49.856-114.496 54.464l-62.976 62.976a27.84 27.84 0 0 0-8.576 19.968c0 14.784 11.904 26.56 18.304 32.896l47.04 40.576c6.272 6.336 16.832 16.896 31.36 16.896 7.616 0 14.72-2.88 19.328-7.552l17.216-15.424-5.376 223.04c0 37.632 21.76 43.264 34.688 43.264H632c12.864 0 34.624-5.632 34.624-43.264l-4.864-144.832 0.768 0.448-1.856-78.72 16.384 14.72c4.224 4.096 10.624 10.496 21.504 10.496 11.008 0 17.472-6.528 20.736-9.792l66.112-59.584c4.608-4.544 10.624-11.776 10.624-21.632 0-9.728-5.952-16.896-10.624-21.504z m-20.224 21.76l-65.6 59.008-1.024 1.024-1.344-1.28-67.648-60.736 7.744 291.008c0 13.44-1.984 13.44-5.248 13.44l-159.68-0.064-81.856 0.064c-3.328 0-5.312 0-5.312-13.44l7.104-291.008-67.584 61.056c-2.368 0-8.512-6.208-11.392-8.96l-47.04-40.576c-6.976-6.912-8.704-10.112-8.96-10.496l64-64c3.776-4.608 38.336-44.8 92.736-44.8h28.032c7.552 9.152 34.368 38.656 66.304 38.656l4.032 0.128c11.904 0 50.816-2.88 68.544-38.784H608c54.4 0 88.896 40.192 93.632 45.824l62.976 62.976c0.256 0.32 0.64 0.576 0.768 0.896-0.064-0.064-0.064 0-0.192 0.064z" ></path></symbol><symbol id="icon-changyong1" viewBox="0 0 1024 1024"><path d="M711.8336 290.2016L512.256 28.6208 312.6784 290.2016 0.4608 397.8752l188.8256 269.3632 6.656 328.1408 316.2624-95.1296 316.2624 95.1296 6.656-328.1408L1024 397.8752l-312.1664-107.6736z m15.5648 350.7712l-4.4032 212.3264-210.8416-61.5424-210.8416 61.5424-4.4544-212.3264-125.9008-174.2848 208.128-69.6832 133.0688-169.2672 133.0688 169.2672 208.128 69.6832-125.952 174.2848z" fill="#0B0307" ></path></symbol><symbol id="icon-yanjing" viewBox="0 0 1024 1024"><path d="M511.948 248.808C218.807 248.808 63.383 526.8 63.383 526.8s167.818 
247.09 448.565 247.09c280.768 0 448.604-247.09 448.604-247.09S822.67 248.808 511.948 248.808z m0.276 420.713c-89.385 0-161.85-70.712-161.85-157.925 0-87.213 72.465-157.925 161.85-157.925 89.384 0 161.849 70.712 161.849 157.925 0 87.213-72.466 157.925-161.85 157.925z m-0.256-222.322c-39.135 0-70.858 31.468-70.858 70.274 0 38.805 31.723 70.273 70.858 70.273s70.858-31.468 70.858-70.273c0-38.806-31.724-70.274-70.858-70.274z" ></path></symbol><symbol id="icon-xin" viewBox="0 0 1024 1024"><path d="M703.260022 159.15686c101.722776 0 188.873672 75.03495 202.713888 174.540221 9.642614 69.13252-9.594519 127.371946-58.719357 178.078815-103.370299 106.673531-288.779056 292.454772-324.899718 328.616366-3.709485 3.717671-7.937786 4.496407-10.820439 4.496407-2.888793 0-7.109931-0.778736-10.802021-4.496407-81.292468-81.608669-228.867548-229.994208-314.192865-315.994907-38.328957-38.612413-60.877509-80.081896-68.929905-126.722147-14.417361-83.47518 25.210172-168.079066 98.611925-210.523761 31.53625-18.24555 67.00609-27.889187 102.591563-27.889187 51.875484 0 101.859899 20.162202 140.725068 56.787354 17.142425 16.160051 34.172287 21.070898 48.546669 21.070898 18.48398 0 32.573883-8.125051 36.819581-10.876721 9.090029-5.924943 17.857716-12.029988 26.526143-18.060331 15.624862-10.843976 30.332842-21.096481 44.303019-28.125571 27.492144-13.863752 56.957223-20.901029 87.526449-20.901029z m0-67.394947c-39.385009 0-79.44233 8.863878-117.469412 28.027334-26.671453 13.450337-50.601468 32.485879-77.257571 49.888224 0 0-1.737574-1.355881-3.319606-2.848885-52.476165-49.449225-119.05963-74.968435-186.407503-74.968435-46.388517 0-93.142353 12.110829-135.895064 36.833906C84.687798 185.504948 32.414247 297.487392 51.715849 409.225265c11.087523 64.206324 42.346457 117.274984 87.525425 162.843832 85.358063 85.998653 232.965889 234.435357 314.26552 316.083935 16.056698 16.095583 37.030381 24.13263 58.020439 24.13263 20.974707 0 41.916668-8.004301 57.947783-24.058952 83.483366-83.565231 237.039671-237.925854 325.621149-329.315284 62.49331-64.496943 89.716324-144.131655 77.134774-234.556107-19.261693-138.330532-140.504034-232.593406-268.970917-232.593406" fill="" ></path></symbol><symbol id="icon-dianzan1" viewBox="0 0 1024 1024"><path d="M782.036 447.65h-172.29c66.761-246.563-45.998-259.156-45.998-259.156-47.791 0-37.884 37.791-41.498 44.092 0 120.58-128.074 215.065-128.074 215.065v341.946c0 33.754 45.998 45.91 64.042 45.91h258.866c24.363 0 44.202-63.892 44.202-63.892 64.043-217.77 64.043-282.564 64.043-282.564 0.001-44.993-43.293-41.398-43.293-41.398v0 0zM782.036 447.65z" fill="#b7b7b7" ></path><path d="M323.472 447.763h-103.167c-21.306 0-21.635 20.927-21.635 20.927l21.306 344.864c0 21.95 21.987 21.95 21.987 21.95h89.291c18.602 0 18.437-14.52 18.437-14.52v-347.069c0-26.486-26.221-26.151-26.221-26.151v0 0zM323.472 447.763z" fill="#b7b7b7" ></path></symbol><symbol id="icon-qq" viewBox="0 0 1024 1024"><path d="M511.997 47.862c-256.325 0-464.113 207.789-464.113 464.113 0 256.3 207.788 464.114 464.113 464.114S976.11 768.276 976.11 511.975c0-256.324-207.788-464.113-464.113-464.113z m205.638 577.48s-14.692 40.084-41.648 76.09c0 0 48.158 16.365 44.091 58.905 0 0 1.612 47.45-102.85 44.176 0 0-73.49-5.728-95.522-36.811h-19.42c-22.032 31.095-95.497 36.811-95.497 36.811-104.5 3.274-102.863-44.176-102.863-44.176-4.079-42.54 44.079-58.906 44.079-58.906-26.93-36.005-41.624-76.09-41.624-76.09-65.318 105.55-58.784-14.73-58.784-14.73 12.262-71.18 63.67-117.81 63.67-117.81-7.353-64.647 19.59-76.09 19.59-76.09 5.654-199.984 177.535-196.491 
181.138-196.393 3.615-0.097 175.471-3.591 181.138 196.38 0 0 26.93 11.456 19.59 76.09 0 0 51.432 46.631 63.67 117.812 0.002 0.012 6.524 120.29-58.758 14.741z" fill="" ></path></symbol><symbol id="icon-xiaohuojian" viewBox="0 0 1024 1024"><path d="M736.804 448.468c3.26-257.385-208.513-374.677-224.807-384.45-13.029 6.516-228.062 123.804-224.801 384.45-42.357 29.323-87.969 78.195-81.452 162.903 6.516 84.708 91.225 143.354 123.804 140.098 32.584-3.26 22.805-26.067 22.805-26.067l9.778-45.613s48.868 71.68 61.9 71.68h175.933c16.29 0 61.902-71.68 61.902-71.68l9.777 45.613s-9.777 22.807 22.805 26.067c32.58 3.256 117.288-55.39 123.805-140.098 6.518-84.708-39.092-133.58-81.449-162.903z m-224.807-3.257c-6.511 0-84.708-3.261-94.48-94.486 3.255-87.968 87.968-94.48 94.48-97.741 6.517 0 91.23 9.773 94.486 97.74-9.774 91.227-87.969 94.487-94.486 94.487z m0 0M463.129 891.563c0 9.772-9.774 19.549-19.55 19.549-9.773 0-19.546-9.777-19.546-19.55v-94.481c0-9.777 9.773-19.55 19.546-19.55 9.776 0 19.55 9.773 19.55 19.55v94.482z m0 0M534.804 940.436c0 9.772-9.773 19.546-19.546 19.546-9.772 0-19.55-9.774-19.55-19.546V800.338c0-9.773 9.778-19.55 19.55-19.55 9.773 0 19.546 9.777 19.546 19.55v140.098z m0 0M599.966 868.756c0 9.774-9.772 19.55-19.546 19.55-9.776 0-19.549-9.776-19.549-19.55v-68.418c0-9.773 9.773-19.55 19.55-19.55 9.773 0 19.545 9.777 19.545 19.55v68.418z m0 0" fill="" ></path></symbol><symbol id="icon-chuyidong" viewBox="0 0 1024 1024"><path d="M512 464.265846L337.053538 289.28a33.752615 33.752615 0 1 0-47.734153 47.734154L464.226462 512l-174.985847 174.946462a33.752615 33.752615 0 1 0 47.734154 47.734153L512 559.773538l174.946462 174.985847a33.752615 33.752615 0 1 0 47.734153-47.734154L559.773538 512l174.985847-174.946462a33.752615 33.752615 0 1 0-47.734154-47.734153L512 464.226462z" ></path></symbol></svg>';var script=function(){var scripts=document.getElementsByTagName("script");return scripts[scripts.length-1]}();var shouldInjectCss=script.getAttribute("data-injectcss");var ready=function(fn){if(document.addEventListener){if(~["complete","loaded","interactive"].indexOf(document.readyState)){setTimeout(fn,0)}else{var loadFn=function(){document.removeEventListener("DOMContentLoaded",loadFn,false);fn()};document.addEventListener("DOMContentLoaded",loadFn,false)}}else if(document.attachEvent){IEContentLoaded(window,fn)}function IEContentLoaded(w,fn){var d=w.document,done=false,init=function(){if(!done){done=true;fn()}};var polling=function(){try{d.documentElement.doScroll("left")}catch(e){setTimeout(polling,50);return}init()};polling();d.onreadystatechange=function(){if(d.readyState=="complete"){d.onreadystatechange=null;init()}}}};var before=function(el,target){target.parentNode.insertBefore(el,target)};var prepend=function(el,target){if(target.firstChild){before(el,target.firstChild)}else{target.appendChild(el)}};function appendSvg(){var div,svg;div=document.createElement("div");div.innerHTML=svgSprite;svgSprite=null;svg=div.getElementsByTagName("svg")[0];if(svg){svg.setAttribute("aria-hidden","true");svg.style.position="absolute";svg.style.width=0;svg.style.height=0;svg.style.overflow="hidden";prepend(svg,document.body)}}if(shouldInjectCss&&!window.__iconfont__svg__cssinject__){window.__iconfont__svg__cssinject__=true;try{document.write("<style>.svgfont {display: inline-block;width: 1em;height: 1em;fill: currentColor;vertical-align: -0.1em;font-size:16px;}</style>")}catch(e){console&&console.log(e)}}ready(appendSvg)})(window)
|
/**
* Adds shortcut methods for JSON API responses:
*
* * `res.apiResponse(data)`
* * `res.apiError(key, err, msg, code)`
* * `res.apiNotFound(err, msg)`
* * `res.apiNotAllowed(err, msg)`
*
* ####Example:
*
* app.all('/api*', keystone.middleware.api);
*
* @param {app.request} req
* @param {app.response} res
* @param {function} next
* @api public
*/
// The exported function returns a closure that retains
// a reference to the keystone instance, so it can be
// passed as middleware to the express app.
exports = module.exports = function(keystone) {
return function initAPI(req, res, next) {
res.apiResponse = function(data) {
if (req.query.callback) {
res.jsonp(data);
} else {
res.json(data);
}
};
res.apiError = function(key, err, msg, code) {
msg = msg || 'Error';
key = key || 'unknown error';
msg += ' (' + key + ')';
if (keystone.get('logger')) {
console.log(msg + (err ? ':' : ''));
if (err) {
console.log(err);
}
}
res.status(code || 500);
res.apiResponse({ error: key || 'error', detail: err });
};
res.apiNotFound = function (err, msg) {
res.apiError('data not found', err, msg || 'not found', 404);
};
res.apiNotAllowed = function (err, msg) {
res.apiError('access not allowed', err, msg || 'not allowed', 403);
};
next();
};
};
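// A minimal usage sketch (added for illustration; the express app and routes below are
// assumptions, not part of this module):
//
//   var express = require('express');
//   var keystone = require('keystone');
//   var app = express();
//
//   app.all('/api*', keystone.middleware.api);
//
//   app.get('/api/ping', function (req, res) {
//     res.apiResponse({ pong: true });         // JSON, or JSONP when ?callback= is present
//   });
//
//   app.get('/api/missing', function (req, res) {
//     res.apiNotFound(null, 'nothing here');   // responds with status 404
//   });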
|
// load cookies library
const Cookies = require('universal-cookie').default;
// setup cookies
const cookies = new Cookies();
let initState = {
locale : cookies.get('os-locale') || 'zh_HK',
locales : [
{
name : "English",
code : "en_US"
},
{
name : "正體中文",
code : "zh_HK"
}
],
__ : function(key) {
// get locale
let locale = cookies.get('os-locale') || 'zh_HK';
// set wordings container
let wordings = {};
// try to process wordings json
try {
// get the text file
let raw = require(`../locales/${locale}.json`);
// save it
wordings = raw;
} catch (error) { /* do nothing */ }
// return the translated text; if it does not exist, fall back to the key
return wordings[key] == undefined ? key : wordings[key];
}
};
export default (state = initState, action) => {
switch(action.type) {
case 'SET_LOCALE' : {
// save locale
cookies.set(
'os-locale',
action.payload,
{
sameSite : 'None', // camelCase key so the cookie serializer honours it; browsers also require Secure when SameSite=None
secure : false,
path : "/"
}
);
return {
...state,
locale : action.payload
};
}
default:
return state;
}
}
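// A minimal usage sketch (added for illustration; the store wiring and the './locale' path
// are assumptions, not part of this file):
//
//   import { createStore } from 'redux';
//   import localeReducer from './locale';
//
//   const store = createStore(localeReducer);
//   console.log(store.getState().locale);        // 'zh_HK' unless an os-locale cookie is set
//   store.dispatch({ type: 'SET_LOCALE', payload: 'en_US' });
//   console.log(store.getState().__('hello'));   // falls back to the key when no translation exists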
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import torch
from ignite.engine import Engine
from parameterized import parameterized
from monai.handlers import SegmentationSaver
TEST_CASE_1 = [".nii.gz"]
TEST_CASE_2 = [".png"]
class TestHandlerSegmentationSaver(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_saved_content(self, output_ext):
default_dir = os.path.join(".", "tempdir")
shutil.rmtree(default_dir, ignore_errors=True)
# set up engine
def _train_func(engine, batch):
return torch.zeros(8, 1, 2, 2)
engine = Engine(_train_func)
# set up testing handler
saver = SegmentationSaver(output_dir=default_dir, output_postfix="seg", output_ext=output_ext)
saver.attach(engine)
data = [{"filename_or_obj": ["testfile" + str(i) for i in range(8)]}]
engine.run(data, max_epochs=1)
for i in range(8):
filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_seg" + output_ext)
self.assertTrue(os.path.exists(os.path.join(default_dir, filepath)))
shutil.rmtree(default_dir)
if __name__ == "__main__":
unittest.main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Written by Martin v. Löwis <loewis@informatik.hu-berlin.de>
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation.
Usage: msgfmt.py [OPTIONS] filename.po
Options:
-o file
--output-file=file
Specify the output file to write to. If omitted, output will go to a
file named filename.mo (based off the input file name).
-h
--help
Print this message and exit.
-V
--version
Display version information and exit.
"""
from __future__ import print_function
import os
import sys
import getopt
import struct
import array
import re
import codecs
from email.parser import HeaderParser
__version__ = "1.2"
MESSAGES = {}
def usage(code, msg=''):
print(__doc__, file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(code)
def add(id, str, fuzzy):
"Add a non-fuzzy translation to the dictionary."
global MESSAGES
if not fuzzy and str:
MESSAGES[id] = str
def dequote(s):
if (s[0] == s[-1]) and s.startswith(("'", '"')):
return s[1:-1]
return s
# decode_escapes from http://stackoverflow.com/a/24519338
ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U........ # 8-digit hex escapes
| \\u.... # 4-digit hex escapes
| \\x.. # 2-digit hex escapes
| \\[0-7]{1,3} # Octal escapes
| \\N\{[^}]+\} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)''', re.UNICODE | re.VERBOSE)
def decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return ESCAPE_SEQUENCE_RE.sub(decode_match, s)
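# Illustration (added): decode_escapes(r'first\nsecond') converts the two characters '\' and 'n'
# into a real newline; \t, \uXXXX, \xXX, octal and \N{...} escapes are handled the same way.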
def generate():
"Return the generated output."
global MESSAGES
# the keys are sorted in the .mo file
keys = sorted(MESSAGES.keys())
offsets = []
ids = strs = b''
for id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
ids += id + b'\0'
strs += MESSAGES[id] + b'\0'
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
keystart = 7*4+16*len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412de, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index
7*4+len(keys)*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tostring()
output += ids
output += strs
return output
def make(filename, outfile):
ID = 1
STR = 2
# Compute .mo name from .po name and arguments
if filename.endswith('.po'):
infile = filename
else:
infile = filename + '.po'
if outfile is None:
outfile = os.path.splitext(infile)[0] + '.mo'
try:
lines = open(infile, 'rb').readlines()
except IOError as msg:
print(msg, file=sys.stderr)
sys.exit(1)
section = None
fuzzy = 0
empty = 0
header_attempted = False
# Start off assuming Latin-1, so everything decodes without failure,
# until we know the exact encoding
encoding = 'latin-1'
# Parse the catalog
for lno, l in enumerate(lines):
l = l.decode(encoding)
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
add(msgid, msgstr, fuzzy)
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and 'fuzzy' in l:
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid') and not l.startswith('msgid_plural'):
if section == STR:
add(msgid, msgstr, fuzzy)
if not msgid:
# See whether there is an encoding declaration
p = HeaderParser()
charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
if charset:
encoding = charset
section = ID
l = l[5:]
msgid = msgstr = b''
is_plural = False
if l.strip() == '""':
# Check if next line is msgstr. If so, this is a multiline msgid.
if lines[lno+1].decode(encoding).startswith('msgstr'):
# If this is the first empty msgid and is followed by msgstr, this is the header, which may contain the encoding declaration.
# Otherwise this file is not valid
if empty > 1:
print("Found multiple empty msgids on line " + str(lno) + ", not valid!")
empty += 1
# This is a message with plural forms
elif l.startswith('msgid_plural'):
if section != ID:
print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[12:]
msgid += b'\0' # separator of singular and plural
is_plural = True
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
if l.startswith('msgstr['):
if not is_plural:
print('plural without msgid_plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l.split(']', 1)[1]
if msgstr:
msgstr += b'\0' # Separator of the various plural forms
else:
if (l[6:].strip() == '""') and (empty == 1) and (not header_attempted):
header = ""
# parse up until next empty line = end of header
hdrno = lno
while(hdrno < len(lines)-1):
# This is a roundabout way to strip non-ASCII unicode characters from the header.
# As we are only parsing out the encoding, we don't need any unicode chars in it.
l = lines[hdrno+1].decode('unicode_escape').encode('ascii','ignore').decode(encoding)
if l.strip():
header += decode_escapes(dequote(l.strip()))
else:
break
hdrno += 1
# See whether there is an encoding declaration
if(hdrno > lno):
p = HeaderParser()
charset = p.parsestr(str(header)).get_content_charset()
header_attempted = True
if charset:
encoding = charset
if is_plural:
print('indexed msgstr required for plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
l = decode_escapes(dequote(l)) # strip quotes and replace newlines if present
if section == ID:
msgid += l.encode(encoding)
elif section == STR:
msgstr += l.encode(encoding)
else:
print('Syntax error on %s:%d' % (infile, lno), \
'before:', file=sys.stderr)
print(l, file=sys.stderr)
sys.exit(1)
# Add last entry
if section == STR:
add(msgid, msgstr, fuzzy)
# Compute output
output = generate()
try:
open(outfile,"wb").write(output)
except IOError as msg:
print(msg, file=sys.stderr)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hVo:',
['help', 'version', 'output-file='])
except getopt.error as msg:
usage(1, msg)
outfile = None
# parse options
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print("msgfmt.py", __version__)
sys.exit(0)
elif opt in ('-o', '--output-file'):
outfile = arg
# do it
if not args:
print('No input file given', file=sys.stderr)
print("Try `msgfmt --help' for more information.", file=sys.stderr)
return
for filename in args:
make(filename, outfile)
if __name__ == '__main__':
main()
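# A minimal end-to-end sketch (added for illustration; the file names are assumptions):
#
#   $ python msgfmt.py -o messages.mo messages.po
#
# The compiled catalog can then be loaded with the standard gettext module:
#
#   import gettext
#   with open('messages.mo', 'rb') as fp:
#       catalog = gettext.GNUTranslations(fp)
#   print(catalog.gettext('some msgid'))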
|
var from = require('fromjs');
function Repository() {
var self = this
, contacts = require('./contacts.json')
, dummyPromise = {};
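// dummyPromise is a chainable no-op: its then/catch return the same object, so any
// .then()/.catch() calls chained after the first callback are simply swallowed.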
dummyPromise.then = function () {
return dummyPromise;
};
dummyPromise.catch = function () {
return dummyPromise;
};
self.getContact = function (id) {
return {
then: function (callback) {
setTimeout(function () {
callback(from(contacts).singleOrDefault(function (e) { return e.id == id; }, null));
});
return dummyPromise;
},
catch: function () {
return dummyPromise;
}
};
};
self.getAllContacts = function () {
return {
then: function (callback) {
setTimeout(function () {
callback(contacts);
});
return dummyPromise;
},
catch: function () {
return dummyPromise;
}
};
};
}
module.exports = Repository;
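// A minimal usage sketch (added for illustration; the module path and the id used below
// are assumptions):
//
//   var Repository = require('./repository');
//   var repo = new Repository();
//
//   repo.getContact(1).then(function (contact) {
//     console.log(contact);        // the matching entry from contacts.json, or null
//   });
//
//   repo.getAllContacts().then(function (all) {
//     console.log(all.length);
//   });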
|
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.modules.module_util as mutil
class ResidualDenseBlock_5C(nn.Module):
def __init__(self, nf=64, gc=32, bias=True):
super(ResidualDenseBlock_5C, self).__init__()
# gc: growth channel, i.e. intermediate channels
self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)
self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)
self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
# initialization
mutil.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)
def forward(self, x):
x1 = self.lrelu(self.conv1(x))
x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
return x5 * 0.2 + x
class RRDB(nn.Module):
'''Residual in Residual Dense Block'''
def __init__(self, nf, gc=32):
super(RRDB, self).__init__()
self.RDB1 = ResidualDenseBlock_5C(nf, gc)
self.RDB2 = ResidualDenseBlock_5C(nf, gc)
self.RDB3 = ResidualDenseBlock_5C(nf, gc)
def forward(self, x):
out = self.RDB1(x)
out = self.RDB2(out)
out = self.RDB3(out)
return out * 0.2 + x
class UpsampleConcatSqueeze(nn.Module):
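'''Upsample x1 with a transposed conv, pad it to x2's spatial size, concatenate with x2 along channels, then squeeze back to filters_out channels with a 1x1 conv.'''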
def __init__(self, filters_in, filters_out):
super().__init__()
self.up = nn.ConvTranspose2d(filters_in, filters_in // 2, kernel_size=2, stride=2)
self.conv = nn.Conv2d(filters_in // 2 + filters_out, filters_out, 1)
def forward(self, x1, x2):
x1 = self.up(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1) #CHW
x = self.conv(x)
return x
class USRGANLarge(nn.Module):
def __init__(self, in_nc, out_nc, nf, gc=32):
super(USRGANLarge, self).__init__()
self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
# Downsampling
self.rrdb_1_1 = RRDB(nf=nf, gc=gc)
self.rrdb_1_2 = RRDB(nf=nf, gc=gc)
self.rrdb_2_1 = RRDB(nf=nf, gc=gc)
self.rrdb_2_2 = RRDB(nf=nf, gc=gc)
self.change_fn_1 = nn.Conv2d(nf, nf * 2, 1)
self.rrdb_3_1 = RRDB(nf=nf * 2, gc=gc * 2)
self.rrdb_3_2 = RRDB(nf=nf * 2, gc=gc * 2)
self.rrdb_4_1 = RRDB(nf=nf * 2, gc=gc * 2)
self.rrdb_4_2 = RRDB(nf=nf * 2, gc=gc * 2)
self.change_fn_2 = nn.Conv2d(nf * 2, nf * 4, 1)
# Bottleneck
self.rrdb_5_1 = RRDB(nf=nf * 4, gc=gc * 4)
self.rrdb_5_2 = RRDB(nf=nf * 4, gc=gc * 4)
# Upsampling
self.up_1 = UpsampleConcatSqueeze(nf * 4, nf * 2)
self.rrdb_6_1 = RRDB(nf=nf * 2, gc=gc * 2)
self.rrdb_6_2 = RRDB(nf=nf * 2, gc=gc * 2)
self.up_2 = UpsampleConcatSqueeze(nf * 2, nf * 2)
self.rrdb_7_1 = RRDB(nf=nf * 2, gc=gc * 2)
self.rrdb_7_2 = RRDB(nf=nf * 2, gc=gc * 2)
self.up_3 = UpsampleConcatSqueeze(nf * 2, nf)
self.rrdb_8_1 = RRDB(nf=nf, gc=gc)
self.rrdb_8_2 = RRDB(nf=nf, gc=gc)
self.up_4 = UpsampleConcatSqueeze(nf, nf)
self.rrdb_9_1 = RRDB(nf=nf, gc=gc)
self.rrdb_9_2 = RRDB(nf=nf, gc=gc)
self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.HR_rrdb = RRDB(nf=nf, gc=gc)
self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.avr_pool = nn.AvgPool2d(2)
def forward(self, x):
x_1 = self.conv_first(x)
# Downsampling
x_1_1 = self.rrdb_1_1(x_1)
x_1_2 = self.rrdb_1_2(x_1_1)
x_1_2_p = self.avr_pool(x_1_2)
x_2_1 = self.rrdb_2_1(x_1_2_p)
x_2_2 = self.rrdb_2_2(x_2_1)
x_2_2_p = self.avr_pool(x_2_2)
x_2_2_p = self.change_fn_1(x_2_2_p)
x_3_1 = self.rrdb_3_1(x_2_2_p)
x_3_2 = self.rrdb_3_2(x_3_1)
x_3_2_p = self.avr_pool(x_3_2)
x_4_1 = self.rrdb_4_1(x_3_2_p)
x_4_2 = self.rrdb_4_2(x_4_1)
x_4_2_p = self.avr_pool(x_4_2)
x_4_2_p = self.change_fn_2(x_4_2_p)
# Bottleneck
x_5_1 = self.rrdb_5_1(x_4_2_p)
x_5_2 = self.rrdb_5_2(x_5_1)
# Upsampling
x_6_1_u = self.up_1(x_5_2, x_4_2)
x_6_1 = self.rrdb_6_1(x_6_1_u)
x_6_2 = self.rrdb_6_2(x_6_1)
x_7_1_u = self.up_2(x_6_2, x_3_2)
x_7_1 = self.rrdb_7_1(x_7_1_u)
x_7_2 = self.rrdb_7_2(x_7_1)
x_8_1_u = self.up_3(x_7_2, x_2_2)
x_8_1 = self.rrdb_8_1(x_8_1_u)
x_8_2 = self.rrdb_8_2(x_8_1)
x_9_1_u = self.up_4(x_8_2, x_1_2)
x_9_1 = self.rrdb_9_1(x_9_1_u)
x_9_2 = self.rrdb_9_2(x_9_1)
x = x_9_2 + x_1
x = self.lrelu(self.upconv1(F.interpolate(x, scale_factor=2, mode='nearest')))
x = self.lrelu(self.upconv2(F.interpolate(x, scale_factor=2, mode='nearest')))
out = self.conv_last(self.HR_rrdb(x))
return out
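if __name__ == '__main__':
    # Minimal forward-pass sketch added for illustration (assumes the
    # models.modules.module_util dependency imported above is available).
    net = USRGANLarge(in_nc=3, out_nc=3, nf=64, gc=32)
    lr = torch.randn(1, 3, 32, 32)  # dummy low-resolution RGB batch
    with torch.no_grad():
        sr = net(lr)
    print(sr.shape)  # expected: torch.Size([1, 3, 128, 128]), i.e. 4x upscaling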
|