"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports["default"] = void 0;
var _styledComponents = require("styled-components");
function _templateObject() {
var data = _taggedTemplateLiteral(["\n*{\n box-sizing:border-box;\n padding:0;\n margin:0;\n}\n"]);
_templateObject = function _templateObject() {
return data;
};
return data;
}
function _taggedTemplateLiteral(strings, raw) { if (!raw) { raw = strings.slice(0); } return Object.freeze(Object.defineProperties(strings, { raw: { value: Object.freeze(raw) } })); }
var GlobalStyle = (0, _styledComponents.createGlobalStyle)(_templateObject());
var _default = GlobalStyle;
exports["default"] = _default;
|
################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""All file specific to MadLoop"""
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"Also available in": "Също достъпно в",
"Archive": "Архив",
"LANGUAGE": "Английски",
"More posts about": "Още публикации относно ",
"Newer posts": "Нови публикации",
"Next post": "Следваща публикация",
"Older posts": "Стари публикации",
"Original site": "Оригиналния сайт",
"Posted": "Публиковано",
"Posts about %s": "Публикации относно %s",
"Posts for year %s": "Публикации за %s година",
"Posts for {month} {year}": "Публикации за {month} {year}",
"Previous post": "Предишна публикация",
"Read in English": "Прочетете на български",
"Read more": "Прочети още",
"Source": "Source",
"Tags": "Тагове",
"old posts page %d": "стари публикации страница %d",
}
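# A minimal usage sketch, assuming a Nikola-style catalog like the one above:
# entries are plain strings or printf-/str.format-style templates.
if __name__ == "__main__":
    print(MESSAGES["Posts about %s"] % "python")
    print(MESSAGES["Posts for {month} {year}"].format(month="май", year=2020))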
|
/**
****************************************************************************
* Copyright 2016 IBM
*
* Custom Nodes in Node-RED
*
* By JeanCarl Bisson (@dothewww)
* More info: https://ibm.biz/node-red-custom-nodes
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ****************************************************************************
*/
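// In a Node-RED "function" node wired in front of an "http response" node,
// setting msg.headers overrides the HTTP response headers; here the payload
// is served with a Content-Type of application/pdf.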
msg.headers = {
'content-type': 'application/pdf'
};
return msg;
|
export const availableLangs = ['es', 'en']
export const messages = {
en: {
// Error messages
'emailRequiredField': "'email' is a required field.",
'emailIsEmail': "This is not a valid email address.",
'passwordRequiredField': "'password' is a required field.",
// Success messages
'loginSuccessful': "You've successfully logged in.",
'emailSent': "Your password recovery email was sent."
},
es: {
    // Error messages
'emailRequiredField': "'email' es un campo requerido.",
'emailIsEmail': "Este no es un email válido.",
'passwordRequiredField': "'password' es un campo requerido.",
    // Success messages
'loginSuccessful': "Has iniciado sesión exitosamente.",
'emailSent': "Tu correo de recuperación de contraseña ha sido enviado."
}
}
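// A minimal lookup-helper sketch ('t' below is hypothetical, not part of the
// original module): resolve a key for a language, falling back to English.
export const t = (lang, key) =>
  (messages[lang] && messages[lang][key]) || messages.en[key] || key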
|
import React from 'react'
import { connect } from 'react-redux'
import { Jumbotron, Container, Row, Col, Card, Spinner } from 'react-bootstrap'
import { carregarListaCompras } from '../reducers/comprasReducer'
class Historico extends React.Component {
constructor(props) {
super(props)
this.renderizarCompra = this.renderizarCompra.bind(this)
}
componentDidMount() {
this.props.carregarListaCompras()
}
renderizarCompra(compra) {
return (
      <Col sm={12} key={compra.id} style={{ width: '100%', marginTop: 30 }}>
<Card>
<Card.Header style={{
backgroundColor: '#981C1E', fontSize: 20, color: 'white'
}} >
<i className="fas fa-hamburger" style={{ fontSize: 25, marginRight: 15 }}></i>
Compra {compra.id} - R${(compra.preco).toFixed(2)}
</Card.Header>
<Card.Body>
<Card.Title>Itens da compra:</Card.Title>
          <Card.Text as="div">
<ul>
            {compra.lanchesCompra.map((lancheCompra, indice) => (
              <li key={indice}>
{lancheCompra.quantidade} {lancheCompra.lanche.nome} -
R${(lancheCompra.quantidade * lancheCompra.lanche.preco).toFixed(2)}
</li>
))}
</ul>
</Card.Text>
{compra.promocoesCompra.length > 0 ?
<div>
<Card.Title style={{ marginTop: 50 }}>Promoções aplicadas (em cada lanche):</Card.Title>
            <Card.Text as="div">
<ul>
{compra.promocoesCompra.map((promocaoCompra) => (
<li key={promocaoCompra.promocao.id}>
{promocaoCompra.promocao.nome} -
R${(promocaoCompra.desconto).toFixed(2)}
</li>
))}
</ul>
</Card.Text>
</div>
: <div></div>}
</Card.Body>
</Card>
</Col>
)
}
render() {
return (
<div style={{
minHeight: '75vh'
}}>
<Container fluid>
<Row>
<Col>
<Jumbotron style={{
display: 'flex', flexDirection: 'column', justifyContent: 'center', alignItems: 'center'
}}>
<h1>Histórico de compras</h1>
</Jumbotron>
</Col>
</Row>
<Row style={{
display: 'flex', justifyContent: 'center', alignItems: 'center',
marginTop: 50
}}>
{this.props.conteudoEstaPronto ?
this.props.compras.map((e) => this.renderizarCompra(e))
:
<Spinner animation="border" variant="danger" />
}
</Row>
</Container>
</div>
)
}
}
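// mapStateToProps/mapDispatchToProps below wire the component to the Redux
// store: the purchase list and its "ready" flag come from the compras slice,
// and carregarListaCompras is dispatched when the component mounts.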
const mapStateToProps = store => ({
compras: store.compras.listaCompras,
conteudoEstaPronto: store.compras.conteudoEstaPronto,
mensagemErro: store.mensagens.erro,
temErro: store.mensagens.temErro
})
const mapDispatchToProps = {
carregarListaCompras
}
export default connect(mapStateToProps, mapDispatchToProps)(Historico)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Frank Nussbaum (frank.nussbaum@uni-jena.de), 2020
Demo for
Nussbaum, F. & Giesen, J. (2020). Pairwise sparse + low-rank models for variables of mixed type.
Journal of Multivariate Analysis, 2020.
If you use this software, please consider citing this article.
"""
# pylint: disable=C0103
from cgmodsel.admm import AdmmCGaussianSL, AdmmGaussianSL
from cgmodsel.dataops import load_prepare_data # function to read data
def load(dataset: dict):
"""
load csv with file path dataset['filename']
return tuple (cat_data, cont_data, meta),
where cat_data is the binary data, cont_data is the quantitative data,
and meta is meta information about the dataset
"""
## parameters for loading function ##
loaddict = {'catuniques': None, 'standardize': True}
    # standardize: standardize quantitative variables before learning the model
    # catuniques: values taken by the binary variables (to support the read
    # function); it is recommended to provide this if the binary variables
    # are not strings such as 'yes'/'no'
if 'sparams' in dataset:
loaddict.update(dataset['sparams'])
return load_prepare_data(dataset['filename'],
cattype='dummy_red',
**loaddict)
if __name__ == '__main__':
###### data sets
## binary ##
ABILITY = {
'filename': "datasets/ability_proc.csv",
'regparams': (.2, .5),
'sparams': {
'catuniques': [0, 1]
} # values that binary variables take
}
CFMT = {
'filename': "datasets/CFMTkurzBIN.csv",
'regparams': (.15, 1.5),
'sparams': {
'catuniques': [0, 1]
} # values that binary variables take
}
## quantitative ##
LSVT = {
'filename': "datasets/LSVT.csv",
'regparams': (.1, 1),
}
## mixed binary-quantitative ##
ALLBUS = {
'filename': "datasets/allbus2016_proc.csv",
'regparams': (1, 2),
}
HELP = {
'filename': "datasets/HELPmiss_proc.csv",
'regparams': (.5, 2),
}
###### select and load data set
    # ********************************* #
    # comment out all but one line here #
    data = CFMT
    # data = ABILITY
    # data = LSVT
    # data = ALLBUS
    # data = HELP
    # ********************************* #
    print('Loading data... (%s)' % data['filename'])
cat_data, cont_data, meta = load(data) # load the data
###### fit models
## initialize solver and drop data ##
if meta['n_cat'] > 0: # binary variables are present
solver = AdmmCGaussianSL()
solver.drop_data((cat_data, cont_data), meta)
else: # purely Gaussian model
solver = AdmmGaussianSL()
solver.drop_data(cont_data, meta)
## set regularization parameters ##
# you may try different values, any pair of positive reals will do
# e.g., regparams = (.1, 1)
regparams = data['regparams'] # regularization parameters
solver.set_regularization_params(regparams)
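    # NOTE (assumption, not from the cgmodsel docs): the two entries presumably
    # weight the sparse penalty and the low-rank penalty, respectively.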
## solve the problem, that is, estimate a sparse + low-rank model ##
print('Solving the problem...')
solver.solve(verb=0)
###### model visualization
model = solver.get_canonicalparams() # S + L model instance
model.plot_sl(plottype='pn')
|
// Github: https://github.com/shdwjk/Roll20API/blob/master/Torch/Torch.js
// By: The Aaron, Arcane Scriptomancer
// Contact: https://app.roll20.net/users/104025/the-aaron
var Torch = Torch || (function() {
'use strict';
var version = '0.8.9',
lastUpdate = 1478016891,
schemaVersion = 0.1,
flickerURL = 'https://s3.amazonaws.com/files.d20.io/images/4277467/iQYjFOsYC5JsuOPUCI9RGA/thumb.png?1401938659',
flickerPeriod = 400,
flickerDeltaLocation = 2,
flickerDeltaRadius = 0.1,
flickerDeltaAngle = 5,
flickerInterval = false,
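        // Flicker tuning: every flickerPeriod ms, animateFlicker (below) jostles
        // the helper token by roughly ±flickerDeltaLocation px, its light radius
        // by roughly ±10% (flickerDeltaRadius), and its light angle by roughly
        // ±flickerDeltaAngle degrees.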
ch = function (c) {
var entities = {
'<' : 'lt',
'>' : 'gt',
"'" : '#39',
'@' : '#64',
'{' : '#123',
'|' : '#124',
'}' : '#125',
'[' : '#91',
']' : '#93',
'"' : 'quot',
'-' : 'mdash',
' ' : 'nbsp'
};
if(_.has(entities,c) ){
return ('&'+entities[c]+';');
}
return '';
},
showHelp = function(who) {
sendChat('',
'/w "'+who+'" '+
'<div style="border: 1px solid black; background-color: white; padding: 3px 3px;">'+
'<div style="font-weight: bold; border-bottom: 1px solid black;font-size: 130%;">'+
'Torch v'+version+
'</div>'+
'<div style="padding-left:10px;margin-bottom:3px;">'+
'<p>Torch provides commands for managing dynamic lighting. Supplying a first argument of <b>help</b> to any of the commands displays this help message, as will calling !torch or !snuff with nothing supplied or selected.</p>'+
'<p>Torch now supports <b><i>Jack Taylor</i></b>-inspired flickering lights. Flicker lights are only active on pages where a player is (GMs: drag yourself to another page if you don'+ch("'")+'t want to move the party) and are persisted in the state. Flicker lights can be used in addition to regular lights, as they are implemented on a separate invisible token that follows the normal token.</p>'+
'</div>'+
'<b>Commands</b>'+
'<div style="padding-left:10px;">'+
'<b><span style="font-family: serif;">!torch '+ch('[')+ch('<')+'Radius'+ch('>')+' '+ch('[')+ch('<')+'Dim Start'+ch('>')+' '+ch('[')+ch('<')+'All Players'+ch('>')+' '+ch('[')+ch('<')+'Token ID'+ch('>')+ch('|')+ch('<')+'--Angle'+ch('>')+' ... '+ch(']')+ch(']')+ch(']')+ch(']')+'</span></b>'+
'<div style="padding-left: 10px;padding-right:20px">'+
'<p>Sets the light for the selected/supplied tokens. Only GMs can supply token ids to adjust.</p>'+
'<p><b>Note:</b> If you are using multiple '+ch('@')+ch('{')+'target'+ch('|')+'token_id'+ch('}')+' calls in a macro, and need to adjust light on fewer than the supplied number of arguments, simply select the same token several times. The duplicates will be removed.</p>'+
'<ul>'+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'Radius'+ch('>')+'</span></b> '+ch('-')+' The radius that the light extends to. (Default: 40)'+
'</li> '+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'Dim Start'+ch('>')+'</span></b> '+ch('-')+' The radius at which the light begins to dim. (Default: Half of Radius )'+
'</li> '+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'All Players'+ch('>')+'</span></b> '+ch('-')+' Should all players see the light, or only the controlling players (Darkvision, etc). Specify one of <i>1, on, yes, true, sure, yup, or -</i> for yes, anything else for no. (Default: yes)'+
'</li> '+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'Token ID'+ch('>')+'</span></b> '+ch('-')+' A Token ID, usually supplied with something like '+ch('@')+ch('{')+'target'+ch('|')+'Target 1'+ch('|')+'token_id'+ch('}')+'.'+
'</li> '+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'--Angle'+ch('>')+'</span></b> '+ch('-')+' The angle of the light arc. (Default: 360)'+
'</li> '+
'</ul>'+
'</div>'+
'<b><span style="font-family: serif;">!snuff '+ch('[')+ch('<')+'Token ID'+ch('>')+' ... '+ch(']')+'</span></b>'+
'<div style="padding-left: 10px;padding-right:20px">'+
'<p>Turns off light for the selected/supplied tokens. Only GMs can supply token ids to adjust.</p>'+
'<p><b>Note:</b> If you are using multiple '+ch('@')+ch('{')+'target'+ch('|')+'token_id'+ch('}')+' calls in a macro, and need to adjust light on fewer than the supplied number of arguments, simply select the same token several times. The duplicates will be removed.</p>'+
'<ul>'+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'Token ID'+ch('>')+'</span></b> '+ch('-')+' A Token ID, usually supplied with something like '+ch('@')+ch('{')+'target'+ch('|')+'Target 1'+ch('|')+'token_id'+ch('}')+'.'+
'</li> '+
'</ul>'+
'</div>'+
'<b><span style="font-family: serif;">!flicker-on '+ch('[')+ch('<')+'Radius'+ch('>')+' '+ch('[')+ch('<')+'Dim Start'+ch('>')+' '+ch('[')+ch('<')+'All Players'+ch('>')+' '+ch('[')+ch('<')+'Token ID'+ch('>')+ch('|')+ch('<')+'--Angle'+ch('>')+' ... '+ch(']')+ch(']')+ch(']')+ch(']')+'</span></b>'+
'<div style="padding-left: 10px;padding-right:20px">'+
'<p>Behaves identically to !torch, save that it creates a flickering light.</p>'+
'</div>'+
'<b><span style="font-family: serif;">!flicker-off '+ch('[')+ch('<')+'Token ID'+ch('>')+' ... '+ch(']')+'</span></b>'+
'<div style="padding-left: 10px;padding-right:20px">'+
'<p>Behaves identically to !snuff, save that it affects the flickering light.</p>'+
'</div>'+
'<b><span style="font-family: serif;">!daytime '+ch('[')+ch('<')+'Token ID'+ch('>')+ch(']')+'</span></b>'+
'<div style="padding-left: 10px;padding-right:20px">'+
'<p>Turns off dynamic lighting for the current player page, or the page of the selected/supplied token.</p>'+
'<ul>'+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'Token ID'+ch('>')+'</span></b> '+ch('-')+' A Token ID, usually supplied with something like '+ch('@')+ch('{')+'target'+ch('|')+'Target 1'+ch('|')+'token_id'+ch('}')+'.'+
'</li> '+
'</ul>'+
'</div>'+
'<b><span style="font-family: serif;">!nighttime '+ch('[')+ch('<')+'Token ID'+ch('>')+ch(']')+'</span></b>'+
'<div style="padding-left: 10px;padding-right:20px">'+
'<p>Turns on dynamic lighting for the current player page, or the page of the selected/supplied token.</p>'+
'<ul>'+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'Token ID'+ch('>')+'</span></b> '+ch('-')+' A Token ID, usually supplied with something like '+ch('@')+ch('{')+'target'+ch('|')+'Target 1'+ch('|')+'token_id'+ch('}')+'.'+
'</li> '+
'</ul>'+
'</div>'+
'<b><span style="font-family: serif;">!global-light '+ch('[')+ch('<')+'Token ID'+ch('>')+ch(']')+'</span></b>'+
'<div style="padding-left: 10px;padding-right:20px">'+
'<p>Toggles Global Illumination for the current player page, or the page of the selected/supplied token.</p>'+
'<ul>'+
'<li style="border-top: 1px solid #ccc;border-bottom: 1px solid #ccc;">'+
'<b><span style="font-family: serif;">'+ch('<')+'Token ID'+ch('>')+'</span></b> '+ch('-')+' A Token ID, usually supplied with something like '+ch('@')+ch('{')+'target'+ch('|')+'Target 1'+ch('|')+'token_id'+ch('}')+'.'+
'</li> '+
'</ul>'+
'</div>'+
'</div>'+
'</div>'
);
},
setFlicker = function(o,r,d,p,a) {
var found = _.findWhere(state.Torch.flickers, {parent: o.id}),
fobj;
if( found ) {
fobj = getObj('graphic',found.id);
if(fobj) {
fobj.set({
layer: 'walls',
showname: false,
aura1_radius: '',
showplayers_aura1: false,
light_radius: r,
light_dimradius: d,
light_otherplayers: p,
light_angle: a
});
} else {
delete state.Torch.flickers[found.id];
}
}
if(!fobj) {
// new flicker
fobj = createObj('graphic', {
imgsrc: flickerURL,
subtype: 'token',
name: 'Flicker',
pageid: o.get('pageid'),
width: 70,
height: 70,
top: o.get('top'),
left: o.get('left'),
layer: 'walls',
light_radius: r,
light_dimradius: d,
light_otherplayers: p,
light_angle: a
});
}
toBack(fobj);
state.Torch.flickers[fobj.id]={
id: fobj.id,
parent: o.id,
active: true,
page: o.get('pageid'),
light_radius: r,
light_dimradius: d,
light_angle: a
};
},
clearFlicker = function(fid) {
var f = getObj('graphic',fid);
if(f) {
f.remove();
}
delete state.Torch.flickers[fid];
},
handleInput = function(msg) {
var args, radius, dim_radius, arc_angle=360, other_players, page, obj, objs=[],who;
if (msg.type !== "api") {
return;
}
who=getObj('player',msg.playerid).get('_displayname');
args = msg.content.split(" ");
switch(args[0]) {
case '!torch':
if((args[1]||'').match(/^(--)?help$/) || ( !_.has(msg,'selected') && args.length < 5)) {
showHelp(who);
return;
}
radius = parseInt(args[1],10) || 40;
dim_radius = parseInt(args[2],10) || (radius/2);
other_players = _.contains([1,'1','on','yes','true','sure','yup','-'], args[3] || 1 );
objs = _.chain(args)
.rest(4)
.uniq()
.filter(function(a){
var angle=a.match(/^--(\d+)$/);
if(angle){
arc_angle=(Math.min(360,Math.max(0,angle[1])));
return false;
}
return true;
})
.map(function(t){
return getObj('graphic',t);
})
.filter(()=>playerIsGM(msg.playerid))
.reject(_.isUndefined)
.value();
_.each(_.union(objs,_.map(msg.selected,function (o) {
return getObj(o._type,o._id);
})), function(o){
o.set({
light_radius: radius,
light_dimradius: dim_radius,
light_otherplayers: other_players,
light_angle: arc_angle
});
});
break;
case '!snuff':
if((args[1]||'').match(/^(--)?help$/) || ( !_.has(msg,'selected') && args.length < 2)) {
showHelp(who);
return;
}
if(playerIsGM(msg.playerid)) {
_.chain(args)
.rest(1)
.uniq()
.map(function(t){
return getObj('graphic',t);
})
.reject(_.isUndefined)
.each(function(t) {
t.set({
light_radius: '',
light_dimradius: '',
light_otherplayers: false,
light_angle: 360
});
});
}
_.each(msg.selected,function (o) {
getObj(o._type,o._id).set({
light_radius: '',
light_dimradius: '',
light_otherplayers: false,
light_angle: 360
});
});
break;
case '!daytime':
if((args[1]||'').match(/^(--)?help$/) ) {
showHelp(who);
return;
}
if(playerIsGM(msg.playerid)) {
if(msg.selected) {
obj=getObj('graphic', msg.selected[0]._id);
} else if(args[1]) {
obj=getObj('graphic', args[1]);
}
page = getObj('page', (obj && obj.get('pageid')) || Campaign().get('playerpageid'));
if(page) {
page.set({
showlighting: false
});
sendChat('','/w gm It is now <b>Daytime</b> on '+page.get('name')+'!');
}
}
break;
case '!nighttime':
if((args[1]||'').match(/^(--)?help$/) ) {
showHelp(who);
return;
}
if(playerIsGM(msg.playerid)) {
if(msg.selected) {
obj=getObj('graphic',msg.selected[0]._id);
} else if(args[1]) {
obj=getObj('graphic', args[1]);
}
page = getObj('page', (obj && obj.get('pageid')) || Campaign().get('playerpageid'));
if(page) {
page.set({
showlighting: true
});
sendChat('','/w gm It is now <b>Nighttime</b> on '+page.get('name')+'!');
}
}
break;
case '!global-light':
if((args[1]||'').match(/^(--)?help$/) ) {
showHelp(who);
return;
}
if(playerIsGM(msg.playerid)) {
if(msg.selected) {
obj=getObj('graphic', msg.selected[0]._id);
} else if(args[1]) {
obj=getObj('graphic', args[1]);
}
page = getObj('page', (obj && obj.get('pageid')) || Campaign().get('playerpageid'));
if(page) {
page.set({
lightglobalillum: !(page.get('lightglobalillum'))
});
sendChat('','/w gm Global Illumination is now '+(page.get('lightglobalillum')?'<span style="font-weight:bold;color:#090;">ON</span>':'<span style="font-weight:bold;color:#900;">OFF</span>' )+' on page <b>'+page.get('name')+'</b>!');
}
}
break;
case '!flicker-on':
if((args[1]||'').match(/^(--)?help$/) || ( !_.has(msg,'selected') && args.length < 5)) {
showHelp(who);
return;
}
radius = parseInt(args[1],10) || 40;
dim_radius = parseInt(args[2],10) || (radius/2);
other_players = _.contains([1,'1','on','yes','true','sure','yup','-'], args[3] || 1 );
objs=_.chain(args)
.rest(4)
.uniq()
.filter(function(a){
var angle=a.match(/^--(\d+)$/);
if(angle){
arc_angle=(Math.min(360,Math.max(0,angle[1])));
return false;
}
return true;
})
.filter(()=>playerIsGM(msg.playerid))
.map(function(t){
return getObj('graphic',t);
})
.reject(_.isUndefined)
.value();
_.each(_.union(objs,_.map(msg.selected,function (o) {
return getObj(o._type,o._id);
})), function(o){
setFlicker(o, radius, dim_radius, other_players,arc_angle);
});
break;
case '!flicker-off':
if((args[1]||'').match(/^(--)?help$/) || ( !_.has(msg,'selected') && args.length < 2)) {
showHelp(who);
return;
}
if(playerIsGM(msg.playerid)) {
objs=_.chain(args)
.rest(1)
.uniq()
.value();
}
objs=_.union(objs,_.pluck(msg.selected,'_id'));
_.each(state.Torch.flickers, function(f) {
if( _.contains(objs, f.parent)) {
clearFlicker(f.id);
}
});
break;
}
},
animateFlicker = function() {
var pages = _.union([Campaign().get('playerpageid')], _.values(Campaign().get('playerspecificpages')));
_.chain(state.Torch.flickers)
.where({active:true})
.filter(function(o){
return _.contains(pages,o.page);
})
.each(function(fdata){
var o = getObj('graphic',fdata.parent),
f = getObj('graphic',fdata.id),
dx, dy, dr, da;
if(!o) {
clearFlicker(fdata.id);
} else {
if(!f) {
delete state.Torch.flickers[fdata.id];
} else {
dx = randomInteger(2 * flickerDeltaLocation)-flickerDeltaLocation;
dy = randomInteger(2 * flickerDeltaLocation)-flickerDeltaLocation;
dr = randomInteger(2 * (fdata.light_radius*flickerDeltaRadius)) - (fdata.light_radius*flickerDeltaRadius);
da = randomInteger(2 * flickerDeltaAngle)-flickerDeltaAngle;
f.set({
top: o.get('top')+dy,
left: o.get('left')+dx,
light_radius: fdata.light_radius+dr,
light_angle: ((360 === fdata.light_angle) ? (360) : (Math.min(360,Math.max(fdata.light_angle+da,0)))) || 360
});
}
}
});
},
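    // A deleted token may be either a lit parent token (remove its flicker
    // companion and its state entry) or the flicker companion itself (just
    // drop the orphaned state entry); handleTokenDelete distinguishes the two.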
handleTokenDelete = function(obj) {
var found = _.findWhere(state.Torch.flickers, {parent: obj.id});
if(found) {
clearFlicker(found.id);
} else {
found = _.findWhere(state.Torch.flickers, {id: obj.id});
if(found) {
delete state.Torch.flickers[obj.id];
}
}
},
checkInstall = function() {
log('-=> Torch v'+version+' <=- ['+(new Date(lastUpdate*1000))+']');
if( ! _.has(state,'Torch') || state.Torch.version !== schemaVersion) {
log(' > Updating Schema to v'+schemaVersion+' <');
/* Default Settings stored in the state. */
state.Torch = {
version: schemaVersion,
flickers: {}
};
}
flickerInterval = setInterval(animateFlicker,flickerPeriod);
},
registerEventHandlers = function() {
on('chat:message', handleInput);
on('destroy:graphic', handleTokenDelete);
};
return {
CheckInstall: checkInstall,
RegisterEventHandlers: registerEventHandlers
};
}());
on("ready",function(){
'use strict';
Torch.CheckInstall();
Torch.RegisterEventHandlers();
});
|
# -*- coding:UTF-8 -*-
'''
Created on 2014-12-05
@author: caifh
'''
# import chardet.universaldetector as det_
import creditutils.system_util as mysystem
import chardet
def escape_entire(src):
ch = (ord(c) for c in src)
return ''.join(('\\x%02x' % c) if c <= 255 else ('\\u%04x' % c) for c in ch)
# Convert a string value to bool
def get_bool(value):
return str(value).lower() == str(True).lower()
def escape(src):
return ascii(src)[1:-1]
def decode_escape(src):
return '{}'.format(bytes(src, 'ascii').decode('unicode-escape'))
def detect_encoding(src_buf, default_encoding=None):
enc_flag = 'encoding'
# more involved approach (kept for reference)
# detector = det_.UniversalDetector()
#
# detector.feed(src_buf)
# if detector.done:
# encoding = detector.result[enc_flag]
#
# detector.close()
# print(detector.result)
# simplified implementation
result = chardet.detect(src_buf)
# print(result)
encoding = result[enc_flag]
if not encoding:
if default_encoding:
encoding = default_encoding
else:
encoding = mysystem.get_system_encoding()
# print('encoding: ' + encoding)
return encoding
def decode_to_unicode(src_buf):
encoding = detect_encoding(src_buf)
rtn_str = ''
if src_buf:
rtn_str = src_buf.decode(encoding)
return rtn_str
# Compute the difference between two timestamps (in seconds) and return it as an 'XXmXXs'-formatted string
def get_time_info(begin, end):
elapsed = end - begin
sec_per_min = 60
m = elapsed // sec_per_min
s = elapsed % sec_per_min
time_info = '{}m{}s'.format(round(m), round(s))
return time_info
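# A minimal usage sketch of the helpers above:
if __name__ == '__main__':
    print(escape('héllo'))                   # -> h\xe9llo
    print(get_bool('True'), get_bool('no'))  # -> True False
    print(get_time_info(0, 125))             # -> 2m5s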
|
/**
* Definition for a binary tree node.
* function TreeNode(val, left, right) {
* this.val = (val===undefined ? 0 : val)
* this.left = (left===undefined ? null : left)
* this.right = (right===undefined ? null : right)
* }
*/
/**
* @param {TreeNode} root
* @param {number} x
* @param {number} y
* @return {boolean}
*/
const isCousins = (root, x, y, depth = 1, P = {}, D = {}) => {
let q = [root]
while (q.length) {
let K = q.length
while (K--) {
let p = q.shift()
if (p.left) {
if (p.left.val === x) (P.x = p.val), (D.x = depth)
if (p.left.val === y) (P.y = p.val), (D.y = depth)
q.push(p.left)
}
if (p.right) {
if (p.right.val === x) (P.x = p.val), (D.x = depth)
if (p.right.val === y) (P.y = p.val), (D.y = depth)
q.push(p.right)
}
}
++depth
}
return P.x !== P.y && D.x === D.y
}
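// Complexity sketch for the BFS version above: every node is enqueued at most
// once, so it does O(n) node visits; note that Array.prototype.shift() is
// itself linear, so replacing q.shift() with an index pointer would avoid a
// hidden quadratic worst case.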
// another
/**
* Definition for a binary tree node.
* function TreeNode(val) {
* this.val = val;
* this.left = this.right = null;
* }
*/
/**
* @param {TreeNode} root
* @param {number} x
* @param {number} y
* @return {boolean}
*/
const isCousins = function(root, x, y) {
if(root == null) return false
const res = []
chk(root, x, [], res)
chk(root, y, [], res)
if(res.length < 2) return false
return chkRes(res, x, y)
};
function chkRes(arr, x, y) {
let ci = 0, xi = -1, yi = -1
let len = Math.max(arr[0].length, arr[1].length)
for(let i = 0; i < len; i++) {
if(arr[0][i] === arr[1][i]) ci = i
if(arr[0][i] === x || arr[1][i] === x) xi = i
if(arr[0][i] === y || arr[1][i] === y) yi = i
}
  return xi === yi && xi - ci > 1
}
function chk(node, val, path, res) {
if(node == null) return
path.push(node.val)
if(node.val === val) {
res.push(path.slice(0))
return
}
chk(node.left, val, path.slice(0), res)
chk(node.right, val, path.slice(0), res)
}
|
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
Dtype,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, rewrite_axis_style_signature
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_period_arraylike,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
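# Example: a docstring template containing "%(klass)s" or "%(axes_single_arg)s"
# is filled from _shared_doc_kwargs via the Substitution/Appender decorators,
# so shared wording is defined once here.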
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: List[str] = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
axes: Optional[List[Index]] = None,
copy: bool = False,
dtype: Optional[Dtype] = None,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
fastpath: bool = False,
):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=self._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes on this object.
.. warning::
attrs is experimental and may change without warning.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {type(self).__name__} constructor"
)
return dtype
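    # Note: a "compound" dtype here is a NumPy structured dtype, e.g.
    # np.dtype([('x', 'i4'), ('y', 'f4')]), whose kind is "V" (void).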
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
@classmethod
def _setup_axes(cls, axes: List[str], docs: Dict[str, str]) -> None:
"""
Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
docs : docstrings for the axis properties
"""
info_axis = len(axes) - 1
axes_are_reversed = len(axes) > 1
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_REVERSED = axes_are_reversed
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
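    # Example: DataFrame calls _setup_axes(["index", "columns"], ...), giving
    # _AXIS_NUMBERS == {"index": 0, "columns": 1}, _AXIS_NAMES ==
    # {0: "index", 1: "columns"}, and "columns" as the info axis.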
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
def _construct_axes_from_arguments(
self, args, kwargs, require_all: bool = False, sentinel=None
):
"""Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
            # look for an argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments specified!")
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
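    # Example: DataFrame has _AXIS_REVERSED == True, so user-facing axis 0
    # (index) maps to BlockManager axis 1 and vice versa; Series passes the
    # axis through unchanged.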
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
                # e.g., you must do ilevel_0 for the 0th level of an unnamed
                # MultiIndex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
"""
        # we do it this way because if we have reversed axes, then
        # the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self):
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
Examples
--------
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels) -> None:
self._data.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
        >>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
Alter axes input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
return None
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
        The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
        num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = com.values_from_object(self)
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
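# For example (an illustrative sketch): unary minus on a boolean Series
# takes the ``is_bool_dtype`` branch above and applies elementwise NOT
# rather than numeric negation:
#   >>> -pd.Series([True, False])
#   0    False
#   1     True
#   dtype: bool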
def __pos__(self):
values = com.values_from_object(self)
if is_bool_dtype(values) or is_period_arraylike(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._data.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self)
return result
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
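# For example (an illustrative sketch): truth-testing a multi-element
# object raises rather than guessing:
#   >>> bool(pd.Series([1, 2]))  # doctest: +SKIP
#   ValueError: The truth value of a Series is ambiguous. ...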
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean (integer values 0 and 1 will also raise an
exception).
Returns
-------
bool
Same single boolean value converted to bool type.
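Examples
--------
The object must hold exactly one boolean element:
>>> pd.Series([True]).bool()
True
>>> pd.Series([False]).bool()
False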
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
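Examples
--------
An illustrative sketch with an ad-hoc frame (names are arbitrary):
>>> df = pd.DataFrame({"a": [1, 2]},
...                   index=pd.Index([10, 20], name="idx"))
>>> df._is_level_reference("idx")
True
>>> df._is_level_reference("a")
False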
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
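Examples
--------
An illustrative sketch: a name shared by a column and an index level
is rejected (ad-hoc frame):
>>> df = pd.DataFrame({"x": [1]}, index=pd.Index([0], name="x"))
>>> df._check_label_or_level_ambiguity("x")  # doctest: +SKIP
ValueError: 'x' is both an index level and a column label, which is ambiguous.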
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels, or if `key` is ambiguous (i.e. it
matches both a level of the given `axis` and a label of the other
axis)
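Examples
--------
An illustrative sketch (ad-hoc frame; names are arbitrary):
>>> df = pd.DataFrame({"a": [1, 2]},
...                   index=pd.Index(["x", "y"], name="idx"))
>>> df._get_label_or_level_values("a")
array([1, 2])
>>> df._get_label_or_level_values("idx")
array(['x', 'y'], dtype=object)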
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
f"The {label_axis_name} label '{key}' "
f"is not unique.{multi_message}"
)
)
return values
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
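Examples
--------
An illustrative sketch (ad-hoc frame; names are arbitrary):
>>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]},
...                   index=pd.Index(["x", "y"], name="idx"))
>>> df._drop_labels_or_levels("b").columns.tolist()
['a']
>>> df._drop_labels_or_levels("idx").index.name is None
True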
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping column labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
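Examples
--------
For a DataFrame, iterating yields the column labels:
>>> list(pd.DataFrame({"a": [1], "b": [2]}))
['a', 'b']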
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
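Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [0, 4, 8]})
>>> df.keys()
Index(['A', 'B'], dtype='object')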
"""
return self._info_axis
def items(self):
"""Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
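Examples
--------
>>> df = pd.DataFrame({"species": ["bear", "bear", "marsupial"]})
>>> for label, content in df.items():
...     print(label, content.tolist())
species ['bear', 'bear', 'marsupial']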
"""
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
return com.values_from_object(self)
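# For example (an illustrative sketch): ``np.asarray`` dispatches here, so
#   >>> np.asarray(pd.Series([1.0, 2.0]))
#   array([1., 2.])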
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
# ptp also requires the item_from_zerodim
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# it is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
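Examples
--------
The output below is the pipe format produced by ``tabulate``; it is
shown illustratively and skipped under doctest:
>>> df = pd.DataFrame({"A": [1, 2]})
>>> print(df.to_markdown())  # doctest: +SKIP
|    |   A |
|---:|----:|
|  0 |   1 |
|  1 |   2 |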
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: Optional[str] = "infer",
index: bool_t = True,
indent: Optional[int] = None,
) -> Optional[str]:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {'split','records','index','table'}.
* DataFrame:
- default is 'columns'
- allowed values are: {'split', 'records', 'index', 'columns',
'values', 'table'}.
* The format of the JSON string:
- 'split' : dict like {'index' -> [index], 'columns' -> [columns],
'data' -> [values]}
- 'records' : list like [{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
Describing the data, where the data component is like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a DataFrame using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a DataFrame using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a DataFrame using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a DataFrame using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
append : bool, default False
For Table formats, append the input data to the existing table.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed"
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
See Also
--------
read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
connectable. See `here
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
.. versionadded:: 0.21.0.
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to ``False``.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
):
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
Requires ``\usepackage{booktabs}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
Added to Series.
.. versionchanged:: 1.0.0
Added caption and label arguments.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False, prevents escaping of LaTeX special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
The LaTeX caption to be placed inside ``\caption{}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
This is used with ``\ref{}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""Reset the cacher."""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
# Note: we need to call ref._maybe_cache_changed even in the
# case where it will raise; the reason for this is not fully understood.
try:
ref._maybe_cache_changed(cacher[0], self)
except AssertionError:
# ref._data.setitem can raise
# AssertionError because of shape mismatch
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
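# Illustrative sketch of the caching handshake (assumed flow, not an
# exhaustive description):
#   s = df["a"]      # _get_item_cache stores s in df._item_cache
#   s._cacher        # ("a", weakref.ref(df)), set via _set_as_cached
#   s.iloc[0] = 99   # s._maybe_update_cacher notifies df through
#                    # _maybe_cache_changed so df sees the new value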
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` can be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self)
def _take_with_is_copy(
self: FrameOrSeries, indices, axis=0, **kwargs
) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (used in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis, **kwargs)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` cannot be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
# that means that there are list/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return com.maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs: Callable = xs
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take_with_is_copy(item, axis=self._info_axis_number)
return lower
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0, kind=None) -> FrameOrSeries:
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Will return a boolean if we are a view and are cached, but a
single-dtype, meaning that the cacher should be updated following
the setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*.
Parameters
----------
stacklevel : int, default 4
    The level of the stack to show when the error is output.
t : str, default "setting"
    The type of setting error.
force : bool, default False
    If True, then force showing an error.
Notes
-----
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0, 9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
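# e.g. with columns [('a', 'x'), ('a', 'y'), ('b', 'x')],
# ``del df['a']`` removes both ('a', 'x') and ('a', 'y')
# (illustrative labels, not tied to any particular frame)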
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
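Examples
--------
A minimal illustration (``df`` here is a hypothetical two-row frame):
>>> df = pd.DataFrame({'A': [1, 2]})
>>> df.get('A')
0    1
1    2
Name: A, dtype: int64
>>> df.get('B', default='no such column')
'no such column'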
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
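# Sketch of the two calling conventions normalized above (hypothetical frame):
#   df = pd.DataFrame({'a': [1], 'b': [2]})
#   df.drop(columns=['b'])    # keyword route
#   df.drop(['b'], axis=1)    # labels + axis route
# Both build the same axes dict and funnel into _drop_axis below.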
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform %(klass)s to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data.
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to back-propagate the last valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be the same length as the
axis being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
If `frac` > 1, `replace` should be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsample sample of the ``DataFrame`` with replacement:
Note that the `replace` parameter has to be `True` for `frac` > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError(
"String passed to weights not a valid column"
)
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If weights contain NaN, set them to zero.
weights = weights.fillna(0)
# Renormalize if weights don't sum to 1
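# e.g. raw weights [2, 2, 4] become [0.25, 0.25, 0.5] (hypothetical values)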
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply
DataFrame.applymap
Series.map
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name; possibly used to take different
types of propagation actions based on this.
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name: str):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
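# e.g. for a frame with a column named 'x', ``df.x`` resolves through the
# info-axis check above to ``df['x']`` (illustrative column name)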
def __setattr__(self, name: str, value) -> None:
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
def _dir_additions(self):
""" add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, it's first level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed-type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
def _internal_get_values(self) -> np.ndarray:
"""
Return an ndarray after converting sparse values to dense.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
"""
return self.values
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types, where each value
has a homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()
}
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original are reflected
in both; the deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused.
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtypes()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : %(axes_single_arg)s
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in columns 'A', 'B', 'C', and 'D' with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
# set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError(f"invalid fill value with a {type(value)}")
if inplace:
self._update_inplace(new_data)
return None
else:
return self._constructor(new_data).__finalize__(self)
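# Hedged note on the mixed-dtype axis=1 branch above: filling across
# columns on a mixed frame round-trips through a transpose, so
#   df.fillna(method="ffill", axis=1)
# works out-of-place, while the same call with inplace=True hits the
# NotImplementedError raised at the top of that branch.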
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
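Examples
--------
A minimal sketch of the forward fill (equivalent to
``fillna(method='ffill')``):
>>> pd.Series([1.0, None, 3.0]).ffill()
0    1.0
1    1.0
2    3.0
dtype: float64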
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
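Examples
--------
A minimal sketch of the backward fill (equivalent to
``fillna(method='bfill')``):
>>> pd.Series([None, 2.0, None]).bfill()
0    2.0
1    2.0
2    NaN
dtype: float64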
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, performs the replacement in place. Note: this will
modify any other views on this object (e.g. a column from a
DataFrame). Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use for replacement when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. That is why, in the next example, the 'a' values are
replaced by 10 in rows 1 and 2 and by 'b' in row 4.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
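# Hedged sketch of the nested-mapping decomposition above: a call such as
#   df.replace({"A": {0: 100, 4: 400}})
# is split into to_replace={"A": [0, 4]} and value={"A": [100, 400]}
# before re-entering the dict-of-lists branch via the recursive
# self.replace(...) call.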
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
values=_maybe_transposed_self,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
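# Hedged illustration of the axis handling above: interpolating along
# axis=1 transposes first, interpolates down the rows of the transpose,
# then transposes back, e.g.
#   >>> pd.DataFrame([[1.0, np.nan, 3.0]]).interpolate(axis=1)
#        0    1    2
#   0  1.0  2.0  3.0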
# ----------------------------------------------------------------------
# Timeseries methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In the case of a :class:`~pandas.DataFrame`, only the subset of
columns (if not `None`) is considered when looking for the last
row without NaN. If there is no good value, NaN is returned for a
Series, or a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where, dtype=np.float64)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
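# Hedged walk-through of the scalar-Series fast path above: with
# s = pd.Series([1.0, np.nan], index=[10, 20]), s.asof(20) uses
# searchsorted to land on position 1, finds NaN there, and the while
# loop steps back one slot to return 1.0.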
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
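# Hedged note on the mask handling above: entries that were NA before
# clipping are restored to NaN afterwards, e.g. clipping
# pd.Series([1.0, np.nan, 9.0]) to [2, 5] yields [2.0, NaN, 5.0].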
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
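# Hedged illustration of the GH 2747 branch above: scalar bounds given in
# the wrong order are swapped rather than rejected, e.g.
#   >>> pd.Series([1, 5, 10]).clip(8, 2)
#   0    2
#   1    5
#   2    8
#   dtype: int64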
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default 'end'
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill_value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
end_time : datetime.time or str
include_start : bool, default True
include_end : bool, default True
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset initial periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that data for the first 3 calendar days were returned, not the
first 3 observed days in the dataset, and therefore data for 2018-04-13
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.is_anchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
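# Hedged note on the Tick branch above: for a non-anchored offset whose
# end date lands exactly on an index label, that label is excluded via
# searchsorted(..., side="left"); with the docstring's index,
# ts.first('2D') therefore returns only the 2018-04-09 row.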
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset final periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that data for the last 3 calendar days were returned, not the
last 3 observed days in the dataset, and therefore data for 2018-04-11
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'``, records that share the same
value are all given the highest rank of the group (e.g. since 'cat'
and 'dog' occupy the 2nd and 3rd positions, both are assigned rank 3).
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Axis along which to fill using `method` and `limit`.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
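Examples
--------
A small illustration of outer alignment along the index; rows missing
from either object are filled with NaN:
>>> df = pd.DataFrame([[1, 2], [3, 4]], index=['a', 'b'], columns=['x', 'y'])
>>> s = pd.Series([10, 20], index=['b', 'c'])
>>> left, right = df.align(s, join='outer', axis=0)
>>> left
     x    y
a  1.0  2.0
b  3.0  4.0
c  NaN  NaN
>>> right
a     NaN
b    10.0
c    20.0
dtype: float64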
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
right = self._ensure_type(
right.fillna(method=method, axis=fill_axis, limit=limit)
)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
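# the block manager's axes are reversed relative to the frame, so the
# frame's index (axis 0) corresponds to block axis 1 here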
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
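# putmask writes where the mask is True, whereas `where` keeps values
# where cond is True, so the condition is inverted for inplace updates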
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = com.values_from_object(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
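Examples
--------
A brief illustration; the shifted result is one element shorter per
period dropped:
>>> s = pd.Series([10, 20, 30, 40])
>>> s.slice_shift(1)
1    10
2    20
3    30
dtype: int64
>>> s.slice_shift(-1)
0    20
1    30
2    40
dtype: int64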
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If `freq` is not specified, this method tries to use the ``freq`` or
``inferred_freq`` attributes of the index. If neither of those
attributes exists, a ValueError is raised.
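Examples
--------
A short example using a fixed-frequency index:
>>> idx = pd.date_range('2020-01-01', periods=3, freq='D')
>>> s = pd.Series([1, 2, 3], index=idx)
>>> s.tshift(1)
2020-01-02    1
2020-01-03    2
2020-01-04    3
Freq: D, dtype: int64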
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
Series or DataFrame
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
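Examples
--------
A small example converting a UTC-indexed Series to another time zone:
>>> idx = pd.date_range('2020-01-01', periods=2, freq='H', tz='UTC')
>>> s = pd.Series([1, 2], index=idx)
>>> s.tz_convert('US/Eastern')
2019-12-31 19:00:00-05:00    1
2019-12-31 20:00:00-05:00    2
Freq: H, dtype: int64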
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backward with a timedelta object, `'shift_forward'`,
or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(
self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A whitelist of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A blacklist of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values tie for the highest count, the ``top``
result will be arbitrarily chosen from among them.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
names += ["top", "freq"]
result += [top, freq]
# If there are no non-null values, set 'top' and 'freq' to NaN
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
def describe_1d(data):
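# bool and generic object data get the count/unique/top/freq summary;
# timedeltas reuse the numeric path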
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data):
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Optional[Hashable]] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self._ensure_type(
self.fillna(method=fill_method, axis=axis, limit=limit)
)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name,
name2,
axis_descr,
_any_desc,
nanops.nanany,
_any_see_also,
_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name,
name2,
axis_descr,
_all_desc,
nanops.nanall,
_all_see_also,
_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
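# mean absolute deviation: mean(abs(x - mean(x))) along the given axis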
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name,
name2,
axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name,
name2,
axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name,
name2,
axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name,
name2,
axis_descr,
"minimum",
np.minimum.accumulate,
"min",
np.inf,
np.nan,
_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name,
name2,
axis_descr,
"sum",
np.cumsum,
"sum",
0.0,
np.nan,
_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name,
name2,
axis_descr,
"product",
np.cumprod,
"prod",
1.0,
np.nan,
_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name,
name2,
axis_descr,
"maximum",
np.maximum.accumulate,
"max",
-np.inf,
np.nan,
_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name,
name2,
axis_descr,
"""Return the sum of the values for the requested axis.\n
This is equivalent to the method ``numpy.sum``.""",
nanops.nansum,
_stat_func_see_also,
_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name,
name2,
axis_descr,
"Return the mean of the values for the requested axis.",
nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name,
name2,
axis_descr,
"Return unbiased skew over requested axis.\n\nNormalized by N-1.",
nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name,
name2,
axis_descr,
"Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name,
name2,
axis_descr,
"Return the product of the values for the requested axis.",
nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name,
name2,
axis_descr,
"Return the median of the values for the requested axis.",
nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name,
name2,
axis_descr,
"""Return the maximum of the values for the requested axis.\n
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax,
_stat_func_see_also,
_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name,
name2,
axis_descr,
"""Return the minimum of the values for the requested axis.\n
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin,
_stat_func_see_also,
_min_examples,
)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
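# a transform must be length-preserving; scalar or otherwise
# aggregated results are rejected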
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are NA/null, returns None.
Also returns None for empty %(klass)s.
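Examples
--------
A small illustration; the same pattern applies to both the first and
last variants:
>>> s = pd.Series([np.nan, 2.0, np.nan])
>>> s.first_valid_index()
1
>>> s.last_valid_index()
1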
"""
def _find_valid_index(self, how: str):
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA values are treated as True, because they
are not equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a Series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty).
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : NumPy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a Series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``.
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter.
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f, name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name,
name1,
name2,
axis_descr,
desc,
accum_func,
accum_func_name,
mask_a,
mask_b,
examples,
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
def na_accum_func(blk_values):
# We will be applying this function to block values
if blk_values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
# numpy 1.18 started sorting NaTs at the end instead of beginning,
# so we need to work around to maintain backwards-consistency.
orig_dtype = blk_values.dtype
# We need to define mask before masking NaTs
mask = isna(blk_values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = blk_values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = blk_values
changed = False
result = accum_func(y.view("i8"), axis)
if skipna:
np.putmask(result, mask, iNaT)
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(blk_values, np.ndarray):
result = result.view(orig_dtype)
else:
# DatetimeArray
result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
elif skipna and not issubclass(
blk_values.dtype.type, (np.integer, np.bool_)
):
vals = blk_values.copy().T
mask = isna(vals)
np.putmask(vals, mask, mask_a)
result = accum_func(vals, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(blk_values.T, axis)
# transpose back for ndarray, not for EA
return result.T if hasattr(result, "T") else result
result = self._data.apply(na_accum_func)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
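# Illustrative sketch (assumption: this mirrors how pandas wires the factory
# output onto Series/DataFrame elsewhere in this module; `nanops.nansum` and
# `nanops.nanall` are pandas internals not defined in this excerpt):
#
#   cls.sum = _make_min_count_stat_function(
#       cls, "sum", name1, name2, axis_descr,
#       "Return the sum of the values for the requested axis.",
#       nanops.nansum, see_also=_stat_func_see_also, examples=_sum_examples,
#   )
#   cls.all = _make_logical_function(
#       cls, "all", name1, name2, axis_descr, _all_desc, nanops.nanall,
#       see_also=_all_see_also, examples=_all_examples, empty_value=True,
#   )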
|
// https://stackoverflow.com/a/3959275
const { isPositiveInteger } = require('./isPositiveInteger');
const handleErrors = (params) => {
  if (params.length === 0) throw new Error('Must provide one or more parameters');
if (params.some(param => !Number.isInteger(param))) {
    throw new Error('One of your parameters does not evaluate to an integer');
}
  // Has to be a non-negative integer
  if (params.some(param => !isPositiveInteger(param) && param !== 0)) {
    throw new Error('One of your parameters does not evaluate to a non-negative integer');
}
// JS can only safely represent integers less than Number.MAX_SAFE_INTEGER
if (params.some(param => param > 18)) {
throw new Error('Cannot reliably return numbers larger than 9,007,199,254,740,991');
}
};
const factorialCache = [1, 1];
const calculateFactorial = (num) => {
if (typeof factorialCache[num] !== 'undefined') {
return factorialCache[num];
}
const start = factorialCache.length;
for (let i = start; i <= num; i += 1) {
factorialCache[i] = factorialCache[i - 1] * i;
}
return factorialCache[num];
};
/**
 * This function calculates the factorial of each numerical parameter
* @memberof variadic
* @author devNoiseConsulting
* @param {...*} params - One or more parameters.
*/
exports.factorial = (...params) => {
handleErrors(params);
  return params.map(calculateFactorial);
};
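// Usage sketch (illustrative; assumes this module is saved as factorial.js
// next to isPositiveInteger.js):
//   const { factorial } = require('./factorial');
//   factorial(0, 1, 5, 10); // => [1, 1, 120, 3628800]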
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# NOTICE FILE in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must use the same tokenization and BPE codes as the ones used in the model.
#
# Usage:
# cat source_sentences.bpe | \
# python translate.py --exp_name translate \
# --src_lang en --tgt_lang fr \
# --model_path trained_model.pth --output_path output
#
import os
import io
import sys
import argparse
import torch
from src.utils import AttrDict
from src.utils import bool_flag, initialize_exp
from src.data.dictionary import Dictionary
from src.model import transfer_vocab
from src.model.transformer import TransformerModel
from src.fp16 import network_to_half
def get_parser():
"""
Generate a parameters parser.
"""
# parse parameters
parser = argparse.ArgumentParser(description="Translate sentences")
# main parameters
parser.add_argument("--dump_path", type=str, default="./dumped/", help="Experiment dump path")
parser.add_argument("--exp_name", type=str, default="", help="Experiment name")
parser.add_argument("--exp_id", type=str, default="", help="Experiment ID")
parser.add_argument("--fp16", type=bool_flag, default=False, help="Run model with float16")
parser.add_argument("--batch_size", type=int, default=32, help="Number of sentences per batch")
# model / output paths
parser.add_argument("--model_path", type=str, default="", help="Model path")
parser.add_argument("--output_path", type=str, default="", help="Output path")
parser.add_argument("--beam", type=int, default=1, help="Beam size")
parser.add_argument("--length_penalty", type=float, default=1, help="length penalty")
parser.add_argument("--max_len", type=int, default=-1, help="Maximum length (-1 to disable)")
# parser.add_argument("--max_vocab", type=int, default=-1, help="Maximum vocabulary size (-1 to disable)")
# parser.add_argument("--min_count", type=int, default=0, help="Minimum vocabulary count")
# source language / target language
parser.add_argument("--src_lang", type=str, default="", help="Source language")
parser.add_argument("--tgt_lang", type=str, default="", help="Target language")
parser.add_argument("--tie_lang_embs", type=str, default="",
help="Tie language embeddings for two langs")
parser.add_argument("--transfer_vocab", type=str, default="",
help="Path to bidict file for vocab transfer")
return parser
def main(params):
# initialize the experiment
logger = initialize_exp(params)
reloaded = torch.load(params.model_path)
model_params = AttrDict(reloaded['params'])
logger.info("Supported languages: %s" % ", ".join(model_params.lang2id.keys()))
# update dictionary parameters
for name in ['n_words', 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index']:
setattr(params, name, getattr(model_params, name))
# build dictionary / build encoder / build decoder / reload weights
dico = Dictionary(reloaded['dico_id2word'], reloaded['dico_word2id'], reloaded['dico_counts'])
encoder = TransformerModel(model_params, dico, is_encoder=True, with_output=True).cuda().eval()
decoder = TransformerModel(model_params, dico, is_encoder=False, with_output=True).cuda().eval()
encoder.load_state_dict(reloaded['encoder'])
decoder.load_state_dict(reloaded['decoder'])
params.src_id = model_params.lang2id[params.src_lang]
params.tgt_id = model_params.lang2id[params.tgt_lang]
if params.transfer_vocab:
transfer_vocab(params, dico, encoder.embeddings)
transfer_vocab(params, dico, decoder.embeddings)
if hasattr(params, 'tie_lang_embs') and params.tie_lang_embs:
to_lang, from_lang = params.tie_lang_embs.split(",")
encoder.tie_lang_embs(model_params.lang2id[to_lang], model_params.lang2id[from_lang])
decoder.tie_lang_embs(model_params.lang2id[to_lang], model_params.lang2id[from_lang])
# float16
if params.fp16:
assert torch.backends.cudnn.enabled
encoder = network_to_half(encoder)
decoder = network_to_half(decoder)
# read sentences from stdin
src_sent = []
for line in sys.stdin.readlines():
line_spl = line.strip().split()
assert len(line_spl) > 0
if len(line_spl) > params.max_len and params.max_len > 0:
src_sent.append(" ".join(line_spl[:params.max_len]))
else:
src_sent.append(line)
logger.info("Read %i sentences from stdin. Translating ..." % len(src_sent))
f = io.open(params.output_path, 'w', encoding='utf-8')
for i in range(0, len(src_sent), params.batch_size):
# prepare batch
word_ids = [torch.LongTensor([dico.index(w) for w in s.strip().split()])
for s in src_sent[i:i + params.batch_size]]
lengths = torch.LongTensor([len(s) + 2 for s in word_ids])
batch = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(params.pad_index)
batch[0] = params.eos_index
for j, s in enumerate(word_ids):
if lengths[j] > 2: # if sentence not empty
batch[1:lengths[j] - 1, j].copy_(s)
batch[lengths[j] - 1, j] = params.eos_index
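        # Illustrative batch layout (assumption: eos_index delimits each
        # sentence and pad_index fills the remainder), e.g. for ["a b", "c"]:
        #     [[</s>, </s>],
        #      [  a ,   c ],
        #      [  b , </s>],
        #      [</s>, <pad>]]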
langs = batch.clone().fill_(params.src_id)
# encode source batch and translate it
encoded = encoder('fwd', x=batch.cuda(), lengths=lengths.cuda(), langs=langs.cuda(), causal=False)
encoded = encoded.transpose(0, 1)
max_len = min(int(1.5 * lengths.max().item() + 10), 512)
if params.beam == 1:
decoded, dec_lengths = decoder.generate(encoded, lengths.cuda(), params.tgt_id, max_len=max_len)
else:
decoded, dec_lengths = decoder.generate_beam(
encoded, lengths.cuda(), params.tgt_id, beam_size=params.beam,
length_penalty=params.length_penalty,
early_stopping=False,
max_len=max_len)
# convert sentences to words
for j in range(decoded.size(1)):
# remove delimiters
sent = decoded[:, j]
delimiters = (sent == params.eos_index).nonzero().view(-1)
assert len(delimiters) >= 1 and delimiters[0].item() == 0
sent = sent[1:] if len(delimiters) == 1 else sent[1:delimiters[1]]
# output translation
source = src_sent[i + j].strip()
target = " ".join([dico[sent[k].item()] for k in range(len(sent))])
sys.stderr.write("%i / %i: %s -> %s\n" % (i + j, len(src_sent), source, target))
f.write(target + "\n")
f.close()
if __name__ == '__main__':
# generate parser / parse parameters
parser = get_parser()
params = parser.parse_args()
# check parameters
assert os.path.isfile(params.model_path)
assert params.src_lang != '' and params.tgt_lang != '' and params.src_lang != params.tgt_lang
assert params.output_path and not os.path.isfile(params.output_path)
# translate
with torch.no_grad():
main(params)
|
# file: config.py
# author: mbiokyle29
import configparser
import logging
import os.path as path
from pkg_resources import resource_filename
logger = logging.getLogger("place.lib.config")
class PlaceConfig(object):
""" Encapsulation of configuration options """
RC_FILE_NAME = ".placerc"
def __init__(self):
self._verbose = False
self._debug = False
self._log_level = None
@classmethod
def readConfigFiles(cls, cwd):
""" Find and attempt to load settings from config file """
rc_file_paths = [
path.join(cwd, cls.RC_FILE_NAME),
path.join(path.expanduser("~"), cls.RC_FILE_NAME),
resource_filename("place", path.join("data", cls.RC_FILE_NAME))
]
found_rc_file_paths = [
rc_file_path for rc_file_path in rc_file_paths
if path.isfile(rc_file_path)
]
kwargs = {}
if len(found_rc_file_paths) >= 1:
rc_file_path = found_rc_file_paths[0]
logger.debug("Loading config from %s", rc_file_path)
parser = configparser.ConfigParser()
parser.read(rc_file_path)
if "place" in parser:
kwargs["verbose"] = parser["place"].getboolean("verbose", fallback=False)
kwargs["debug"] = parser["place"].getboolean("debug", fallback=False)
logger.debug("Creating with: %s", kwargs)
return kwargs
@classmethod
def fromKwargs(cls, **kwargs):
""" Create a config instance from kwargs """
instance = cls()
instance._verbose = bool(kwargs.get("verbose", False))
instance._debug = bool(kwargs.get("debug", False))
return instance
@property
def verbose(self):
return self._verbose
@property
def debug(self):
return self._debug
@property
def log_level(self):
if self._log_level is None:
log_level = logging.INFO if self._verbose else logging.WARN
log_level = logging.DEBUG if self._debug else log_level
self._log_level = log_level
return self._log_level
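# Usage sketch (illustrative; assumes the caller supplies the working
# directory, e.g. via os.getcwd()):
#
#   kwargs = PlaceConfig.readConfigFiles(cwd)
#   config = PlaceConfig.fromKwargs(**kwargs)
#   logging.basicConfig(level=config.log_level)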
|
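# asyncio.run was added in Python 3.7; on older interpreters fall back to
# driving the event loop directly.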
try:
from asyncio import run
except ImportError:
import asyncio
def run(main):
loop = asyncio.get_event_loop()
return loop.run_until_complete(main)
from injectify.api import inject
from injectify.injectors import (
HeadInjector,
TailInjector,
ReturnInjector,
FieldInjector,
NestedInjector,
)
def test_head_injector_correctly_injects_async_function():
async def target(x):
a = 10
if x > a:
a = x
return a
@inject(target=target, injector=HeadInjector())
def handler():
x = 11
assert run(target(0)) == 11
assert run(target(10)) == 11
assert run(target(101)) == 11
def test_tail_injector_correctly_injects_async_function():
async def target(x):
if x > 100:
return x
@inject(target=target, injector=TailInjector())
def handler():
return -1
assert run(target(13)) == -1
assert run(target(101)) == 101
def test_return_injector_correctly_injects_async_function_all_returns():
async def target(x):
if x > 100:
y = x * 2
return y
else:
y = x + 2
return y
@inject(target=target, injector=ReturnInjector())
def handler():
return '{} :)'.format(y)
assert run(target(13)) == '15 :)'
assert run(target(101)) == '202 :)'
def test_return_injector_correctly_injects_async_function_ordinal_returns():
async def target(x):
if x > 100:
y = x * 2
return y
else:
y = x + 2
return y
@inject(target=target, injector=ReturnInjector(ordinal=1))
def handler():
return '{} :)'.format(y)
assert run(target(13)) == '15 :)'
assert run(target(101)) == 202
def test_field_injector_correctly_injects_async_function_before_all_fields():
async def target(x):
if x > 100:
y = x * 2
else:
y = x + 2
return y
@inject(target=target, injector=FieldInjector('y', insert='before'))
def handler():
x += 1
assert run(target(13)) == 16
assert run(target(101)) == 204
def test_field_injector_correctly_injects_async_function_after_all_fields():
async def target(x):
if x > 100:
y = x * 2
else:
y = x + 2
return y
@inject(target=target, injector=FieldInjector('y', insert='after'))
def handler():
y -= 1
assert run(target(13)) == 14
assert run(target(101)) == 201
def test_field_injector_correctly_injects_async_function_before_ordinal_field():
async def target(x):
if x > 100:
y = x * 2
else:
y = x + 2
return y
@inject(
target=target, injector=FieldInjector('y', ordinal=1, insert='before'),
)
def handler():
x += 1
assert run(target(13)) == 16
assert run(target(101)) == 202
def test_field_injector_correctly_injects_async_function_after_ordinal_field():
async def target(x):
if x > 100:
y = x * 2
else:
y = x + 2
return y
@inject(target=target, injector=FieldInjector('y', ordinal=0, insert='after'))
def handler():
y -= 1
assert run(target(13)) == 15
assert run(target(101)) == 201
def test_nested_injector_correctly_injects_async_function_sync_nested():
async def target(x):
def nested(y):
if y > 100:
return y
if x < 200:
return nested(x)
@inject(target=target, injector=NestedInjector('nested', TailInjector()))
def handler():
return -1
assert run(target(13)) == -1
assert run(target(101)) == 101
assert run(target(200)) is None
def test_nested_injector_correctly_injects_async_function_async_nested():
async def target(x):
async def nested(y):
if y > 100:
return y
if x < 200:
return await nested(x)
@inject(target=target, injector=NestedInjector('nested', TailInjector()))
def handler():
return -1
assert run(target(13)) == -1
assert run(target(101)) == 101
assert run(target(200)) is None
def test_nested_injector_correctly_injects_nested_sync_function_async_nested():
async def target(x):
async def nested(y):
if y > 100:
return y
if x < 200:
return await nested(x)
@inject(target=target, injector=NestedInjector('nested', TailInjector()))
def handler():
return -1
assert run(target(13)) == -1
assert run(target(101)) == 101
assert run(target(200)) is None
|
#ifdef __OBJC__
#import <UIKit/UIKit.h>
#else
#ifndef FOUNDATION_EXPORT
#if defined(__cplusplus)
#define FOUNDATION_EXPORT extern "C"
#else
#define FOUNDATION_EXPORT extern
#endif
#endif
#endif
FOUNDATION_EXPORT double FlowCoreVersionNumber;
FOUNDATION_EXPORT const unsigned char FlowCoreVersionString[];
|
export default ['$scope', '$state', 'configService',
($scope, $state, configService) => {
$scope.page = {
'message': () => `Page 2, id = ${$state.params.id}`
};
}];
|
import{r as t,e as o,h as r,H as a,g as n}from"./p-8dfd188c.js";import{o as i}from"./p-8bf53d8d.js";import{h as e,s}from"./p-09b2e663.js";const d=class{constructor(r){t(this,r),this.bkkrFocus=o(this,"bkkrFocus",7),this.bkkrBlur=o(this,"bkkrBlur",7),this.disabled=!1,this.routerDirection="forward",this.type="button",this.onFocus=()=>{this.bkkrFocus.emit()},this.onBlur=()=>{this.bkkrBlur.emit()}}render(){const{el:t,disabled:o,color:n,href:d,size:b,routerDirection:c}=this,l=e("bkkr-fab-list",t),h=void 0===d?"button":"a",p="button"===h?{type:this.type}:{download:this.download,href:d,rel:this.rel,target:this.target};return r(a,{"aria-disabled":o?"true":null,class:s(n,{"fab-button-in-list":l,"state-disabled":o,"state-activatable":!0,"state-focusable":!0,"fab-button-color":void 0!==n,[`fab-button-${b}`]:void 0!==b})},r(h,Object.assign({},p,{class:"button-native",part:"native",disabled:o,onFocus:this.onFocus,onBlur:this.onBlur,onClick:t=>i(d,t,c)}),r("span",{class:"button-inner"},r("slot",null))))}get el(){return n(this)}};d.style=':host{--border-radius:50%;--border-width:0;--border-style:none;--border-color:initial;--padding-top:0;--padding-end:0;--padding-bottom:0;--padding-start:0;--background:var(--bkkr-text-color, #000);--background-active:var(--bkkr-background-color, #fff);--background-focus:var(--bkkr-background-color, #fff);--background-hover:var(--bkkr-background-color, #fff);--background-active-opacity:.1;--background-focus-opacity:.1;--background-hover-opacity:.1;--color:var(--bkkr-background-color, #fff);--color-active:var(--color);--color-focus:var(--color);--color-hover:var(--color);--box-shadow-color:var(--bkkr-text-color, #000);--box-shadow:0 15px 25px 0 rgba(var(--box-shadow-color, 0, 0, 0), 0.1), 0 6px 20px 0 rgba(var(--box-shadow-color, 0, 0, 0), 0.12), 0 13.2px 40px 0 rgba(var(--box-shadow-color, 0, 0, 0), 0.08);--transition:0.2s transform cubic-bezier(0.25, 1.11, 0.78, 1.59), 0.2s box-shadow cubic-bezier(0.25, 1.11, 0.78, 1.59);--transform:translateZ(0);margin-left:0;margin-right:0;margin-top:0;margin-bottom:0;display:block;width:56px;height:56px;outline:none;font-size:14px;text-align:center;text-overflow:ellipsis;text-transform:none;white-space:nowrap;-webkit-font-kerning:none;font-kerning:none}.button-native{border-radius:var(--border-radius);padding-left:var(--padding-start);padding-right:var(--padding-end);padding-top:var(--padding-top);padding-bottom:var(--padding-bottom);color:inherit;font-family:inherit;font-size:inherit;font-style:inherit;font-weight:inherit;letter-spacing:inherit;text-align:inherit;text-decoration:inherit;text-indent:inherit;text-overflow:inherit;text-transform:inherit;white-space:inherit;display:block;position:relative;width:100%;height:100%;-webkit-transform:var(--transform);transform:var(--transform);-webkit-transition:var(--transition);transition:var(--transition);border-width:var(--border-width);border-style:var(--border-style);border-color:var(--border-color);outline:none;background:var(--background);background-clip:padding-box;color:var(--color);-webkit-box-shadow:var(--box-shadow);box-shadow:var(--box-shadow);contain:strict;cursor:pointer;overflow:hidden;z-index:0;-webkit-appearance:none;-moz-appearance:none;appearance:none;-webkit-box-sizing:border-box;box-sizing:border-box}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 
0){.button-native{padding-left:unset;padding-right:unset;-webkit-padding-start:var(--padding-start);padding-inline-start:var(--padding-start);-webkit-padding-end:var(--padding-end);padding-inline-end:var(--padding-end)}}.button-native::-moz-focus-inner{border:0}.button-inner{top:0;right:0;left:0;display:-ms-flexbox;display:flex;position:absolute;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-ms-flex-negative:0;flex-shrink:0;-ms-flex-align:center;align-items:center;-ms-flex-pack:center;justify-content:center;height:100%;z-index:1}.button-native::after{top:0;right:0;bottom:0;left:0;position:absolute;content:"";opacity:0}@media (any-hover: hover){:host(:hover) .button-native{color:var(--color-hover)}:host(:hover) .button-native::after{background:var(--background-hover);opacity:var(--background-hover-opacity)}}:host(:focus) .button-native,:host(.state-focused) .button-native{color:var(--color-focus)}:host(:focus) .button-native::after,:host(.state-focused) .button-native::after{background:var(--background-focus);opacity:var(--background-focus-opacity)}:host(:active),:host(.state-activated){--box-shadow:0 3px 5px 0 rgba(var(--box-shadow-color, 0, 0, 0), 0.1), 0 6px 10px 0 rgba(var(--box-shadow-color, 0, 0, 0), 0.12), 0 1.2px 20px 0 rgba(var(--box-shadow-color, 0, 0, 0), 0.08);--transform:scale3d(0.97, 0.97, 1);--transition:0.2s transform ease-out, 0.2s box-shadow ease-out}:host(:active) .button-native,:host(.state-activated) .button-native{color:var(--color-active)}:host(:active) .button-native::after,:host(.state-activated) .button-native::after{background:var(--background-active);opacity:var(--background-active-opacity)}:host(:disabled),:host(.state-disabled){cursor:default;opacity:0.5;pointer-events:none}:host(.fab-button-color){--box-shadow-color:var(--color-base-rgb)}:host(.fab-button-color) .button-native{background:var(--color-base);color:var(--color-contrast)}::slotted(bkkr-icon){font-size:28px;line-height:1}:host(.fab-button-small){margin-left:8px;margin-right:8px;margin-top:8px;margin-bottom:8px;width:40px;height:40px}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){:host(.fab-button-small){margin-left:unset;margin-right:unset;-webkit-margin-start:8px;margin-inline-start:8px;-webkit-margin-end:8px;margin-inline-end:8px}}';export{d as bkkr_fab_button}
|
//#include <sys/stat.h>
extern char _HeapPlaceholderStart; /* Defined by the linker */
extern char _HeapPlaceholderEnd; /* Defined by the linker */
char *current_heap_end = 0;
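/*
 * Note: with newlib, malloc() ultimately calls _sbrk() to grow the heap
 * between the placeholder symbols above. (Illustrative comment; the exact
 * symbol names come from this project's linker script.)
 */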
char* _sbrk(int incr) {
char* prev_heap_end;
if (current_heap_end == 0) { //Initialize with value from linker
		current_heap_end = &_HeapPlaceholderStart;
}
prev_heap_end = current_heap_end;
if (current_heap_end + incr > &_HeapPlaceholderEnd) {
/* Heap and stack collision */
return (char*) -1;
}
current_heap_end += incr;
return (char*) prev_heap_end;
}
|
import base64
import csv
import datetime
import gzip
import ujson
import logging
import os
import ssl
import subprocess
import uuid
import bson
import pytz
import tzlocal
from typing import Tuple, Optional, Dict, Callable, Any
from pymongo import MongoClient
from pymongo.database import Database
from singer.utils import strftime as singer_strftime
from . import utils, split_gzip
from .errors import ExportError, TableNotFoundError, MongoDBInvalidDatetimeError, UnsupportedKeyTypeException
LOGGER = logging.getLogger(__name__)
DEFAULT_WRITE_BATCH_ROWS = 50000
def serialize_document(document: Dict) -> Dict:
"""
    Serialize a MongoDB document into a JSON-friendly dict.
Args:
document: MongoDB document
Returns: Dict
"""
return {key: transform_value(val, [key]) for key, val in document.items()
if not isinstance(val, (bson.min_key.MinKey, bson.max_key.MaxKey))}
def class_to_string(key_value: Any, key_type: str) -> str:
"""
Converts specific types to string equivalent
The supported types are: datetime, bson Timestamp, bytes, int, Int64, float, ObjectId, str and UUID
Args:
key_value: The value to convert to string
key_type: the value type
Returns: string equivalent of key value
Raises: UnsupportedKeyTypeException if key_type is not supported
"""
if key_type == 'datetime':
if key_value.tzinfo is None:
timezone = tzlocal.get_localzone()
local_datetime = timezone.localize(key_value)
utc_datetime = local_datetime.astimezone(pytz.UTC)
else:
utc_datetime = key_value.astimezone(pytz.UTC)
return singer_strftime(utc_datetime)
if key_type == 'Timestamp':
return '{}.{}'.format(key_value.time, key_value.inc)
if key_type == 'bytes':
return base64.b64encode(key_value).decode('utf-8')
if key_type in ['int', 'Int64', 'float', 'ObjectId', 'str', 'UUID']:
return str(key_value)
raise UnsupportedKeyTypeException('{} is not a supported key type'.format(key_type))
def safe_transform_datetime(value: datetime.datetime, path) -> str:
"""
Safely transform datetime from local tz to UTC if applicable
Args:
value: datetime value to transform
path:
Returns: utc datetime as string
"""
timezone = tzlocal.get_localzone()
try:
local_datetime = timezone.localize(value)
utc_datetime = local_datetime.astimezone(pytz.UTC)
except Exception as ex:
if str(ex) == 'year is out of range' and value.year == 0:
# NB: Since datetimes are persisted as strings, it doesn't
# make sense to blow up on invalid Python datetimes (e.g.,
# year=0). In this case we're formatting it as a string and
# passing it along down the pipeline.
return '{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}.{:06d}Z'.format(value.year,
value.month,
value.day,
value.hour,
value.minute,
value.second,
value.microsecond)
raise MongoDBInvalidDatetimeError('Found invalid datetime at [{}]: {}'.format('.'.join(map(str, path)),
value)) from ex
return singer_strftime(utc_datetime)
def transform_value(value: Any, path) -> Any:
"""
transform values to json friendly ones
Args:
value: value to transform
path:
Returns: transformed value
"""
conversion = {
list: lambda val, pat: list(map(lambda v: transform_value(v[1], pat + [v[0]]), enumerate(val))),
dict: lambda val, pat: {k: transform_value(v, pat + [k]) for k, v in val.items()},
uuid.UUID: lambda val, _: class_to_string(val, 'UUID'),
bson.objectid.ObjectId: lambda val, _: class_to_string(val, 'ObjectId'),
bson.datetime.datetime: safe_transform_datetime,
bson.timestamp.Timestamp: lambda val, _: singer_strftime(val.as_datetime()),
bson.int64.Int64: lambda val, _: class_to_string(val, 'Int64'),
bytes: lambda val, _: class_to_string(val, 'bytes'),
datetime.datetime: lambda val, _: class_to_string(val, 'datetime'),
bson.decimal128.Decimal128: lambda val, _: val.to_decimal(),
bson.regex.Regex: lambda val, _: dict(pattern=val.pattern, flags=val.flags),
bson.code.Code: lambda val, _: dict(value=str(val), scope=str(val.scope)) if val.scope else str(val),
bson.dbref.DBRef: lambda val, _: dict(id=str(val.id), collection=val.collection, database=val.database),
}
if isinstance(value, tuple(conversion.keys())):
return conversion[type(value)](value, path)
return value
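# Usage sketch (illustrative; the ObjectId and timestamps are placeholders):
#
#   doc = {'_id': bson.objectid.ObjectId(), 'seen': datetime.datetime.utcnow()}
#   serialize_document(doc)
#   # -> {'_id': '5f1e9b2c...', 'seen': '2020-07-27T10:00:00.000000Z'}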
class FastSyncTapMongoDB:
"""
Common functions for fastsync from a MongoDB database
"""
def __init__(self, connection_config: Dict, tap_type_to_target_type: Callable):
"""
FastSyncTapMongoDB constructor
Args:
connection_config: A map of tap source config
tap_type_to_target_type: Function that maps tap types to target ones
"""
self.connection_config = connection_config
self.connection_config['write_batch_rows'] = connection_config.get('write_batch_rows',
DEFAULT_WRITE_BATCH_ROWS)
self.tap_type_to_target_type = tap_type_to_target_type
self.database: Optional[Database] = None
def open_connection(self):
"""
Open connection
"""
# Default SSL verify mode to true, give option to disable
verify_mode = self.connection_config.get('verify_mode', 'true') == 'true'
use_ssl = self.connection_config.get('ssl') == 'true'
connection_params = dict(host=self.connection_config['host'], port=int(self.connection_config['port']),
username=self.connection_config['user'], password=self.connection_config['password'],
authSource=self.connection_config['auth_database'], ssl=use_ssl,
replicaSet=self.connection_config.get('replica_set', None),
readPreference='secondaryPreferred')
# NB: "ssl_cert_reqs" must ONLY be supplied if `SSL` is true.
if not verify_mode and use_ssl:
connection_params['ssl_cert_reqs'] = ssl.CERT_NONE
self.database = MongoClient(**connection_params)[self.connection_config['database']]
def close_connection(self):
"""
Close connection
"""
self.database.client.close()
# pylint: disable=R0914,R0913
def copy_table(self,
table_name: str,
filepath: str,
temp_dir: str,
split_large_files=False,
split_file_chunk_size_mb=1000,
split_file_max_chunks=20,
compress=True
):
"""
Export data from table to a zipped csv
Args:
table_name: Fully qualified table name to export
filepath: Path where to create the zip file(s) with the exported data
temp_dir: Temporary directory to export
split_large_files: Split large files to multiple pieces and create multiple zip files
with -partXYZ postfix in the filename. (Default: False)
split_file_chunk_size_mb: File chunk sizes if `split_large_files` enabled. (Default: 1000)
split_file_max_chunks: Max number of chunks if `split_large_files` enabled. (Default: 20)
compress: Flag to indicate whether to compress export files
"""
table_dict = utils.tablename_to_dict(table_name, '.')
if table_dict['table_name'] not in self.database.list_collection_names():
raise TableNotFoundError(f'{table_name} table not found!')
export_file_path = self._export_collection(temp_dir, table_dict['table_name'])
extracted_at = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
write_batch_rows = self.connection_config['write_batch_rows']
exported_rows = 0
try:
gzip_splitter = split_gzip.open(filepath,
mode='wt',
chunk_size_mb=split_file_chunk_size_mb,
max_chunks=split_file_max_chunks if split_large_files else 0,
compress=compress)
with gzip.open(export_file_path, 'rb') as export_file, gzip_splitter as gzfile:
writer = csv.DictWriter(gzfile,
fieldnames=[elem[0] for elem in self._get_collection_columns()],
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
rows = []
LOGGER.info('Starting data processing...')
# bson.decode_file_iter will generate one document at a time from the exported file
for document in bson.decode_file_iter(export_file):
try:
rows.append({
'_ID': str(document['_id']),
'DOCUMENT': ujson.dumps(serialize_document(document)),
utils.SDC_EXTRACTED_AT: extracted_at,
utils.SDC_BATCHED_AT: datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f'),
utils.SDC_DELETED_AT: None
})
except TypeError:
LOGGER.error('TypeError encountered when processing document ID: %s', document['_id'])
raise
exported_rows += 1
# writes batch to csv file and log some nice message on the progress.
if exported_rows % write_batch_rows == 0:
LOGGER.info(
'Exporting batch from %s to %s rows from %s...',
(exported_rows - write_batch_rows),
exported_rows, table_name
)
writer.writerows(rows)
rows.clear()
# write rows one last time
if rows:
LOGGER.info('Exporting last batch ...')
writer.writerows(rows)
rows.clear()
finally:
# whether the code in try succeeds or fails
# make sure to delete the exported file
os.remove(export_file_path)
LOGGER.info('Exported total of %s rows from %s...', exported_rows, table_name)
@staticmethod
def _get_collection_columns() -> Tuple:
"""
Get predefined table/collection column details
"""
return (
('_ID', 'string'),
('DOCUMENT', 'object'),
(utils.SDC_EXTRACTED_AT, 'datetime'),
(utils.SDC_BATCHED_AT, 'datetime'),
(utils.SDC_DELETED_AT, 'string'),
)
def fetch_current_log_pos(self) -> Dict:
"""
        Find and return the latest ChangeStream token.
        The LOG_BASED method uses change streams.
        MongoDB doesn't have a built-in feature to get the most recent token,
        so a workaround is to start a cursor, grab the first token it returns, then exit.
Returns: token
"""
token = None
with self.database.watch(max_await_time_ms=1000) as cursor:
while cursor.alive:
_ = cursor.try_next()
token = cursor.resume_token
if token is not None:
break
# Token can look like:
# {'_data': 'A_LONG_HEX_DECIMAL_STRING'}
# or {'_data': 'A_LONG_HEX_DECIMAL_STRING', '_typeBits': b'SOME_HEX'}
# https://github.com/mongodb/mongo/blob/master/src/mongo/db/pipeline/resume_token.cpp#L82-L96
# Get the '_data' only from resume token
# token can contain a property '_typeBits' of type bytes which cannot be json
# serialized when saving the state in the function 'utils.save_state_file'.
# '_data' is enough to resume LOG_BASED Singer replication after FastSync
return {
'token': {
'_data': token['_data']
}
}
# pylint: disable=invalid-name
def fetch_current_incremental_key_pos(self, fully_qualified_table_name: str, replication_key: str):
"""
        Not implemented.
Args:
fully_qualified_table_name:
replication_key:
"""
raise NotImplementedError('INCREMENTAL method is not supported for tap-mongodb')
def map_column_types_to_target(self):
"""
Create a map of columns and their target type in addition of primary keys
Returns: dictionary
"""
mapped_columns = []
for column_name, column_type in self._get_collection_columns():
mapped_columns.append(f'{column_name} {self.tap_type_to_target_type(column_type)}')
return {
'columns': mapped_columns,
'primary_key': ['_ID']
}
def _export_collection(self, export_dir: str, collection_name) -> str:
"""
Dump a collection data into a compressed bson file and returns the path
Args:
export_dir: Specifies the directory where dumped file will be
collection_name: Name of the collection to dump
Returns: Path to the file
"""
LOGGER.info('Starting export of table "%s"', collection_name)
url = f'mongodb://{self.connection_config["user"]}:{self.connection_config["password"]}' \
f'@{self.connection_config["host"]}:{self.connection_config["port"]}/' \
f'{self.connection_config["database"]}?authSource={self.connection_config["auth_database"]}' \
f'&readPreference=secondaryPreferred'
if self.connection_config.get('replica_set', None) is not None:
url += f'&replicaSet={self.connection_config["replica_set"]}'
if self.connection_config.get('ssl', None) is not None:
url += f'&ssl={self.connection_config["ssl"]}'
return_code = subprocess.call([
'mongodump',
'--uri', f'"{url}"',
'--forceTableScan',
'--gzip',
'-c', collection_name,
'-o', export_dir
])
LOGGER.debug('Export command return code %s', return_code)
if return_code != 0:
raise ExportError(f'Export failed with code {return_code}')
# mongodump creates two files "{collection_name}.metadata.json.gz" & "{collection_name}.bson.gz"
# we are only interested in the latter so we delete the former.
os.remove(os.path.join(export_dir, self.connection_config['database'], f'{collection_name}.metadata.json.gz'))
return os.path.join(export_dir, self.connection_config['database'], f'{collection_name}.bson.gz')
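# Usage sketch (illustrative; the connection values and type mapper are
# placeholders, not part of this module):
#
#   tap = FastSyncTapMongoDB(connection_config, tap_type_to_target_type=mapper)
#   tap.open_connection()
#   try:
#       tap.copy_table('mydb.mycollection', '/tmp/mycollection.csv.gz', '/tmp')
#   finally:
#       tap.close_connection()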
|
/*
* Copyright (c) 2020, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
/* global jest, describe, expect */
import {PluginError, _} from '../../common/index.js';
import {DockerExec} from '../../docker/index.js';
import Plugin from './Plugin.js';
import log from 'winston';
jest.autoMockOff();
describe('Plugin tests', () => {
afterEach(() => {
jest.clearAllMocks();
jest.restoreAllMocks();
});
test('Empty constructor throws error.', () => {
expect(() => {
new Plugin();
}).toThrow(PluginError);
});
test('Missing pluginPath throws error.', () => {
expect(() => {
new Plugin({});
}).toThrow(PluginError);
});
test('New object creates new 36 char UUID.', () => {
const plugin = new Plugin({pluginPath: '/blah'});
expect(plugin.getId().length).toEqual(36);
});
test('loadAndValidate throws error on invalid schema.', async () => {
expect.assertions(1);
jest.spyOn(_, 'fetch').mockResolvedValue('{"invalid": "value"}');
const plugin = new Plugin({pluginPath: '/blah'});
try {
await plugin.loadAndValidate();
} catch (error) {
expect(error).toBeInstanceOf(PluginError);
}
});
test('loadAndValidate loads JSON.', async () => {
jest.spyOn(_, 'fetch').mockResolvedValue('{"version":2.4,"kind":"Plugin","preprocessor":{"image":"test:latest"}}');
const plugin = new Plugin({pluginPath: '/blah'});
await plugin.loadAndValidate();
expect(plugin.preprocessor).toEqual({'image': 'test:latest'});
});
test('loadAndValidate loads YAML.', async () => {
jest.spyOn(_, 'fetch').mockResolvedValue('version: 2.4\nkind: Plugin\npreprocessor:\n image: test:latest');
const plugin = new Plugin({pluginPath: '/blah'});
await plugin.loadAndValidate();
expect(plugin.preprocessor).toEqual({'image': 'test:latest'});
});
test('runPreprocessor skips executing preprocessor if one does not exist.', async () => {
jest.spyOn(_, 'fetch').mockResolvedValue('version: 2.4\nkind: Plugin');
const plugin = new Plugin({pluginPath: '/blah'});
await plugin.loadAndValidate();
const logDebug = jest.spyOn(log, 'debug').mockImplementation();
await plugin.runPreprocessor();
expect(logDebug.mock.calls[0][0]).toEqual(expect.stringMatching(/Skipping preprocessor/));
});
test('runPreprocessor returns result.output when exec.runPreprocessor returns status of 0', async () => {
jest.spyOn(_, 'fetch').mockResolvedValue('version: 2.4\nkind: Plugin\npreprocessor:\n image: test:latest');
const plugin = new Plugin({pluginPath: '/blah'});
await plugin.loadAndValidate();
const exec = new DockerExec();
exec.runPreprocessor = jest.fn().mockReturnValue({
status: 0,
output: 'test',
});
expect(await plugin.runPreprocessor(exec, '')).toEqual('test');
});
test('runPreprocessor throws PluginError with result.output when exec.runPreprocessor returns a non-0 status', async () => {
const logError = jest.spyOn(log, 'error').mockImplementation();
jest.spyOn(_, 'fetch').mockResolvedValue('version: 2.4\nkind: Plugin\npreprocessor:\n image: test:latest');
const plugin = new Plugin({pluginPath: '/blah'});
await plugin.loadAndValidate();
const exec = new DockerExec();
exec.runPreprocessor = jest.fn().mockReturnValue({
status: 1,
output: 'error',
});
await expect(plugin.runPreprocessor(exec, '')).rejects.toThrow(PluginError);
expect(logError.mock.calls[0][0]).toEqual('The preprocessor has returned a non-zero exit code (1):\nerror');
});
});
|
from big_ol_pile_of_manim_imports import *
from old_projects.div_curl import PureAirfoilFlow
from old_projects.div_curl import move_submobjects_along_vector_field
from old_projects.div_curl import move_points_along_vector_field
from old_projects.div_curl import four_swirls_function
from old_projects.lost_lecture import ShowWord
class CreationDestructionMobject(VMobject):
CONFIG = {
"start_time": 0,
"frequency": 0.25,
"max_ratio_shown": 0.3,
"use_copy": True,
}
def __init__(self, template, **kwargs):
VMobject.__init__(self, **kwargs)
if self.use_copy:
self.ghost_mob = template.copy().fade(1)
self.add(self.ghost_mob)
else:
self.ghost_mob = template
# Don't add
self.shown_mob = template.deepcopy()
self.shown_mob.clear_updaters()
self.add(self.shown_mob)
self.total_time = self.start_time
def update(mob, dt):
mob.total_time += dt
period = 1.0 / mob.frequency
unsmooth_alpha = (mob.total_time % period) / period
alpha = bezier([0, 0, 1, 1])(unsmooth_alpha)
mrs = mob.max_ratio_shown
mob.shown_mob.pointwise_become_partial(
mob.ghost_mob,
max(interpolate(-mrs, 1, alpha), 0),
min(interpolate(0, 1 + mrs, alpha), 1),
)
self.add_updater(update)
class Eddy(VMobject):
CONFIG = {
"cd_mob_config": {
"frequency": 0.2,
"max_ratio_shown": 0.3
},
"n_spirils": 5,
"n_layers": 20,
"radius": 1,
"colors": [BLUE_A, BLUE_E],
}
def __init__(self, **kwargs):
VMobject.__init__(self, **kwargs)
lines = self.get_lines()
# self.add(lines)
self.add(*[
CreationDestructionMobject(line, **self.cd_mob_config)
for line in lines
])
self.randomize_times()
def randomize_times(self):
for submob in self.submobjects:
if hasattr(submob, "total_time"):
T = 1.0 / submob.frequency
submob.total_time = T * random.random()
def get_lines(self):
a = 0.2
return VGroup(*[
self.get_line(r=self.radius * (1 - a + 2 * a * random.random()))
for x in range(self.n_layers)
])
def get_line(self, r):
return ParametricFunction(
lambda t: r * (t + 1)**(-1) * np.array([
np.cos(TAU * t),
np.sin(TAU * t),
0,
]),
t_min=0.1 * random.random(),
            t_max=self.n_spirals,
stroke_width=1,
color=interpolate_color(*self.colors, random.random())
)
class Chaos(Eddy):
CONFIG = {
"n_lines": 12,
"height": 1,
"width": 2,
"n_midpoints": 4,
"cd_mob_config": {
"use_copy": False,
"frequency": 1,
"max_ratio_shown": 0.8
}
}
def __init__(self, **kwargs):
VMobject.__init__(self, **kwargs)
rect = Rectangle(height=self.height, width=self.width)
rect.move_to(ORIGIN, DL)
rect.fade(1)
self.rect = rect
self.add(rect)
lines = self.get_lines()
self.add(*[
CreationDestructionMobject(line, **self.cd_mob_config)
for line in lines
])
self.randomize_times()
lines.fade(1)
self.add(lines)
def get_lines(self):
return VGroup(*[
self.get_line(y)
for y in np.linspace(0, self.height, self.n_lines)
])
def get_line(self, y):
frequencies = [0] + list(2 + 2 * np.random.random(self.n_midpoints)) + [0]
rect = self.rect
line = Line(
y * UP, y * UP + self.width * RIGHT,
stroke_width=1
)
line.insert_n_curves(self.n_midpoints)
line.total_time = random.random()
delta_h = self.height / (self.n_lines - 1)
def update(line, dt):
x0, y0 = rect.get_corner(DL)[:2]
x1, y1 = rect.get_corner(UR)[:2]
line.total_time += dt
xs = np.linspace(x0, x1, self.n_midpoints + 2)
new_anchors = [
np.array([
x + 1.0 * delta_h * np.cos(f * line.total_time),
y0 + y + 1.0 * delta_h * np.cos(f * line.total_time),
0
])
for (x, f) in zip(xs, frequencies)
]
line.set_points_smoothly(new_anchors)
line.add_updater(update)
return line
class DoublePendulum(VMobject):
CONFIG = {
"start_angles": [3 * PI / 7, 3 * PI / 4],
"color1": BLUE,
"color2": RED,
}
def __init__(self, **kwargs):
VMobject.__init__(self, **kwargs)
line1 = Line(ORIGIN, UP)
dot1 = Dot(color=self.color1)
dot1.add_updater(lambda d: d.move_to(line1.get_end()))
line2 = Line(UP, 2 * UP)
dot2 = Dot(color=self.color2)
dot2.add_updater(lambda d: d.move_to(line2.get_end()))
self.add(line1, line2, dot1, dot2)
# Largely copied from https://scipython.com/blog/the-double-pendulum/
# Pendulum rod lengths (m), bob masses (kg).
L1, L2 = 1, 1
m1, m2 = 1, 1
# The gravitational acceleration (m.s-2).
g = 9.81
self.state_vect = np.array([
self.start_angles[0], 0,
self.start_angles[1], 0,
])
self.state_vect += np.random.random(4) * 1e-7
def update(group, dt):
for x in range(2):
line1, line2 = group.submobjects[:2]
theta1, z1, theta2, z2 = group.state_vect
c, s = np.cos(theta1 - theta2), np.sin(theta1 - theta2)
theta1dot = z1
z1dot = (m2 * g * np.sin(theta2) * c - m2 * s * (L1 * (z1**2) * c + L2 * z2**2) -
(m1 + m2) * g * np.sin(theta1)) / L1 / (m1 + m2 * s**2)
theta2dot = z2
z2dot = ((m1 + m2) * (L1 * (z1**2) * s - g * np.sin(theta2) + g * np.sin(theta1) * c) +
m2 * L2 * (z2**2) * s * c) / L2 / (m1 + m2 * s**2)
group.state_vect += 0.5 * dt * np.array([
theta1dot, z1dot, theta2dot, z2dot,
])
group.state_vect[1::2] *= 0.9999
p1 = L1 * np.sin(theta1) * RIGHT - L1 * np.cos(theta1) * UP
p2 = p1 + L2 * np.sin(theta2) * RIGHT - L2 * np.cos(theta2) * UP
line1.put_start_and_end_on(ORIGIN, p1)
line2.put_start_and_end_on(p1, p2)
self.add_updater(update)
class DoublePendulums(VGroup):
def __init__(self, **kwargs):
colors = [BLUE, RED, YELLOW, PINK, MAROON_B, PURPLE, GREEN]
VGroup.__init__(
self,
*[
DoublePendulum(
color1=random.choice(colors),
color2=random.choice(colors),
)
for x in range(5)
],
**kwargs,
)
class Diffusion(VMobject):
CONFIG = {
"height": 1.5,
"n_dots": 1000,
"colors": [RED, BLUE]
}
def __init__(self, **kwargs):
VMobject.__init__(self, **kwargs)
self.add_dots()
self.add_invisible_circles()
def add_dots(self):
dots = VGroup(*[Dot() for x in range(self.n_dots)])
dots.arrange_in_grid(buff=SMALL_BUFF)
dots.center()
dots.set_height(self.height)
dots.sort(lambda p: p[0])
dots[:len(dots) // 2].set_color(self.colors[0])
dots[len(dots) // 2:].set_color(self.colors[1])
dots.set_fill(opacity=0.8)
self.dots = dots
self.add(dots)
def add_invisible_circles(self):
circles = VGroup()
for dot in self.dots:
point = dot.get_center()
radius = get_norm(point)
circle = Circle(radius=radius)
circle.rotate(angle_of_vector(point))
circle.fade(1)
circles.add(circle)
self.add_updater_to_dot(dot, circle)
self.add(circles)
def add_updater_to_dot(self, dot, circle):
dot.total_time = 0
radius = get_norm(dot.get_center())
freq = 0.1 + 0.05 * random.random() + 0.05 / radius
def update(dot, dt):
dot.total_time += dt
prop = (freq * dot.total_time) % 1
dot.move_to(circle.point_from_proportion(prop))
dot.add_updater(update)
class NavierStokesEquations(TexMobject):
CONFIG = {
"tex_to_color_map": {
"\\rho": YELLOW,
"\\mu": GREEN,
"\\textbf{v}": BLUE,
"p{}": RED,
},
"width": 10,
}
def __init__(self, **kwargs):
v_tex = "\\textbf{v}"
TexMobject.__init__(
self,
"\\rho",
"\\left("
"{\\partial", v_tex, "\\over",
"\\partial", "t}",
"+",
v_tex, "\\cdot", "\\nabla", v_tex,
"\\right)",
"=",
"-", "\\nabla", "p{}", "+",
"\\mu", "\\nabla^2", v_tex, "+",
# "\\frac{1}{3}", "\\mu", "\\nabla",
# "(", "\\nabla", "\\cdot", v_tex, ")", "+",
"\\textbf{F}",
"\\qquad\\qquad",
"\\nabla", "\\cdot", v_tex, "=", "0",
**kwargs
)
self.set_width(self.width)
def get_labels(self):
parts = self.get_parts()
words = [
"Analogous to \\\\ mass $\\times$ acceleration",
"Pressure\\\\forces",
"Viscous\\\\forces",
"External\\\\forces",
]
result = VGroup()
braces = VGroup()
word_mobs = VGroup()
for i, part, word in zip(it.count(), parts, words):
brace = Brace(part, DOWN, buff=SMALL_BUFF)
word_mob = brace.get_text(word)
word_mob.scale(0.7, about_edge=UP)
word_mobs.add(word_mob)
braces.add(brace)
result.add(VGroup(brace, word_mob))
word_mobs[1:].arrange(RIGHT, buff=MED_SMALL_BUFF)
word_mobs[1:].next_to(braces[2], DOWN, SMALL_BUFF)
word_mobs[1].set_color(RED)
word_mobs[2].set_color(GREEN)
return result
def get_parts(self):
return VGroup(
self[:12],
self[13:16],
self[17:20],
self[21:22],
)
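# For reference, the mobject above renders the incompressible Navier-Stokes
# momentum and continuity equations:
#   \rho (\partial v/\partial t + v \cdot \nabla v) = -\nabla p + \mu \nabla^2 v + F
#   \nabla \cdot v = 0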
class Test(Scene):
def construct(self):
self.add(DoublePendulums())
self.wait(30)
# Scenes
class EddyReference(Scene):
CONFIG = {
"radius": 1,
"label": "Eddy",
"label": "",
}
def construct(self):
eddy = Eddy(radius=self.radius)
new_eddy = eddy.get_lines()
for line in new_eddy:
line.set_stroke(
width=(3 + 3 * random.random())
)
label = TextMobject(self.label)
label.next_to(new_eddy, UP)
self.play(
LaggedStartMap(ShowCreationThenDestruction, new_eddy),
FadeIn(
label,
rate_func=there_and_back_with_pause,
),
run_time=3
)
class EddyReferenceWithLabel(EddyReference):
CONFIG = {
"label": "Eddy"
}
class EddyLabels(Scene):
def construct(self):
labels = VGroup(
TextMobject("Large eddy"),
TextMobject("Medium eddy"),
TextMobject("Small eddy"),
)
for label in labels:
self.play(FadeIn(
label,
rate_func=there_and_back_with_pause,
run_time=3
))
class LargeEddyReference(EddyReference):
CONFIG = {
"radius": 2,
"label": ""
}
class MediumEddyReference(EddyReference):
CONFIG = {
"radius": 0.8,
"label": "Medium eddy"
}
class SmallEddyReference(EddyReference):
CONFIG = {
"radius": 0.25,
"label": "Small eddy"
}
class SomeTurbulenceEquations(PiCreatureScene):
def construct(self):
randy, morty = self.pi_creatures
navier_stokes = NavierStokesEquations()
line = Line(randy.get_right(), morty.get_left())
navier_stokes.replace(line, dim_to_match=0)
navier_stokes.scale(1.2)
distribution = TexMobject(
"E(k) \\propto k^{-5/3}",
tex_to_color_map={
"k": GREEN,
"-5/3": YELLOW,
}
)
distribution.next_to(morty, UL)
brace = Brace(distribution, DOWN, buff=SMALL_BUFF)
brace_words = brace.get_text("Explained soon...")
brace_group = VGroup(brace, brace_words)
self.play(
Write(navier_stokes),
randy.change, "confused", navier_stokes,
morty.change, "confused", navier_stokes,
)
self.wait(3)
self.play(
morty.change, "raise_right_hand", distribution,
randy.look_at, distribution,
FadeInFromDown(distribution),
navier_stokes.fade, 0.5,
)
self.play(GrowFromCenter(brace_group))
self.play(randy.change, "pondering", distribution)
self.wait(3)
dist_group = VGroup(distribution, brace_group)
self.play(
LaggedStartMap(FadeOut, VGroup(randy, morty, navier_stokes)),
dist_group.scale, 1.5,
dist_group.center,
dist_group.to_edge, UP,
)
self.wait()
def create_pi_creatures(self):
randy, morty = Randolph(), Mortimer()
randy.to_corner(DL)
morty.to_corner(DR)
return (randy, morty)
class JokeRingEquation(Scene):
def construct(self):
items = VGroup(
TextMobject("Container with a lip"),
TextMobject("Fill with smoke (or fog)"),
TextMobject("Hold awkwardly"),
)
line = Line(LEFT, RIGHT).set_width(items.get_width() + 1)
items.add(line)
items.add(TextMobject("Vortex ring"))
items.arrange(DOWN, buff=MED_LARGE_BUFF, aligned_edge=LEFT)
line.shift(LEFT)
plus = TexMobject("+")
plus.next_to(line.get_left(), UR, SMALL_BUFF)
line.add(plus)
items.to_edge(RIGHT)
point = 3.8 * LEFT + 0.2 * UP
arrow1 = Arrow(
items[0].get_left(), point + 0.8 * UP + 0.3 * RIGHT,
path_arc=90 * DEGREES,
)
arrow1.pointwise_become_partial(arrow1, 0, 0.99)
arrow2 = Arrow(
items[1].get_left(), point,
)
arrows = VGroup(arrow1, arrow2)
for i in 0, 1:
self.play(
FadeInFromDown(items[i]),
ShowCreation(arrows[i])
)
self.wait()
self.play(LaggedStartMap(FadeIn, items[2:]))
self.wait()
self.play(FadeOut(arrows))
self.wait()
class VideoOnPhysicsGirlWrapper(Scene):
def construct(self):
rect = ScreenRectangle(height=6)
title = TextMobject("Video on Physics Girl")
title.scale(1.5)
title.to_edge(UP)
rect.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(rect))
self.wait()
class LightBouncingOffFogParticle(Scene):
def construct(self):
words = TextMobject(
"Light bouncing\\\\",
"off fog particles"
)
arrow = Vector(UP + 0.5 * RIGHT)
arrow.next_to(words, UP)
arrow.set_color(WHITE)
self.add(words)
self.play(GrowArrow(arrow))
self.wait()
class NightHawkInLightWrapper(Scene):
def construct(self):
title = TextMobject("NightHawkInLight")
title.scale(1.5)
title.to_edge(UP)
rect = ScreenRectangle(height=6)
rect.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(rect))
self.wait()
class CarefulWithLasers(TeacherStudentsScene):
def construct(self):
morty = self.teacher
randy = self.students[1]
randy2 = self.students[2]
# randy.change('hooray')
laser = VGroup(
Rectangle(
height=0.1,
width=0.3,
fill_color=LIGHT_GREY,
fill_opacity=1,
stroke_color=DARK_GREY,
stroke_width=1,
),
Line(ORIGIN, 10 * RIGHT, color=GREEN_SCREEN)
)
laser.arrange(RIGHT, buff=0)
laser.rotate(45 * DEGREES)
laser.shift(randy.get_corner(UR) - laser[0].get_center() + 0.1 * DR)
laser.time = 0
def update_laser(laser, dt):
laser.time += dt
laser.rotate(
0.5 * dt * np.sin(laser.time),
about_point=laser[0].get_center()
)
laser.add_updater(update_laser)
self.play(LaggedStartMap(FadeInFromDown, self.pi_creatures, run_time=1))
self.add(self.pi_creatures, laser)
for pi in self.pi_creatures:
pi.add_updater(lambda p: p.look_at(laser[1]))
self.play(
ShowCreation(laser),
self.get_student_changes(
"surprised", "hooray", "horrified",
look_at_arg=laser
)
)
self.teacher_says(
"Careful with \\\\ the laser!",
target_mode="angry"
)
self.wait(2.2)
morty.save_state()
randy2.save_state()
self.play(
morty.blink, randy2.blink,
run_time=0.3
)
self.wait(2)
self.play(
morty.restore, randy2.restore,
run_time=0.3
)
self.wait(2)
class SetAsideTurbulence(PiCreatureScene):
def construct(self):
self.pi_creature_says(
"Forget vortex rings",
target_mode="speaking"
)
self.wait()
self.pi_creature_says(
"look at that\\\\ turbulence!",
target_mode="surprised"
)
self.wait()
def create_pi_creature(self):
morty = Mortimer()
morty.to_corner(DR)
return morty
class WavingRodLabel(Scene):
def construct(self):
words = TextMobject(
"(Waving a small flag \\\\ through the air)"
)
self.play(Write(words))
self.wait()
class SeekOrderWords(Scene):
def construct(self):
words = TextMobject("Seek order amidst chaos")
words.scale(1.5)
self.play(Write(words))
self.wait()
class LongEddy(Scene):
def construct(self):
self.add(Eddy())
self.wait(30)
class LongDoublePendulum(Scene):
def construct(self):
self.add(DoublePendulums())
self.wait(30)
class LongDiffusion(Scene):
def construct(self):
self.add(Diffusion())
self.wait(30)
class AskAboutTurbulence(TeacherStudentsScene):
def construct(self):
self.pi_creatures_ask()
self.divide_by_qualitative_quantitative()
self.three_qualitative_descriptors()
self.rigorous_definition()
def pi_creatures_ask(self):
morty = self.teacher
randy = self.students[1]
morty.change("surprised")
words = TextMobject("Wait,", "what", "exactly \\\\", "is turbulence?")
question = TextMobject("What", "is turbulence?")
question.to_edge(UP, buff=MED_SMALL_BUFF)
h_line = Line(LEFT, RIGHT).set_width(FRAME_WIDTH - 1)
h_line.next_to(question, DOWN, buff=MED_LARGE_BUFF)
self.student_says(
words,
target_mode='raise_left_hand',
added_anims=[morty.change, 'pondering']
)
self.change_student_modes(
"erm", "raise_left_hand", "confused",
)
self.wait(3)
self.play(
morty.change, "raise_right_hand",
FadeOut(randy.bubble),
ReplacementTransform(VGroup(words[1], words[3]), question),
FadeOut(VGroup(words[0], words[2])),
self.get_student_changes(
*3 * ["pondering"],
look_at_arg=question
)
)
self.play(
ShowCreation(h_line),
LaggedStartMap(
FadeOutAndShiftDown, self.pi_creatures,
run_time=1,
lag_ratio=0.8
)
)
self.wait()
self.question = question
self.h_line = h_line
def divide_by_qualitative_quantitative(self):
v_line = Line(
self.h_line.get_center(),
FRAME_HEIGHT * DOWN / 2,
)
words = VGroup(
TextMobject("Features", color=YELLOW),
TextMobject("Rigorous definition", color=BLUE),
)
words.next_to(self.h_line, DOWN)
words[0].shift(FRAME_WIDTH * LEFT / 4)
words[1].shift(FRAME_WIDTH * RIGHT / 4)
self.play(
ShowCreation(v_line),
LaggedStartMap(FadeInFromDown, words)
)
self.wait()
self.words = words
def three_qualitative_descriptors(self):
words = VGroup(
TextMobject("- Eddies"),
TextMobject("- Chaos"),
TextMobject("- Diffusion"),
)
words.arrange(
DOWN, buff=1.25,
aligned_edge=LEFT
)
words.to_edge(LEFT)
words.shift(MED_LARGE_BUFF * DOWN)
# objects = VGroup(
# Eddy(),
# DoublePendulum(),
# Diffusion(),
# )
# for word, obj in zip(words, objects):
for word in words:
# obj.next_to(word, RIGHT)
self.play(
FadeInFromDown(word),
# VFadeIn(obj)
)
self.wait(3)
def rigorous_definition(self):
randy = Randolph()
randy.move_to(FRAME_WIDTH * RIGHT / 4)
randy.change("pondering", self.words[1])
self.play(FadeIn(randy))
self.play(Blink(randy))
self.wait()
self.play(randy.change, "shruggie")
for x in range(2):
self.play(Blink(randy))
self.wait()
self.play(randy.look, LEFT)
self.wait(2)
self.play(randy.look, UP)
self.play(Blink(randy))
self.wait()
class BumpyPlaneRide(Scene):
def construct(self):
plane = SVGMobject(file_name="plane2")
self.add(plane)
total_time = 0
while total_time < 10:
point = 2 * np.append(np.random.random(2), 2) + DL
point *= 0.2
time = 0.2 * random.random()
total_time += time
arc = PI * random.random() - PI / 2
self.play(
plane.move_to, point,
run_time=time,
path_arc=arc
)
class PureAirfoilFlowCopy(PureAirfoilFlow):
def modify_vector_field(self, vector_field):
PureAirfoilFlow.modify_vector_field(self, vector_field)
vector_field.set_fill(opacity=0.1)
vector_field.set_stroke(opacity=0.1)
class LaminarFlowLabel(Scene):
def construct(self):
words = TextMobject("Laminar flow")
words.scale(1.5)
words.to_edge(UP)
subwords = TextMobject(
"`Lamina', in Latin, means \\\\"
"``a thin sheet of material''",
tex_to_color_map={"Lamina": YELLOW},
arg_separator="",
)
subwords.next_to(words, DOWN, MED_LARGE_BUFF)
VGroup(words, subwords).set_background_stroke(width=4)
self.play(Write(words))
self.wait()
self.play(FadeInFromDown(subwords))
self.wait()
class HighCurlFieldBreakingLayers(Scene):
CONFIG = {
"flow_anim": move_submobjects_along_vector_field,
}
def construct(self):
lines = VGroup(*[
self.get_line()
for x in range(20)
])
lines.arrange(DOWN, buff=MED_SMALL_BUFF)
lines[0::2].set_color(BLUE)
lines[1::2].set_color(RED)
all_dots = VGroup(*it.chain(*lines))
def func(p):
vect = four_swirls_function(p)
norm = get_norm(vect)
if norm > 2:
vect *= 4.0 / get_norm(vect)**2
return vect
self.add(lines)
self.add(self.flow_anim(all_dots, func))
self.wait(16)
def get_line(self):
line = VGroup(*[Dot() for x in range(100)])
line.set_height(0.1)
line.arrange(RIGHT, buff=0)
line.set_width(10)
return line
class HighCurlFieldBreakingLayersLines(HighCurlFieldBreakingLayers):
CONFIG = {
"flow_anim": move_points_along_vector_field
}
def get_line(self):
line = Line(LEFT, RIGHT)
line.insert_n_curves(500)
line.set_width(5)
return line
class VorticitySynonyms(Scene):
def construct(self):
words = VGroup(
TextMobject("High", "vorticity"),
TexMobject(
"\\text{a.k.a} \\,",
"|\\nabla \\times \\vec{\\textbf{v}}| > 0"
),
TextMobject("a.k.a", "high", "swirly-swirly", "factor"),
)
words[0].set_color_by_tex("vorticity", BLUE)
words[1].set_color_by_tex("nabla", BLUE)
words[2].set_color_by_tex("swirly", BLUE)
words.arrange(
DOWN,
aligned_edge=LEFT,
buff=MED_LARGE_BUFF
)
for word in words:
word.add_background_rectangle()
self.play(FadeInFromDown(word))
self.wait()
class VorticityDoesNotImplyTurbulence(TeacherStudentsScene):
def construct(self):
t_to_v = TextMobject(
"Turbulence", "$\\Rightarrow$", "Vorticity",
)
v_to_t = TextMobject(
"Vorticity", "$\\Rightarrow$", "Turbulence",
)
for words in t_to_v, v_to_t:
words.move_to(self.hold_up_spot, DR)
words.set_color_by_tex_to_color_map({
"Vorticity": BLUE,
"Turbulence": GREEN,
})
v_to_t.submobjects.reverse()
cross = Cross(v_to_t[1])
morty = self.teacher
self.play(
morty.change, "raise_right_hand",
FadeInFromDown(t_to_v)
)
self.wait()
self.play(t_to_v.shift, 2 * UP,)
self.play(
TransformFromCopy(t_to_v, v_to_t, path_arc=PI / 2),
self.get_student_changes(
"erm", "confused", "sassy",
run_time=1
),
ShowCreation(cross, run_time=2),
)
self.add(cross)
self.wait(4)
class SurroundingRectangleSnippet(Scene):
def construct(self):
rect = Rectangle()
rect.set_color(YELLOW)
rect.set_stroke(width=5)
self.play(ShowCreation(rect))
self.play(FadeOut(rect))
class FeynmanOnTurbulence(Scene):
def construct(self):
feynman = ImageMobject("Feynman_Woods", height=4)
name = TextMobject("Richard Feynman")
name.next_to(feynman, DOWN)
quote = TextMobject(
"``", "Turbulence", "is the most\\\\"
"important", "unsolved problem\\\\",
"of classical physics.''",
tex_to_color_map={
"Turbulence": BLUE,
"unsolved problem\\\\": YELLOW,
},
)
quote[0].shift(SMALL_BUFF * RIGHT)
quote.next_to(feynman, RIGHT)
Group(feynman, name, quote).center()
self.play(
FadeInFrom(feynman, UP),
FadeInFrom(name, DOWN),
Write(quote, run_time=4)
)
self.wait()
class ShowNavierStokesEquations(Scene):
def construct(self):
self.introduce_equations()
self.ask_about_evolution()
self.ask_about_reasonable()
self.ask_about_blowup()
self.show_money()
def introduce_equations(self):
name = TextMobject("Navier-Stokes equations (incompressible)")
equations = NavierStokesEquations()
name.to_edge(UP)
equations.next_to(name, DOWN, MED_LARGE_BUFF)
labels = equations.get_labels()
parts = equations.get_parts()
newtons_second = TextMobject(
"Newton's 2nd law \\\\ $ma = F$"
)
newtons_second.next_to(parts, DOWN)
variables = TexMobject(
"&\\textbf{v}", "\\text{ is velocity}\\\\",
"&\\rho", "\\text{ is density}\\\\",
"&p{}", "\\text{ is pressure}\\\\",
"&\\mu", "\\text{ is viscosity}\\\\",
tex_to_color_map=NavierStokesEquations.CONFIG["tex_to_color_map"]
)
variables.to_corner(DL)
self.play(FadeInFromDown(equations))
self.play(Write(name))
self.play(LaggedStartMap(
FadeInFrom, variables,
lambda m: (m, RIGHT),
))
self.wait()
self.play(Write(newtons_second))
self.wait()
self.play(
FadeInFromDown(labels[0]),
newtons_second.next_to, variables, RIGHT, LARGE_BUFF
)
self.play(ShowCreationThenFadeAround(parts[0]))
self.wait()
self.play(LaggedStartMap(FadeInFrom, labels[1:]))
self.wait(3)
self.play(LaggedStartMap(
FadeOut, VGroup(*it.chain(labels, variables, newtons_second))
))
self.equations = equations
def ask_about_evolution(self):
words = TextMobject(
"Given a start state...",
"...how does it evolve?"
)
words.arrange(RIGHT, buff=2)
words.next_to(self.equations, DOWN, LARGE_BUFF)
self.play(Write(words[0]))
self.wait()
self.play(Write(words[1]))
self.wait(2)
self.play(FadeOut(words))
def ask_about_reasonable(self):
question = TextMobject(
"Do ``reasonable'' \\\\"
"solutions always\\\\"
"exist?"
)
self.play(FadeInFromDown(question))
self.wait()
self.reasonable_question = question
def ask_about_blowup(self):
axes, graph = self.get_axes_and_graph()
question = TextMobject("Is this possible?")
question.set_color(YELLOW)
question.move_to(axes.get_corner(UR), LEFT)
question.align_to(axes, UP)
q_arrow = Arrow(
question.get_bottom(),
graph.point_from_proportion(0.8),
buff=SMALL_BUFF,
path_arc=-60 * DEGREES
)
q_arrow.set_stroke(WHITE, 3)
morty = Mortimer()
morty.to_corner(DR)
morty.change('confused', graph)
self.play(
Write(axes, run_time=1),
self.reasonable_question.to_edge, LEFT,
self.reasonable_question.shift, DOWN,
)
self.play(
Write(question),
ShowCreation(graph),
FadeIn(morty),
)
self.add(q_arrow, morty)
self.play(ShowCreation(q_arrow), Blink(morty))
self.wait()
self.play(morty.look_at, question)
self.wait()
self.play(morty.change, "maybe", graph)
self.wait(2)
to_fade = VGroup(question, q_arrow, axes, graph)
self.play(
LaggedStartMap(FadeOut, to_fade),
morty.change, "pondering"
)
self.wait(2)
self.play(Blink(morty))
self.wait(2)
self.morty = morty
def show_money(self):
# Million dollar problem
problem = TextMobject(
"Navier-Stokes existence \\\\ and smoothness problems"
)
money = TextMobject("\\$1{,}000{,}000")
money.set_color(GREEN)
money.next_to(problem, DOWN)
pi1 = Randolph()
pi2 = self.morty
pi1.to_corner(DL)
pis = VGroup(pi1, pi2)
for pi in pis:
pi.change("pondering")
pi.money_eyes = VGroup()
for eye in pi.eyes:
cash = TexMobject("\\$")
cash.set_color(GREEN)
cash.replace(eye, dim_to_match=1)
pi.money_eyes.add(cash)
self.play(
ReplacementTransform(
self.reasonable_question,
problem,
),
pi2.look_at, problem,
pi1.look_at, problem,
VFadeIn(pi1),
)
self.wait()
self.play(FadeInFromLarge(money))
self.play(
pi1.change, "hooray",
pi2.change, "hooray",
)
self.play(
ReplacementTransform(pi1.pupils, pi1.money_eyes),
ReplacementTransform(pi2.pupils, pi2.money_eyes),
)
self.wait()
# Helpers
def get_axes_and_graph(self):
axes = Axes(
x_min=-1,
x_max=5,
y_min=-1,
y_max=5,
)
time = TextMobject("Time")
time.next_to(axes.x_axis, RIGHT)
ke = TextMobject("Kinetic energy")
ke.next_to(axes.y_axis, UP)
axes.add(time, ke)
axes.set_height(4)
axes.center()
axes.to_edge(DOWN)
v_line = DashedLine(
axes.coords_to_point(4, 0),
axes.coords_to_point(4, 5),
)
axes.add(v_line)
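        # The graph of -1/(x - 4) models kinetic energy blowing up in finite
        # time: it diverges as x approaches the dashed vertical line at x = 4.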
graph = axes.get_graph(
lambda x: -1.0 / (x - 4),
x_min=0.01,
x_max=3.8,
)
graph.set_color(BLUE)
return axes, graph
class NewtonsSecond(Scene):
def construct(self):
square = Square(
stroke_color=WHITE,
fill_color=LIGHT_GREY,
fill_opacity=0.5,
side_length=1
)
label = TexMobject("m")
label.scale(1.5)
label.move_to(square)
square.add(label)
square.save_state()
arrows = VGroup(
Vector(0.5 * UP).next_to(square, UP, buff=0),
Vector(RIGHT).next_to(square, RIGHT, buff=0),
)
self.play(
square.shift, 4 * RIGHT + 2 * UP,
rate_func=lambda t: t**2,
run_time=2
)
self.wait()
square.restore()
self.play(
LaggedStartMap(GrowArrow, arrows)
)
square.add(arrows)
self.play(
square.shift, 4 * RIGHT + 2 * UP,
rate_func=lambda t: t**2,
run_time=2
)
self.wait()
class CandleLabel(Scene):
def construct(self):
word = TextMobject("Candle")
arrow = Vector(DR, color=WHITE)
arrow.move_to(word.get_bottom() + SMALL_BUFF * DOWN, UL)
self.play(
FadeInFromDown(word),
GrowArrow(arrow)
)
self.wait()
class FiguresOfFluidDynamics(Scene):
def construct(self):
names = [
"Leonhard Euler",
"George Stokes",
"Hermann von Helmholtz",
"Lewis Richardson",
"Geoffrey Taylor",
"Andrey Kolmogorov",
]
images = Group(*[
ImageMobject(name.replace(" ", "_"), height=3)
for name in names
])
images.arrange(RIGHT, buff=MED_SMALL_BUFF)
image_groups = Group()
for image, name in zip(images, names):
name_mob = TextMobject(name)
name_mob.scale(0.6)
name_mob.next_to(image, DOWN)
image_groups.add(Group(image, name_mob))
image_groups.arrange_in_grid(2, 3)
image_groups.set_height(FRAME_HEIGHT - 1)
self.play(LaggedStartMap(
FadeInFromDown, image_groups,
lag_ratio=0.5,
run_time=3
))
self.wait()
to_fade = image_groups[:-1]
to_fade.generate_target()
to_fade.target.space_out_submobjects(3)
to_fade.target.shift(3 * UL)
to_fade.target.fade(1)
self.play(
MoveToTarget(to_fade, remover=True),
image_groups[-1].set_height, 5,
image_groups[-1].center,
)
self.wait()
class KineticEnergyBreakdown(Scene):
def construct(self):
title = TextMobject("Kinetic energy breakdown")
title.to_edge(UP)
h_line = Line(LEFT, RIGHT).set_width(FRAME_WIDTH)
h_line.next_to(title, DOWN)
v_line = Line(h_line.get_center(), FRAME_HEIGHT * DOWN / 2)
lc_title = TextMobject("Simpler physics")
lc_title.set_color(YELLOW)
rc_title = TextMobject("Turbulence physics")
rc_title.set_color(GREEN)
for word, vect in (lc_title, LEFT), (rc_title, RIGHT):
word.next_to(h_line, DOWN)
word.shift(FRAME_WIDTH * vect / 4)
left_items = VGroup(
TextMobject("- Big moving things"),
TextMobject("- Heat"),
)
left_items.arrange(DOWN, aligned_edge=LEFT)
left_items.next_to(lc_title, DOWN, MED_LARGE_BUFF)
left_items.to_edge(LEFT)
self.play(
Write(VGroup(*it.chain(
title, h_line, v_line, lc_title, rc_title
)))
)
self.wait()
for item in left_items:
self.play(FadeInFrom(item))
self.wait()
class MovingCar(Scene):
def construct(self):
car = Car()
x = 3
car.move_to(x * LEFT)
self.play(MoveCar(car, x * RIGHT, run_time=4))
class Heat(Scene):
def construct(self):
box = Square(
side_length=2,
stroke_color=WHITE,
)
balls = VGroup(*[
self.get_ball(box)
for x in range(20)
])
self.add(box, balls)
self.wait(20)
def get_ball(self, box):
speed_factor = random.random()
ball = Dot(
radius=0.05,
color=interpolate_color(BLUE, RED, speed_factor)
)
speed = 2 + 3 * speed_factor
direction = rotate_vector(RIGHT, TAU * random.random())
ball.velocity = speed * direction
x0, y0, z0 = box.get_corner(DL)
x1, y1, z1 = box.get_corner(UR)
ball.move_to(np.array([
interpolate(x0, x1, random.random()),
interpolate(y0, y1, random.random()),
0
]))
def update(ball, dt):
ball.shift(ball.velocity * dt)
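            # Elastic reflection off each wall: force the sign of the relevant
            # velocity component so a ball past a boundary heads back inside.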
if ball.get_left()[0] < box.get_left()[0]:
ball.velocity[0] = abs(ball.velocity[0])
if ball.get_right()[0] > box.get_right()[0]:
ball.velocity[0] = -abs(ball.velocity[0])
if ball.get_bottom()[1] < box.get_bottom()[1]:
ball.velocity[1] = abs(ball.velocity[1])
if ball.get_top()[1] > box.get_top()[1]:
ball.velocity[1] = -abs(ball.velocity[1])
return ball
ball.add_updater(update)
return ball
class GrowArrowScene(Scene):
def construct(self):
arrow = Arrow(UP, DOWN, color=WHITE)
self.play(GrowArrow(arrow))
self.wait()
class Poem(Scene):
def construct(self):
picture = ImageMobject("Lewis_Richardson")
picture.set_height(4)
picture.center().to_edge(LEFT, buff=LARGE_BUFF)
title = TextMobject("Poem by Lewis F. Richardson")
title.to_edge(UP)
poem_text = """
Big{\\,\\,}whirls have little{\\,\\,}whirls\\\\
which feed on their velocity,\\\\
And little{\\,\\,}whirls have lesser{\\,\\,}whirls\\\\
And so on to viscosity.\\\\
"""
poem_words = [s for s in poem_text.split(" ") if s]
poem = TextMobject(*poem_words, alignment="")
poem.next_to(picture, RIGHT, LARGE_BUFF)
self.add(picture)
self.play(FadeInFrom(title, DOWN))
self.wait()
for word in poem:
if "whirl" in word.get_tex_string():
word.set_color(BLUE)
self.play(ShowWord(word))
self.wait(0.005 * len(word)**1.5)
class SwirlDiameterD(Scene):
def construct(self):
kwargs = {
"path_arc": PI,
"buff": SMALL_BUFF,
"color": WHITE
}
swirl = VGroup(
Arrow(RIGHT, LEFT, **kwargs),
Arrow(LEFT, RIGHT, **kwargs),
)
swirl.set_stroke(width=5)
f = 1.5
swirl.scale(f)
h_line = DashedLine(
f * LEFT, f * RIGHT,
color=YELLOW,
)
D_label = TexMobject("D")
D_label.scale(2)
D_label.next_to(h_line, UP, SMALL_BUFF)
D_label.match_color(h_line)
# diam = VGroup(h_line, D_label)
self.play(*map(ShowCreation, swirl))
self.play(
GrowFromCenter(h_line),
FadeInFrom(D_label, UP),
)
self.wait()
class KolmogorovGraph(Scene):
def construct(self):
axes = Axes(
x_min=-1,
y_min=-1,
x_max=7,
y_max=9,
y_axis_config={
"unit_size": 0.7,
}
)
axes.center().shift(1.5 * RIGHT)
x_label = TexMobject("\\log(D)")
x_label.next_to(axes.x_axis.get_right(), UP)
y_label = TexMobject("\\log(\\text{K.E. at length scale D})")
y_label.scale(0.8)
y_label.next_to(axes.y_axis.get_top(), LEFT)
y_label.shift_onto_screen()
axes.add(x_label, y_label)
v_lines = VGroup(*[
DashedLine(
axes.coords_to_point(x, 0),
axes.coords_to_point(x, 9),
color=YELLOW,
stroke_width=1
)
for x in [0.5, 5]
])
inertial_subrange = TextMobject("``Inertial subrange''")
inertial_subrange.scale(0.7)
inertial_subrange.next_to(v_lines.get_bottom(), UP)
def func(x):
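            # Piecewise log-log spectrum: slope 5/3 inside the inertial
            # subrange (0.5 < x < 5), steeper below it, shallower above it.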
            if 0.5 < x < 5:
                return (5 / 3) * x
            elif x <= 0.5:
                return 5 * (x - 0.5) + 0.5 * (5 / 3)
            else:
                return np.log(x) + (5 / 3) * 5 - np.log(5)
graph = axes.get_graph(func, x_min=0.3, x_max=7)
prop_label = TexMobject("\\text{K.E.} \\propto D^{5/3}")
prop_label.next_to(
graph.point_from_proportion(0.5), UL,
buff=0
)
self.add(axes)
self.play(ShowCreation(graph))
self.play(FadeInFromDown(prop_label))
self.wait()
self.add(v_lines)
self.play(Write(inertial_subrange))
self.wait()
class TechnicalNote(Scene):
def construct(self):
title = TextMobject("Technical note:")
title.to_edge(UP)
title.set_color(RED)
self.add(title)
words = TextMobject("""
This idea of quantifying the energy held at different
length scales is typically defined
in terms of an ``energy spectrum'' involving the Fourier
transform of a function measuring the correlations
between the fluid's velocities at different points in space.
I know, yikes!
\\quad\\\\
\\quad\\\\
Building up the relevant background for that is a bit cumbersome,
so we'll be thinking about the energy at different scales in
            terms of all eddies with a given diameter. This is admittedly
a less well-defined notion, but it does capture the spirit
of Kolmogorov's result.
\\quad\\\\
\\quad\\\\
See the links in the description for more details,
if you're curious.
""", alignment="")
words.scale(0.75)
words.next_to(title, DOWN, LARGE_BUFF)
self.add(title, words)
class FiveThirds(TeacherStudentsScene):
def construct(self):
words = TextMobject(
"5/3", "is a sort of fundamental\\\\ constant of turbulence"
)
self.teacher_says(words)
self.change_student_modes("pondering", "maybe", "erm")
self.play(
FadeOut(self.teacher.bubble),
FadeOut(words[1]),
self.teacher.change, "raise_right_hand",
words[0].scale, 1.5,
words[0].move_to, self.hold_up_spot
)
self.change_student_modes("thinking", "pondering", "hooray")
self.wait(3)
class TurbulenceGifLabel(Scene):
def construct(self):
title = TextMobject("Turbulence in 2d")
title.to_edge(UP)
attribution = TextMobject(
"Animation by Gabe Weymouth (@gabrielweymouth)"
)
attribution.scale(0.5)
attribution.to_edge(DOWN)
self.play(Write(title))
self.play(FadeInFrom(attribution, UP))
self.wait()
class VortexStretchingLabel(Scene):
def construct(self):
title = TextMobject("Vortex stretching")
self.play(Write(title))
self.wait()
class VortexStretching(ThreeDScene):
CONFIG = {
"n_circles": 200,
}
def construct(self):
axes = ThreeDAxes()
axes.set_stroke(width=1)
self.add(axes)
self.move_camera(
phi=70 * DEGREES,
theta=-145 * DEGREES,
run_time=0,
)
self.begin_ambient_camera_rotation()
short_circles = self.get_cylinder_circles(2, 0.5, 0.5)
tall_circles = short_circles.copy().scale(0.125)
tall_circles.stretch(16 * 4, 2)
torus_circles = tall_circles.copy()
for circle in torus_circles:
circle.shift(RIGHT)
z = circle.get_center()[2]
circle.shift(z * IN)
angle = PI * z / 2
circle.rotate(angle, axis=DOWN, about_point=ORIGIN)
circles = short_circles.copy()
flow_lines = self.get_flow_lines(circles)
self.add(circles, flow_lines)
self.play(LaggedStartMap(ShowCreation, circles))
self.wait(5)
self.play(Transform(circles, tall_circles, run_time=3))
self.wait(10)
self.play(Transform(
circles, torus_circles,
run_time=3
))
self.wait(10)
def get_cylinder_circles(self, radius, radius_var, max_z):
return VGroup(*[
ParametricFunction(
lambda t: np.array([
np.cos(TAU * t) * r,
np.sin(TAU * t) * r,
z
]),
**self.get_circle_kwargs()
)
for z in sorted(max_z * np.random.random(self.n_circles))
for r in [radius + radius_var * random.random()]
]).center()
def get_torus_circles(self, out_r, in_r, in_r_var):
result = VGroup()
for u in sorted(np.random.random(self.n_circles)):
r = in_r + in_r_var * random.random()
circle = ParametricFunction(
lambda t: r * np.array([
np.cos(TAU * t),
np.sin(TAU * t),
0,
]),
**self.get_circle_kwargs()
)
circle.shift(out_r * RIGHT)
circle.rotate(
TAU * u - PI,
about_point=ORIGIN,
axis=DOWN,
)
result.add(circle)
return result
def get_flow_lines(self, circle_group):
window = 0.3
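        # Each flow line repeatedly traces a sliding window of its template
        # circle; the cycle period grows with sqrt(diameter), so larger eddies
        # appear to turn over more slowly.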
def update_circle(circle, dt):
circle.total_time += dt
diameter = get_norm(
circle.template.point_from_proportion(0) -
circle.template.point_from_proportion(0.5)
)
modulus = np.sqrt(diameter) + 0.1
alpha = (circle.total_time % modulus) / modulus
circle.pointwise_become_partial(
circle.template,
max(interpolate(-window, 1, alpha), 0),
min(interpolate(0, 1 + window, alpha), 1),
)
result = VGroup()
for template in circle_group:
circle = template.deepcopy()
circle.set_stroke(
color=interpolate_color(BLUE_A, BLUE_E, random.random()),
# width=3 * random.random()
width=1,
)
circle.template = template
circle.total_time = 4 * random.random()
circle.add_updater(update_circle)
result.add(circle)
return result
def get_circle_kwargs(self):
return {
"stroke_color": BLACK,
"stroke_width": 0,
}
class TurbulenceEndScreen(PatreonEndScreen):
CONFIG = {
"specific_patrons": [
"1stViewMaths",
"Adrian Robinson",
"Alexis Olson",
"Andrew Busey",
"Ankalagon",
"Art Ianuzzi",
"Awoo",
"Ayan Doss",
"Bernd Sing",
"Boris Veselinovich",
"Brian Staroselsky",
"Britt Selvitelle",
"Carla Kirby",
"Charles Southerland",
"Chris Connett",
"Christian Kaiser",
"Clark Gaebel",
"Cooper Jones",
"Danger Dai",
"Dave B",
"Dave Kester",
"David Clark",
"Delton Ding",
"Devarsh Desai",
"eaglle",
"Eric Younge",
"Eryq Ouithaqueue",
"Federico Lebron",
"Florian Chudigiewitsch",
"Giovanni Filippi",
"Hal Hildebrand",
"Igor Napolskikh",
"Jacob Magnuson",
"Jameel Syed",
"James Hughes",
"Jan Pijpers",
"Jason Hise",
"Jeff Linse",
"Jeff Straathof",
"Jerry Ling",
"John Griffith",
"John Haley",
"John V Wertheim",
"Jonathan Eppele",
"Jonathan Wilson",
"Jordan Scales",
"Joseph John Cox",
"Julian Pulgarin",
"Kai-Siang Ang",
"Kanan Gill",
"L0j1k",
"Linh Tran",
"Luc Ritchie",
"Ludwig Schubert",
"Lukas -krtek.net- Novy",
"Magister Mugit",
"Magnus Dahlström",
"Mark B Bahu",
"Markus Persson",
"Mathew Bramson",
"Mathias Jansson",
"Matt Langford",
"Matt Roveto",
"Matthew Cocke",
"Mehdi Razavi",
"Michael Faust",
"Michael Hardel",
"Mustafa Mahdi",
"Márton Vaitkus",
"Nero Li",
"Oliver Steele",
"Omar Zrien",
"Peter Ehrnstrom",
"Prasant Jagannath",
"Randy C. Will",
"Richard Burgmann",
"Ripta Pasay",
"Rish Kundalia",
"Robert Teed",
"Roobie",
"Ryan Atallah",
"Ryan Williams",
"Sindre Reino Trosterud",
"Solara570",
"Song Gao",
"Steven Soloway",
"Steven Tomlinson",
"Stevie Metke",
"Ted Suzman",
"Valeriy Skobelev",
"Xavier Bernard",
"Yaw Etse",
"YinYangBalance.Asia",
"Zach Cardwell",
],
}
class LaserWord(Scene):
def construct(self):
self.add(TextMobject("Laser").scale(2))
class TurbulenceWord(Scene):
def construct(self):
self.add(TextMobject("Turbulence").scale(2))
class ArrowScene(Scene):
def construct(self):
arrow = Arrow(LEFT, RIGHT, color=WHITE)
arrow.add_to_back(arrow.copy().set_stroke(BLACK, 5))
self.add(arrow)
|
// @flow
import React from 'react'
import styled from 'styled-components'
import { rgba } from 'styles/helpers'
import Box, { Tabbable } from 'components/base/Box'
type Item = {
label: React$Node,
key: string,
value?: any,
}
type Props = {
items: Array<Item>,
activeKey: string,
onChange: Item => void,
bordered?: boolean,
}
const Container = styled(Box).attrs(() => ({
horizontal: true,
}))``
const Pill = styled(Tabbable).attrs(p => ({
ff: p.bordered ? 'Open Sans|Bold' : p.isActive ? 'Open Sans|SemiBold' : 'Open Sans',
color: p.isActive ? 'wallet' : 'palette.text.shade80',
bg: p.isActive ? rgba(p.theme.colors.wallet, 0.1) : '',
px: p.bordered ? 2 : 3,
fontSize: 3,
borderRadius: 1,
alignItems: 'center',
justifyContent: 'center',
}))`
border: ${p => (p.bordered ? '1px solid' : 'none')};
border-color: ${p => (p.isActive ? p.theme.colors.wallet : p.theme.colors.palette.divider)};
height: 28px;
outline: none;
cursor: ${p => (p.isActive ? 'default' : 'pointer')};
width: ${p => (p.bordered ? '40px' : '')};
&:focus {
color: ${p => p.theme.colors.wallet};
background-color: ${p => (p.isActive ? '' : rgba(p.theme.colors.palette.text.shade100, 0.02))};
}
`
function Pills(props: Props) {
const { items, activeKey, onChange, bordered, ...p } = props
return (
<Container flow={1} {...p}>
{items.map(item => {
const isActive = item.key === activeKey
return (
<Pill
isActive={isActive}
onClick={() => onChange(item)}
key={item.key}
bordered={bordered}
data-e2e={`tabs_${item.key}`}
>
{item.label}
</Pill>
)
})}
</Container>
)
}
export default Pills
|
const Index = () =>
<div>
    <h2>Hello nextjs!</h2>
</div>
export default Index;
|
'use strict';
var bitcore = require('../..');
var BN = require('../../lib/crypto/bn');
var BufferReader = bitcore.encoding.BufferReader;
var BufferWriter = bitcore.encoding.BufferWriter;
var BlockHeader = bitcore.BlockHeader;
var fs = require('fs');
var should = require('chai').should();
// https://insight.colossusxt.org/block/0000000cc55c08ed64afb41c7c2f382a64901eadfcc6663c4e70987fdc0e8401
var dataRawBlockBuffer = fs.readFileSync('test/data/blk19976-testnet.dat');
var dataRawBlockBinary = fs.readFileSync('test/data/blk19976-testnet.dat', 'binary');
var dataRawId = '0000000cc55c08ed64afb41c7c2f382a64901eadfcc6663c4e70987fdc0e8401';
var data = require('../data/blk19976-testnet');
describe('BlockHeader', function() {
var version = data.version;
  var prevblockidbuf = Buffer.from(data.prevblockidhex, 'hex');
  var merklerootbuf = Buffer.from(data.merkleroothex, 'hex');
var time = data.time;
var bits = data.bits;
var nonce = data.nonce;
var bh = new BlockHeader({
version: version,
prevHash: prevblockidbuf,
merkleRoot: merklerootbuf,
time: time,
bits: bits,
nonce: nonce
});
var bhhex = data.blockheaderhex;
  var bhbuf = Buffer.from(bhhex, 'hex');
it('should make a new blockheader', function() {
BlockHeader(bhbuf).toBuffer().toString('hex').should.equal(bhhex);
});
it('should not make an empty block', function() {
(function() {
BlockHeader();
}).should.throw('Unrecognized argument for BlockHeader');
});
describe('#constructor', function() {
it('should set all the variables', function() {
var bh = new BlockHeader({
version: version,
prevHash: prevblockidbuf,
merkleRoot: merklerootbuf,
time: time,
bits: bits,
nonce: nonce
});
should.exist(bh.version);
should.exist(bh.prevHash);
should.exist(bh.merkleRoot);
should.exist(bh.time);
should.exist(bh.bits);
should.exist(bh.nonce);
});
it('will throw an error if the argument object hash property doesn\'t match', function() {
(function() {
var bh = new BlockHeader({
hash: '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f',
version: version,
prevHash: prevblockidbuf,
merkleRoot: merklerootbuf,
time: time,
bits: bits,
nonce: nonce
});
}).should.throw('Argument object hash property does not match block hash.');
});
});
describe('version', function() {
it('is interpreted as an int32le', function() {
var hex = 'ffffffff00000000000000000000000000000000000000000000000000000000000000004141414141414141414141414141414141414141414141414141414141414141010000000200000003000000';
      var header = BlockHeader.fromBuffer(Buffer.from(hex, 'hex'));
header.version.should.equal(-1);
header.timestamp.should.equal(1);
});
});
describe('#fromObject', function() {
it('should set all the variables', function() {
var bh = BlockHeader.fromObject({
version: version,
prevHash: prevblockidbuf.toString('hex'),
merkleRoot: merklerootbuf.toString('hex'),
time: time,
bits: bits,
nonce: nonce
});
should.exist(bh.version);
should.exist(bh.prevHash);
should.exist(bh.merkleRoot);
should.exist(bh.time);
should.exist(bh.bits);
should.exist(bh.nonce);
});
});
describe('#toJSON', function() {
it('should set all the variables', function() {
var json = bh.toJSON();
should.exist(json.version);
should.exist(json.prevHash);
should.exist(json.merkleRoot);
should.exist(json.time);
should.exist(json.bits);
should.exist(json.nonce);
});
});
describe('#fromJSON', function() {
it('should parse this known json string', function() {
var jsonString = JSON.stringify({
version: version,
prevHash: prevblockidbuf,
merkleRoot: merklerootbuf,
time: time,
bits: bits,
nonce: nonce
});
var json = new BlockHeader(JSON.parse(jsonString));
should.exist(json.version);
should.exist(json.prevHash);
should.exist(json.merkleRoot);
should.exist(json.time);
should.exist(json.bits);
should.exist(json.nonce);
});
});
describe('#fromString/#toString', function() {
it('should output/input a block hex string', function() {
var b = BlockHeader.fromString(bhhex);
b.toString().should.equal(bhhex);
});
});
describe('#fromBuffer', function() {
it('should parse this known buffer', function() {
BlockHeader.fromBuffer(bhbuf).toBuffer().toString('hex').should.equal(bhhex);
});
});
describe('#fromBufferReader', function() {
it('should parse this known buffer', function() {
BlockHeader.fromBufferReader(BufferReader(bhbuf)).toBuffer().toString('hex').should.equal(bhhex);
});
});
describe('#toBuffer', function() {
it('should output this known buffer', function() {
BlockHeader.fromBuffer(bhbuf).toBuffer().toString('hex').should.equal(bhhex);
});
});
describe('#toBufferWriter', function() {
it('should output this known buffer', function() {
BlockHeader.fromBuffer(bhbuf).toBufferWriter().concat().toString('hex').should.equal(bhhex);
});
it('doesn\'t create a bufferWriter if one provided', function() {
var writer = new BufferWriter();
var blockHeader = BlockHeader.fromBuffer(bhbuf);
blockHeader.toBufferWriter(writer).should.equal(writer);
});
});
describe('#inspect', function() {
    it('should return the correct inspect string for the block', function() {
var block = BlockHeader.fromRawBlock(dataRawBlockBinary);
block.inspect().should.equal('<BlockHeader '+dataRawId+'>');
});
});
describe('#fromRawBlock', function() {
it('should instantiate from a raw block binary', function() {
var x = BlockHeader.fromRawBlock(dataRawBlockBinary);
x.version.should.equal(4);
new BN(x.bits).toString('hex').should.equal('1d5298ed');
});
it('should instantiate from raw block buffer', function() {
var x = BlockHeader.fromRawBlock(dataRawBlockBuffer);
x.version.should.equal(4);
new BN(x.bits).toString('hex').should.equal('1d5298ed');
});
});
describe('#validTimestamp', function() {
var x = BlockHeader.fromRawBlock(dataRawBlockBuffer);
    it('should validate timestamp as true', function() {
var valid = x.validTimestamp(x);
valid.should.equal(true);
});
it('should validate timestamp as false', function() {
x.time = Math.round(new Date().getTime() / 1000) + BlockHeader.Constants.MAX_TIME_OFFSET + 100;
var valid = x.validTimestamp(x);
valid.should.equal(false);
});
});
describe('#validProofOfWork', function() {
it('should validate proof-of-work as true', function() {
var x = BlockHeader.fromRawBlock(dataRawBlockBuffer);
var valid = x.validProofOfWork(x);
valid.should.equal(true);
});
    it('should validate proof of work as false because of incorrect proof of work', function() {
var x = BlockHeader.fromRawBlock(dataRawBlockBuffer);
var nonce = x.nonce;
x.nonce = 0;
var valid = x.validProofOfWork(x);
valid.should.equal(false);
x.nonce = nonce;
});
});
describe('#getDifficulty', function() {
it('should get the correct difficulty for block 19976', function() {
var x = BlockHeader.fromRawBlock(dataRawBlockBuffer);
x.bits.should.equal(0x1d5298ed);
x.getDifficulty().should.equal(121067.3);
});
it('should get the correct difficulty for testnet block 52065', function() {
var x = new BlockHeader({
bits: 0x1e01594c
});
x.getDifficulty().should.equal(2896.01);
});
it('should get the correct difficulty for livenet block 273043', function() {
var x = new BlockHeader({
bits: 0x1b1c9b89
});
x.getDifficulty().should.equal(2290.82808262);
});
it('should get the correct difficulty for livenet block 330000', function() {
var x = new BlockHeader({
bits: 0x1b20dc66
});
x.getDifficulty().should.equal(1994.31352718);
});
it('should use exponent notation if difficulty is larger than Javascript number', function() {
var x = new BlockHeader({
bits: 0x0900c2a8
});
x.getDifficulty().should.equal(1.9220482782645836e+48);
});
});
it('coverage: caches the "_id" property', function() {
var blockHeader = BlockHeader.fromRawBlock(dataRawBlockBuffer);
blockHeader.id.should.equal(blockHeader.id);
});
});
|
import {createMuiTheme} from '@material-ui/core';
import * as _ from 'lodash';
import {legacyTypography} from './overrides/typography';
import {legacyButton, legacyIconButton} from './overrides/button';
import {legacyCheckbox} from './overrides/checkbox';
import {legacyCollapse} from './overrides/collapse';
import {legacyDialogActions, legacyDialogContent, legacyDialogContentText, legacyDialogTitle} from './overrides/dialog';
import {legacyFormControl, legacyFormControlLabel, legacyFormHelperText} from './overrides/form';
import {legacyInput} from './overrides/input';
import {legacyListItemText} from './overrides/list';
import {legacyMenuItem} from './overrides/menu';
import {legacyPaper} from './overrides/paper';
import {legacySelect} from './overrides/select';
import {legacySwitch} from './overrides/switch';
import {legacyTableCell, legacyTableRow} from './overrides/table';
import {commonBaseThemeConfig} from '../common-base/config';
const legacyThemeConfig = {
overrides: {
MuiButton: legacyButton,
MuiIconButton: legacyIconButton,
MuiCheckbox: legacyCheckbox,
MuiCollapse: legacyCollapse,
MuiDialogTitle: legacyDialogTitle,
MuiDialogContent: legacyDialogContent,
MuiDialogContentText: legacyDialogContentText,
MuiDialogActions: legacyDialogActions,
MuiFormControl: legacyFormControl,
MuiFormHelperText: legacyFormHelperText,
MuiFormControlLabel: legacyFormControlLabel,
MuiInput: legacyInput,
MuiListItemText: legacyListItemText,
MuiMenuItem: legacyMenuItem,
MuiPaper: legacyPaper,
MuiSelect: legacySelect,
MuiSwitch: legacySwitch,
MuiTableRow: legacyTableRow,
MuiTableCell: legacyTableCell,
MuiTypography: legacyTypography
}
};
const legacyTheme = createMuiTheme(_.merge({}, commonBaseThemeConfig, legacyThemeConfig));
export {legacyTheme};
|
import torch
import numpy as np
def svd_decomposition(tensor):
if len(tensor.shape) > 2:
raise RuntimeError("Only 2dim tensors can be decomposed with SVD")
if tensor.shape[0] < tensor.shape[1]:
raise RuntimeError(f"SVD requires the number of datapoints ({tensor.shape[0]}) to be larger than the number of neurons ({tensor.shape[1]}).")
vars_mean = tensor.mean(0)
tensor -= vars_mean
u, s, v = tensor.svd()
tensor += vars_mean
return u, s, v
def svd_reduction(tensor, var_fract_kept=.99):
'''
    Performs an SVD reduction of the given tensor, keeping the leading singular values that account for a var_fract_kept fraction of the total variance (default .99)
'''
u, s, _ = svd_decomposition(tensor)
var = s*s
# cumulative proportion of variance explained for each singular value
var_cumulative_prop = var.cumsum(0) / var.sum()
    # exclusive end index in s: keep singular values until a var_fract_kept fraction of the variance is covered
max_index_keep = len(np.where(var_cumulative_prop <= var_fract_kept)[0]) + 1
# return the reduction of the layer
return u[:, :max_index_keep] @ torch.diag(s[:max_index_keep])
#return np.dot(u[:,:max_index_keep], np.diag(s[:max_index_keep]))
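# Example usage (hypothetical data): given a (1000, 64) matrix of activations,
# keep just the leading singular directions covering 99% of the variance:
#   acts = torch.randn(1000, 64)
#   reduced = svd_reduction(acts)  # shape (1000, k) with k <= 64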
def reshape_4d_tensor(tensor, neurons_are_channels=False):
'''
Given a 4d torch.Tensor (datapoints, channels, height, width) reshapes it into a 2d tensor.
If neurons_are_channels, the tensor is reshaped s.t.
(datapoints, channels, height, width) -> (datapoints*height*width, channels)
Otherwise, the reshaping is performed such that the columns are the proper neurons of the representation
(datapoints, channels, height, width) -> (datapoints, channels*height*width)
'''
if len(tensor.shape) != 4:
raise AttributeError("The tensor needs to be 4D")
if neurons_are_channels:
dp_dim = tensor.size(0) * tensor.size(2) * tensor.size(3)
ch_dim = tensor.size(1)
return tensor.permute(0, 2, 3, 1).reshape(dp_dim, ch_dim)
else: #classic 4d -> 2d reshaping
return tensor.view(tensor.size(0), -1)
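if __name__ == "__main__":
    # Minimal sanity-check sketch with hypothetical shapes: reshape a small
    # batch of feature maps both ways, then SVD-reduce the channel view.
    x = torch.randn(8, 3, 4, 4)
    by_channels = reshape_4d_tensor(x, neurons_are_channels=True)  # (8*4*4, 3)
    flat = reshape_4d_tensor(x)                                    # (8, 3*4*4)
    print(by_channels.shape, flat.shape)
    print(svd_reduction(by_channels).shape)  # (128, k) with k <= 3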
|
/**
 * webapi.js wraps all of the web requests the app needs.
 * Intended for standalone app testing; once the app joins the site, the webapi should be provided to each app by the site via its config.
*/
import { fetch } from 'mk-utils'
export default {
product: {
query: (option) => fetch.post('/v1/product/query', option)
}
}
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ARGMAX_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ARGMAX_PARSER_H
#include "tools/converter/parser/onnx/onnx_node_parser.h"
#include "tools/converter/parser/onnx/onnx_node_parser_registry.h"
namespace mindspore {
namespace lite {
class OnnxArgMaxParser : public OnnxNodeParser {
public:
OnnxArgMaxParser() : OnnxNodeParser("ArgMax") {}
~OnnxArgMaxParser() override = default;
lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override;
};
} // namespace lite
} // namespace mindspore
#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ARGMAX_PARSER_H
|
# -*- coding: utf-8 -*-
import hashlib
import json
import uuid
from datetime import datetime, timedelta
from io import BytesIO
import pytest
from mock import patch
from playhouse.test_utils import assert_query_count
from app import docker_v2_signing_key, storage
from data import model
from data.database import (
TagManifestLabelMap,
TagManifestToManifest,
Manifest,
ManifestBlob,
ManifestLegacyImage,
ManifestLabel,
TagManifest,
TagManifestLabel,
Tag,
TagToRepositoryTag,
ImageStorageLocation,
)
from data.cache.impl import InMemoryDataModelCache
from data.registry_model.registry_oci_model import OCIModel
from data.registry_model.datatypes import RepositoryReference
from data.registry_model.blobuploader import upload_blob, BlobUploadSettings
from data.model.oci.retriever import RepositoryContentRetriever
from data.model.blob import store_blob_record_and_temp_link
from image.shared.types import ManifestImageLayer
from image.docker.schema1 import (
DockerSchema1ManifestBuilder,
DOCKER_SCHEMA1_CONTENT_TYPES,
DockerSchema1Manifest,
)
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
from image.docker.schema2.list import DockerSchema2ManifestListBuilder
from image.oci.manifest import OCIManifestBuilder
from image.oci.index import OCIIndexBuilder
from util.bytes import Bytes
from test.fixtures import *
@pytest.fixture(
params=[OCIModel(),]
)
def registry_model(request, initialized_db):
return request.param
@pytest.fixture()
def oci_model(initialized_db):
return OCIModel()
@pytest.mark.parametrize(
"names, expected",
[
(["unknown"], None),
(["latest"], {"latest"}),
(["latest", "prod"], {"latest", "prod"}),
(["latest", "prod", "another"], {"latest", "prod"}),
(["foo", "prod"], {"prod"}),
],
)
def test_find_matching_tag(names, expected, registry_model):
repo = model.repository.get_repository("devtable", "simple")
repository_ref = RepositoryReference.for_repo_obj(repo)
found = registry_model.find_matching_tag(repository_ref, names)
if expected is None:
assert found is None
else:
assert found.name in expected
assert found.repository.name == "simple"
@pytest.mark.parametrize(
"repo_namespace, repo_name, expected",
[("devtable", "simple", {"latest", "prod"}), ("buynlarge", "orgrepo", {"latest", "prod"}),],
)
def test_get_most_recent_tag(repo_namespace, repo_name, expected, registry_model):
repo = model.repository.get_repository(repo_namespace, repo_name)
repository_ref = RepositoryReference.for_repo_obj(repo)
found = registry_model.get_most_recent_tag(repository_ref)
if expected is None:
assert found is None
else:
assert found.name in expected
@pytest.mark.parametrize(
"repo_namespace, repo_name, expected",
[
("devtable", "simple", True),
("buynlarge", "orgrepo", True),
("buynlarge", "unknownrepo", False),
],
)
def test_lookup_repository(repo_namespace, repo_name, expected, registry_model):
repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
if expected:
assert repo_ref
else:
assert repo_ref is None
@pytest.mark.parametrize(
"repo_namespace, repo_name", [("devtable", "simple"), ("buynlarge", "orgrepo"),]
)
def test_lookup_manifests(repo_namespace, repo_name, registry_model):
repo = model.repository.get_repository(repo_namespace, repo_name)
repository_ref = RepositoryReference.for_repo_obj(repo)
found_tag = registry_model.find_matching_tag(repository_ref, ["latest"])
found_manifest = registry_model.get_manifest_for_tag(found_tag)
found = registry_model.lookup_manifest_by_digest(repository_ref, found_manifest.digest)
assert found._db_id == found_manifest._db_id
assert found.digest == found_manifest.digest
schema1_parsed = registry_model.get_schema1_parsed_manifest(found, "foo", "bar", "baz", storage)
assert schema1_parsed is not None
def test_lookup_unknown_manifest(registry_model):
repo = model.repository.get_repository("devtable", "simple")
repository_ref = RepositoryReference.for_repo_obj(repo)
found = registry_model.lookup_manifest_by_digest(repository_ref, "sha256:deadbeef")
assert found is None
def test_manifest_labels(registry_model):
repo = model.repository.get_repository("devtable", "simple")
repository_ref = RepositoryReference.for_repo_obj(repo)
found_tag = registry_model.find_matching_tag(repository_ref, ["latest"])
found_manifest = registry_model.get_manifest_for_tag(found_tag)
# Create a new label.
created = registry_model.create_manifest_label(found_manifest, "foo", "bar", "api")
assert created.key == "foo"
assert created.value == "bar"
assert created.source_type_name == "api"
assert created.media_type_name == "text/plain"
# Ensure we can look it up.
assert registry_model.get_manifest_label(found_manifest, created.uuid) == created
# Ensure it is in our list of labels.
assert created in registry_model.list_manifest_labels(found_manifest)
assert created in registry_model.list_manifest_labels(found_manifest, key_prefix="fo")
# Ensure it is *not* in our filtered list.
assert created not in registry_model.list_manifest_labels(found_manifest, key_prefix="ba")
# Delete the label and ensure it is gone.
assert registry_model.delete_manifest_label(found_manifest, created.uuid)
assert registry_model.get_manifest_label(found_manifest, created.uuid) is None
assert created not in registry_model.list_manifest_labels(found_manifest)
def test_manifest_label_handlers(registry_model):
repo = model.repository.get_repository("devtable", "simple")
repository_ref = RepositoryReference.for_repo_obj(repo)
found_tag = registry_model.get_repo_tag(repository_ref, "latest")
found_manifest = registry_model.get_manifest_for_tag(found_tag)
# Ensure the tag has no expiration.
assert found_tag.lifetime_end_ts is None
# Create a new label with an expires-after.
registry_model.create_manifest_label(found_manifest, "quay.expires-after", "2h", "api")
# Ensure the tag now has an expiration.
updated_tag = registry_model.get_repo_tag(repository_ref, "latest")
assert updated_tag.lifetime_end_ts == (updated_tag.lifetime_start_ts + (60 * 60 * 2))
def test_batch_labels(registry_model):
repo = model.repository.get_repository("devtable", "history")
repository_ref = RepositoryReference.for_repo_obj(repo)
found_tag = registry_model.find_matching_tag(repository_ref, ["latest"])
found_manifest = registry_model.get_manifest_for_tag(found_tag)
with registry_model.batch_create_manifest_labels(found_manifest) as add_label:
add_label("foo", "1", "api")
add_label("bar", "2", "api")
add_label("baz", "3", "api")
# Ensure we can look them up.
assert len(registry_model.list_manifest_labels(found_manifest)) == 3
@pytest.mark.parametrize(
"repo_namespace, repo_name",
[
("devtable", "simple"),
("devtable", "complex"),
("devtable", "history"),
("buynlarge", "orgrepo"),
],
)
def test_repository_tags(repo_namespace, repo_name, registry_model):
repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
tags = registry_model.list_all_active_repository_tags(repository_ref)
assert len(tags)
tags_map = registry_model.get_legacy_tags_map(repository_ref, storage)
for tag in tags:
found_tag = registry_model.get_repo_tag(repository_ref, tag.name)
assert found_tag == tag
retriever = RepositoryContentRetriever(repository_ref.id, storage)
legacy_image = tag.manifest.lookup_legacy_image(0, retriever)
found_image = registry_model.get_legacy_image(
repository_ref, found_tag.manifest.legacy_image_root_id, storage
)
if found_image is not None:
assert found_image.docker_image_id == legacy_image.docker_image_id
assert tags_map[tag.name] == found_image.docker_image_id
@pytest.mark.parametrize(
"namespace, name, expected_tag_count, has_expired",
[
("devtable", "simple", 2, False),
("devtable", "history", 2, True),
("devtable", "gargantuan", 8, False),
("public", "publicrepo", 1, False),
],
)
@pytest.mark.parametrize("with_size_fallback", [False, True,])
def test_repository_tag_history(
namespace, name, expected_tag_count, has_expired, registry_model, with_size_fallback
):
# Pre-cache media type loads to ensure consistent query count.
Manifest.media_type.get_name(1)
# If size fallback is requested, delete the sizes on the manifest rows.
if with_size_fallback:
Manifest.update(layers_compressed_size=None).execute()
repository_ref = registry_model.lookup_repository(namespace, name)
with assert_query_count(2 if with_size_fallback else 1):
history, has_more = registry_model.list_repository_tag_history(repository_ref)
assert not has_more
assert len(history) == expected_tag_count
for tag in history:
# Retrieve the manifest to ensure it doesn't issue extra queries.
tag.manifest
# Verify that looking up the size doesn't issue extra queries.
tag.manifest_layers_size
if has_expired:
# Ensure the latest tag is marked expired, since there is an expired one.
with assert_query_count(1):
assert registry_model.has_expired_tag(repository_ref, "latest")
def test_repository_tag_history_future_expires(registry_model):
# Set the expiration of a tag to the future.
repository_ref = registry_model.lookup_repository("devtable", "simple")
tag = registry_model.get_repo_tag(repository_ref, "latest")
registry_model.change_repository_tag_expiration(tag, datetime.utcnow() + timedelta(days=7))
# List the tag history and ensure the tag is returned with the correct expiration.
history, has_more = registry_model.list_repository_tag_history(repository_ref)
assert not has_more
assert history
for tag in history:
if tag.name == "latest":
assert tag.lifetime_end_ms is not None
@pytest.mark.parametrize(
"repositories, expected_tag_count",
[([], 0), ([("devtable", "simple"), ("devtable", "building")], 1),],
)
def test_get_most_recent_tag_lifetime_start(repositories, expected_tag_count, registry_model):
    last_modified_map = registry_model.get_most_recent_tag_lifetime_start(
        [registry_model.lookup_repository(namespace, name) for namespace, name in repositories]
    )
assert len(last_modified_map) == expected_tag_count
for repo_id, last_modified in list(last_modified_map.items()):
tag = registry_model.get_most_recent_tag(RepositoryReference.for_id(repo_id))
assert last_modified == tag.lifetime_start_ms // 1000
@pytest.mark.parametrize(
"repo_namespace, repo_name",
[
("devtable", "simple"),
("devtable", "complex"),
("devtable", "history"),
("buynlarge", "orgrepo"),
],
)
@pytest.mark.parametrize("via_manifest", [False, True,])
def test_delete_tags(repo_namespace, repo_name, via_manifest, registry_model):
repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
tags = registry_model.list_all_active_repository_tags(repository_ref)
assert len(tags)
# Save history before the deletions.
previous_history, _ = registry_model.list_repository_tag_history(repository_ref, size=1000)
assert len(previous_history) >= len(tags)
# Delete every tag in the repository.
for tag in tags:
if via_manifest:
assert registry_model.delete_tag(repository_ref, tag.name)
else:
manifest = registry_model.get_manifest_for_tag(tag)
if manifest is not None:
registry_model.delete_tags_for_manifest(manifest)
# Make sure the tag is no longer found.
with assert_query_count(1):
found_tag = registry_model.get_repo_tag(repository_ref, tag.name)
assert found_tag is None
# Ensure all tags have been deleted.
tags = registry_model.list_all_active_repository_tags(repository_ref)
assert not len(tags)
# Ensure that the tags all live in history.
history, _ = registry_model.list_repository_tag_history(repository_ref, size=1000)
assert len(history) == len(previous_history)
@pytest.mark.parametrize("use_manifest", [True, False,])
def test_retarget_tag_history(use_manifest, registry_model):
repository_ref = registry_model.lookup_repository("devtable", "history")
history, _ = registry_model.list_repository_tag_history(repository_ref)
if use_manifest:
manifest_or_legacy_image = registry_model.lookup_manifest_by_digest(
repository_ref, history[0].manifest_digest, allow_dead=True
)
else:
manifest_or_legacy_image = registry_model.get_legacy_image(
repository_ref, history[0].manifest.legacy_image_root_id, storage
)
# Retarget the tag.
assert manifest_or_legacy_image
updated_tag = registry_model.retarget_tag(
repository_ref,
"latest",
manifest_or_legacy_image,
storage,
docker_v2_signing_key,
is_reversion=True,
)
# Ensure the tag has changed targets.
if use_manifest:
assert updated_tag.manifest_digest == manifest_or_legacy_image.digest
else:
assert updated_tag.manifest.legacy_image_root_id == manifest_or_legacy_image.docker_image_id
# Ensure history has been updated.
new_history, _ = registry_model.list_repository_tag_history(repository_ref)
assert len(new_history) == len(history) + 1
def test_change_repository_tag_expiration(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
tag = registry_model.get_repo_tag(repository_ref, "latest")
assert tag.lifetime_end_ts is None
new_datetime = datetime.utcnow() + timedelta(days=2)
previous, okay = registry_model.change_repository_tag_expiration(tag, new_datetime)
assert okay
assert previous is None
tag = registry_model.get_repo_tag(repository_ref, "latest")
assert tag.lifetime_end_ts is not None
def test_get_security_status(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
tags = registry_model.list_all_active_repository_tags(repository_ref)
assert len(tags)
for tag in tags:
legacy_image = registry_model.get_legacy_image(
repository_ref, tag.manifest.legacy_image_root_id, storage
)
assert legacy_image
assert registry_model.get_security_status(legacy_image)
registry_model.reset_security_status(legacy_image)
assert registry_model.get_security_status(legacy_image)
@pytest.fixture()
def clear_rows(initialized_db):
# Remove all new-style rows so we can backfill.
TagToRepositoryTag.delete().execute()
Tag.delete().execute()
TagManifestLabelMap.delete().execute()
ManifestLabel.delete().execute()
ManifestBlob.delete().execute()
ManifestLegacyImage.delete().execute()
TagManifestToManifest.delete().execute()
Manifest.delete().execute()
TagManifestLabel.delete().execute()
TagManifest.delete().execute()
@pytest.mark.parametrize(
"namespace, expect_enabled", [("devtable", True), ("buynlarge", True), ("disabled", False),]
)
def test_is_namespace_enabled(namespace, expect_enabled, registry_model):
assert registry_model.is_namespace_enabled(namespace) == expect_enabled
@pytest.mark.parametrize(
"repo_namespace, repo_name",
[
("devtable", "simple"),
("devtable", "complex"),
("devtable", "history"),
("buynlarge", "orgrepo"),
],
)
def test_layers_and_blobs(repo_namespace, repo_name, registry_model):
repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
tags = registry_model.list_all_active_repository_tags(repository_ref)
assert tags
for tag in tags:
manifest = registry_model.get_manifest_for_tag(tag)
assert manifest
parsed = manifest.get_parsed_manifest()
assert parsed
layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage)
assert layers
layers = registry_model.list_parsed_manifest_layers(
repository_ref, parsed, storage, include_placements=True
)
assert layers
for index, manifest_layer in enumerate(layers):
assert manifest_layer.blob.storage_path
assert manifest_layer.blob.placements
repo_blob = registry_model.get_repo_blob_by_digest(
repository_ref, manifest_layer.blob.digest
)
assert repo_blob.digest == manifest_layer.blob.digest
assert manifest_layer.estimated_size(1) is not None
assert isinstance(manifest_layer.layer_info, ManifestImageLayer)
blobs = registry_model.get_manifest_local_blobs(manifest, storage, include_placements=True)
assert {b.digest for b in blobs} == set(parsed.local_blob_digests)
def test_manifest_remote_layers(oci_model):
# Create a config blob for testing.
config_json = json.dumps(
{
"config": {},
"rootfs": {"type": "layers", "diff_ids": []},
"history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",},
],
}
)
app_config = {"TESTING": True}
repository_ref = oci_model.lookup_repository("devtable", "simple")
with upload_blob(repository_ref, storage, BlobUploadSettings(500, 500)) as upload:
upload.upload_chunk(app_config, BytesIO(config_json.encode("utf-8")))
blob = upload.commit_to_blob(app_config)
# Create the manifest in the repo.
builder = DockerSchema2ManifestBuilder()
builder.set_config_digest(blob.digest, blob.compressed_size)
builder.add_layer("sha256:abcd", 1234, urls=["http://hello/world"])
manifest = builder.build()
created_manifest, _ = oci_model.create_manifest_and_retarget_tag(
repository_ref, manifest, "sometag", storage
)
assert created_manifest
layers = oci_model.list_parsed_manifest_layers(
repository_ref, created_manifest.get_parsed_manifest(), storage
)
assert len(layers) == 1
assert layers[0].layer_info.is_remote
assert layers[0].layer_info.urls == ["http://hello/world"]
assert layers[0].blob is None
def test_blob_uploads(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
blob_upload = registry_model.create_blob_upload(
repository_ref, str(uuid.uuid4()), "local_us", {"some": "metadata"}
)
assert blob_upload
assert blob_upload.storage_metadata == {"some": "metadata"}
assert blob_upload.location_name == "local_us"
# Ensure we can find the blob upload.
assert registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id) == blob_upload
# Update and ensure the changes are saved.
assert registry_model.update_blob_upload(
blob_upload, 1, {"new": "metadata"}, 2, 3, blob_upload.sha_state,
)
updated = registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
assert updated
assert updated.uncompressed_byte_count == 1
assert updated.storage_metadata == {"new": "metadata"}
assert updated.byte_count == 2
assert updated.chunk_count == 3
# Delete the upload.
registry_model.delete_blob_upload(blob_upload)
# Ensure it can no longer be found.
assert not registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
def test_commit_blob_upload(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
blob_upload = registry_model.create_blob_upload(
repository_ref, str(uuid.uuid4()), "local_us", {"some": "metadata"}
)
# Commit the blob upload and make sure it is written as a blob.
digest = "sha256:" + hashlib.sha256(b"hello").hexdigest()
blob = registry_model.commit_blob_upload(blob_upload, digest, 60)
assert blob.digest == digest
# Ensure the upload can no longer be found.
assert not registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
def test_mount_blob_into_repository(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
manifest = registry_model.get_manifest_for_tag(latest_tag)
target_repository_ref = registry_model.lookup_repository("devtable", "complex")
blobs = registry_model.get_manifest_local_blobs(manifest, storage, include_placements=True)
assert blobs
for blob in blobs:
# Ensure the blob doesn't exist under the repository.
assert not registry_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)
# Mount the blob into the repository.
assert registry_model.mount_blob_into_repository(blob, target_repository_ref, 60)
# Ensure it now exists.
found = registry_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)
assert found == blob
class SomeException(Exception):
pass
def test_get_cached_repo_blob(registry_model):
model_cache = InMemoryDataModelCache()
repository_ref = registry_model.lookup_repository("devtable", "simple")
latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
manifest = registry_model.get_manifest_for_tag(latest_tag)
blobs = registry_model.get_manifest_local_blobs(manifest, storage, include_placements=True)
assert blobs
blob = blobs[0]
# Load a blob to add it to the cache.
found = registry_model.get_cached_repo_blob(model_cache, "devtable", "simple", blob.digest)
assert found.digest == blob.digest
assert found.uuid == blob.uuid
assert found.compressed_size == blob.compressed_size
assert found.uncompressed_size == blob.uncompressed_size
assert found.uploading == blob.uploading
assert found.placements == blob.placements
# Disconnect from the database by overwriting the connection.
def fail(x, y):
raise SomeException("Not connected!")
with patch(
"data.registry_model.registry_oci_model.model.oci.blob.get_repository_blob_by_digest", fail,
):
# Make sure we can load again, which should hit the cache.
cached = registry_model.get_cached_repo_blob(model_cache, "devtable", "simple", blob.digest)
assert cached.digest == blob.digest
assert cached.uuid == blob.uuid
assert cached.compressed_size == blob.compressed_size
assert cached.uncompressed_size == blob.uncompressed_size
assert cached.uploading == blob.uploading
assert cached.placements == blob.placements
# Try another blob, which should fail since the DB is not connected and the cache
# does not contain the blob.
with pytest.raises(SomeException):
registry_model.get_cached_repo_blob(
model_cache, "devtable", "simple", "some other digest"
)
def test_create_manifest_and_retarget_tag(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
manifest = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()
builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
builder.add_layer(manifest.blob_digests[0], '{"id": "%s"}' % "someid")
sample_manifest = builder.build(docker_v2_signing_key)
assert sample_manifest is not None
another_manifest, tag = registry_model.create_manifest_and_retarget_tag(
repository_ref, sample_manifest, "anothertag", storage
)
assert another_manifest is not None
assert tag is not None
assert tag.name == "anothertag"
assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
def test_get_schema1_parsed_manifest(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
manifest = registry_model.get_manifest_for_tag(latest_tag)
assert registry_model.get_schema1_parsed_manifest(manifest, "", "", "", storage)
def test_convert_manifest(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
manifest = registry_model.get_manifest_for_tag(latest_tag)
mediatypes = DOCKER_SCHEMA1_CONTENT_TYPES
assert registry_model.convert_manifest(manifest, "", "", "", mediatypes, storage)
mediatypes = []
assert registry_model.convert_manifest(manifest, "", "", "", mediatypes, storage) is None
def test_create_manifest_and_retarget_tag_with_labels(registry_model):
repository_ref = registry_model.lookup_repository("devtable", "simple")
latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
manifest = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()
json_metadata = {
"id": "someid",
"config": {"Labels": {"quay.expires-after": "2w",},},
}
builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
builder.add_layer(manifest.blob_digests[0], json.dumps(json_metadata))
sample_manifest = builder.build(docker_v2_signing_key)
assert sample_manifest is not None
another_manifest, tag = registry_model.create_manifest_and_retarget_tag(
repository_ref, sample_manifest, "anothertag", storage
)
assert another_manifest is not None
assert tag is not None
assert tag.name == "anothertag"
assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
# Ensure the labels were applied.
assert tag.lifetime_end_ms is not None
def _populate_blob(digest):
location = ImageStorageLocation.get(name="local_us")
store_blob_record_and_temp_link("devtable", "simple", digest, location, 1, 120)
def test_known_issue_schema1(registry_model):
test_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(test_dir, "../../../image/docker/test/validate_manifest_known_issue.json")
with open(path, "r") as f:
manifest_bytes = f.read()
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes))
for blob_digest in manifest.local_blob_digests:
_populate_blob(blob_digest)
digest = manifest.digest
assert digest == "sha256:44518f5a4d1cb5b7a6347763116fb6e10f6a8563b6c40bb389a0a982f0a9f47a"
# Create the manifest in the database.
repository_ref = registry_model.lookup_repository("devtable", "simple")
created_manifest, _ = registry_model.create_manifest_and_retarget_tag(
repository_ref, manifest, "latest", storage
)
assert created_manifest
assert created_manifest.digest == manifest.digest
assert (
created_manifest.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
)
# Look it up again and validate.
found = registry_model.lookup_manifest_by_digest(
repository_ref, manifest.digest, allow_dead=True
)
assert found
assert found.digest == digest
assert found.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
assert found.get_parsed_manifest().digest == digest
def test_unicode_emoji(registry_model):
builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest")
builder.add_layer(
"sha256:abcde", json.dumps({"id": "someid", "author": "😱",}, ensure_ascii=False)
)
manifest = builder.build(ensure_ascii=False)
manifest._validate()
for blob_digest in manifest.local_blob_digests:
_populate_blob(blob_digest)
# Create the manifest in the database.
repository_ref = registry_model.lookup_repository("devtable", "simple")
created_manifest, _ = registry_model.create_manifest_and_retarget_tag(
repository_ref, manifest, "latest", storage
)
assert created_manifest
assert created_manifest.digest == manifest.digest
assert (
created_manifest.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
)
# Look it up again and validate.
found = registry_model.lookup_manifest_by_digest(
repository_ref, manifest.digest, allow_dead=True
)
assert found
assert found.digest == manifest.digest
assert found.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
assert found.get_parsed_manifest().digest == manifest.digest
@pytest.mark.parametrize("test_cached", [False, True,])
def test_lookup_active_repository_tags(test_cached, oci_model):
repository_ref = oci_model.lookup_repository("devtable", "simple")
latest_tag = oci_model.get_repo_tag(repository_ref, "latest")
manifest = oci_model.get_manifest_for_tag(latest_tag)
tag_count = 500
# Create a bunch of tags.
tags_expected = set()
for index in range(0, tag_count):
tags_expected.add("somenewtag%s" % index)
oci_model.retarget_tag(
repository_ref, "somenewtag%s" % index, manifest, storage, docker_v2_signing_key
)
assert tags_expected
# List the tags.
tags_found = set()
tag_id = None
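    # Keyset-paginate: fetch pages of 11 tags, consume the first 10, and use the
    # 11th tag's id as the cursor; a page shorter than 11 means we've reached the end.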
while True:
if test_cached:
model_cache = InMemoryDataModelCache()
tags = oci_model.lookup_cached_active_repository_tags(
model_cache, repository_ref, tag_id, 11
)
else:
tags = oci_model.lookup_active_repository_tags(repository_ref, tag_id, 11)
assert len(tags) <= 11
for tag in tags[0:10]:
assert tag.name not in tags_found
if tag.name in tags_expected:
tags_found.add(tag.name)
tags_expected.remove(tag.name)
if len(tags) < 11:
break
tag_id = tags[10].id
# Make sure we've found all the tags.
assert tags_found
assert not tags_expected
def test_create_manifest_with_temp_tag(initialized_db, registry_model):
builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest")
builder.add_layer(
"sha256:abcde", json.dumps({"id": "someid", "author": "some user",}, ensure_ascii=False)
)
manifest = builder.build(ensure_ascii=False)
for blob_digest in manifest.local_blob_digests:
_populate_blob(blob_digest)
# Create the manifest in the database.
repository_ref = registry_model.lookup_repository("devtable", "simple")
created = registry_model.create_manifest_with_temp_tag(repository_ref, manifest, 300, storage)
assert created.digest == manifest.digest
# Ensure it cannot be found normally, since it is simply temp-tagged.
assert registry_model.lookup_manifest_by_digest(repository_ref, manifest.digest) is None
    # Ensure it can be found when allowing dead manifests, which means it is only temp-tagged.
found = registry_model.lookup_manifest_by_digest(
repository_ref, manifest.digest, allow_dead=True
)
assert found is not None
|
import sys
import json
import docker
from magiclog import log
handle_types = ['container']
stop_status = ['stop']
container_status = ['start']
log_events = ['config',
'container',
'network',
'secret',
'service',
'volume']
class KzPilot:
def __init__(self,
url='unix:///var/run/docker.sock'):
self.url = url
self.data = dict()
def start(self):
self.client = self.clientConnect()
for event in self.client.events(decode=True):
event_type = get_event_type(event)
if event_type in log_events:
self.logEvent(event, event_type)
    def clientConnect(self):
        try:
            return docker.DockerClient(base_url=self.url)
        except docker.errors.DockerException as err:
            log.error('Failed to connect to Docker daemon at {}: {}'.format(self.url, err))
            sys.exit(1)
def logEvent(self, event, event_type):
if 'Actor' in event:
actor = event['Actor']
else:
actor = ''
log.debug('{} {}: {}'.format(event['Action'], event_type, actor))
def shutdown(self):
if self.client is not None:
self.client.close()
def get_event_type(event):
    return event['Type']
def get_event_time(event):
    return event['time']
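# Minimal usage sketch (assumes a Docker daemon listening on the default socket):
if __name__ == '__main__':
    pilot = KzPilot()
    try:
        pilot.start()
    except KeyboardInterrupt:
        pilot.shutdown()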
|
/**
 * @file Mock data for the novel list's infinite-scroll async API
 * @author xuexb <fe.xiaowu@gmail.com>
 */
const data = {
"status": 0,
"data": {
"items": [
{
"img": "img/titianxingdao.png",
"author": "石章鱼",
"desc": "风本无形,我欲猎风!九州笑傲,替天行盗! 他风华正茂,她国色天香,他本该书生意气,挥斥方遒,她本该巧笑倩兮,葬花弄月。然生于乱世,国恨家仇,山河破碎,列强割据,先祖蒙羞。于是他丢掉了诗书,她拿起了刀枪,护龙脉,探九幽,夺天棺,战妖星。十步杀一人,千里不留行。事了拂衣去,深藏功与名!"
},
{
"img": "img/wuhengtiandi.png",
"author": "无巫",
"desc": "人天生有九道脉门,九门齐开,能伸手翻云,反手复雨,他天生拥有最强修炼体质,但却天生‘死门’打开,作为必死之人,他能怎样与天争斗,怎么步入修炼之路?"
},
{
"img": "img/xitongjunguncu.png",
"author": "淡娘",
"desc": "一转眼已经在硝烟弥漫的抗战前夕;租界上海滩歌舞升平;从老北京来的旦角轰动江南;谷白练就恰好穿成了那个旦角。。。的疯狂追爱着。最最疯狂的一位。说好的快穿从校园小清新走起呢?谷白练对系统无声抗议。系统弱弱回了句,我不喜欢你的名字。白练,白莲,合该你倒霉。这只是故事的开端而已。"
},
{
"img": "img/xiuluotiandi.png",
"author": "实验小白鼠",
"desc": "八年前,雷霆古城一夜惊变,少城主秦命押入青云宗为仆,二十万民众赶进大青山为奴。八年后,淬灵入武,修罗觉醒,不屈少年逆天崛起。给我一柄刀,可破苍穹,给我一柄剑,可指霄汉。金麟岂是池中物,一遇风云便化龙。当修罗子、不死王、雷霆战尊、古海蛮皇等等一个个封号落在…"
}
]
}
};
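// Simulate network latency: reply via JSONP after a 3-second delay.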
module.exports = (req, res, next) => {
setTimeout(() => res.jsonp(data), 3000);
};
|
from tqdm import tqdm
import numpy as np
from dataclasses import dataclass
from typing import Dict, List, Tuple, Union
import ipdb
import collections
import random
import torch
from copy import deepcopy
from torch.nn.utils.rnn import pad_sequence
from transformers.tokenization_utils_base import BatchEncoding
def _sample_by_model_predict_prob(prob_tensor, labels):
dynamic_mask_predict_prob = prob_tensor.clone()
batch_mask_predict_prob = dynamic_mask_predict_prob.expand(labels.shape[0],
dynamic_mask_predict_prob.shape[0]).to(labels.device)
probability_matrix = batch_mask_predict_prob.gather(1, labels)
avg_model_predict_prob = float(torch.mean(probability_matrix))
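    # Invert the probabilities: tokens the model already predicts well receive a low
    # masking score, while poorly predicted tokens are masked more often.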
probability_matrix = 1 - probability_matrix
return probability_matrix, avg_model_predict_prob
def _sample_by_tfidf(idf_tensor, labels):
token_tf_idfs = []
for label_i, label in enumerate(labels):
token_count = torch.bincount(label) # -> shape: (max_index_of_label(e.g. 49000), )
token_freq_pad = torch.zeros_like(idf_tensor)
token_freq_pad[:len(token_count)] = token_count
token_tf_idfs.append(token_freq_pad)
token_tfs = torch.stack(token_tf_idfs)
token_inver_tfs = 1 / token_tfs
token_tf_idfs = token_inver_tfs * idf_tensor # compute the tf-idf value
probability_matrix = token_tf_idfs.gather(1, labels)
return probability_matrix
def _tf_idf_decay_func(tf_idf_init_prob, step_i, decay=0.998):
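    # Worked example (illustrative): with decay=0.998 the tf-idf warm-up probability
    # drops to ~0.82 after 100 steps (0.998**100 ≈ 0.819) and ~0.37 after 500 steps,
    # so tf-idf sampling gradually hands over to posterior-probability sampling.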
if step_i == 0:
return tf_idf_init_prob
else:
return tf_idf_init_prob * decay ** step_i
@dataclass
class BpeDataCollatorForLanguageModeling:
"""
Data collator used for language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
"""
# use_time_embed = False
def __init__(self,
tokenizer,
use_time_embed,
mlm_probability,
pretrain_task,
clm_sample_n,
use_random_mlm_probability=None,
mlm_prob_min=None,
mlm_prob_max=None,
mask_type='normal',
mask_softmax_t=0.5,
masker_recorder=None,
tf_idf_warmup_decay=None,
is_record_mask_ratio=False,
return_timestamps=True,
softmax_t_decay_mode=None,
part_prob_percent=None,
part_prob_range=None
):
self.tokenizer = tokenizer
self.use_time_embed = use_time_embed
self.mlm_probability = mlm_probability
self.pretrain_task = pretrain_task
self.clm_sample_n = clm_sample_n
self.CLM_MIN_LEN = 32
self.use_random_mlm_probability = use_random_mlm_probability
self.mlm_prob_min = mlm_prob_min
self.mlm_prob_max = mlm_prob_max
self.mask_type = mask_type
self.masker_recorder = masker_recorder
self._mask_softmax_t = mask_softmax_t
self.tf_idf_warmup_decay = tf_idf_warmup_decay
self.is_record_mask_ratio = is_record_mask_ratio
self.return_timestamps = return_timestamps
self.softmax_t_decay_mode = softmax_t_decay_mode
self.softmax_t_range = (0.0001, 0.8)
self.total_training_step = 0
self.part_prob_percent = part_prob_percent
self.part_prob_range = part_prob_range
if self.mask_type == 'part_prob_linear_increase':
pmin, pmax = self.part_prob_range
assert pmin < pmax
self.part_prob_percent = pmin
if self.use_random_mlm_probability is not None:
assert self.mlm_prob_min
assert self.mlm_prob_max
@property
def mask_softmax_t(self):
return max(self._mask_softmax_t, 1e-10)
def _linear_decay_t(self, t_min, t_max, total_step, step):
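        # Linear schedule: returns t_max at step 0 and t_min at step == total_step.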
return (t_min - t_max) / total_step * step + t_max
def adjust_part_prob_percent(self, step_now):
if self.mask_type == 'part_prob_linear_increase':
p_min, p_max = self.part_prob_range
self.part_prob_percent = p_max - self._linear_decay_t(p_min, p_max, self.total_training_step, step_now)
def adjust_mask_softmax_t(self, step_now):
t_min, t_max = self.softmax_t_range
if self.softmax_t_decay_mode in {'linear'}:
self._mask_softmax_t = self._linear_decay_t(t_min, t_max, self.total_training_step, step_now)
        # Concave decay curve
elif self.softmax_t_decay_mode == 'exponential_concave':
tau = 0.2
linear_t = self._linear_decay_t(t_min, t_max, self.total_training_step, step_now)
self._mask_softmax_t = - np.exp(-linear_t / tau) + t_max
        # Convex decay curve
elif self.softmax_t_decay_mode == 'exponential_convex':
tau = 0.2
linear_t = self._linear_decay_t(t_min, t_max, self.total_training_step, step_now)
self._mask_softmax_t = np.exp(-(1 - linear_t) / tau)
elif self.softmax_t_decay_mode == 'by_prob':
self._mask_softmax_t = self.masker_recorder.mean_prob_tensor
# print(f"Set mask softmax t to {self._mask_softmax_t}")
else:
return None
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
if isinstance(examples[0], (dict, BatchEncoding)):
examples = [e["input_ids"] for e in examples]
batch_ids, batch_timestamps = self._tensorize_batch(examples)
if self.pretrain_task == 'mlm':
inputs, labels, batch_timestamps, attention_mask = self.mlm_mask_tokens(batch_ids, batch_timestamps)
elif self.pretrain_task == 'clm':
inputs, labels, batch_timestamps, attention_mask = self.clm_mask_tokens(batch_ids, batch_timestamps)
else:
raise NotImplementedError
if self.return_timestamps:
return {"input_ids": inputs,
"labels": labels,
'timestamps': batch_timestamps,
'attention_mask': attention_mask}
else:
return {"input_ids": inputs,
"labels": labels,
'attention_mask': attention_mask}
def _tensorize_batch(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
):
batch_ids = []
batch_timestamps = []
for cn_char_subword, timestamp_subword in examples:
batch_ids.append(cn_char_subword)
batch_timestamps.append(timestamp_subword)
batch_ids = torch.stack(batch_ids)
return batch_ids, batch_timestamps
#
# # In order to accept both lists of lists and lists of Tensors
# if isinstance(examples[0], (list, tuple)):
# examples = [torch.tensor(e, dtype=torch.long) for e in examples]
# length_of_first = examples[0].size(0)
# are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
# if are_tensors_same_length:
# return torch.stack(examples, dim=0)
# else:
# if self.tokenizer._pad_token is None:
# raise ValueError(
# "You are attempting to pad samples but the tokenizer you are using"
# f" ({self.tokenizer.__class__.__name__}) does not have one."
# )
# return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)
def _compute_pad_len(self, labels):
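        # Note: despite the name, this returns the number of non-pad tokens per sequence,
        # shaped (batch_size, 1) for broadcasting against the probability matrix.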
pad_lens = []
for label in labels:
non_pad_length = len(label[label != self.tokenizer.pad_token_id])
pad_lens.append(non_pad_length)
pad_lens = torch.tensor(pad_lens).unsqueeze(1)
return pad_lens
def _handle_prob_overshoot(self,
probability_matrix,
pad_lens,
overshoot_threshold=1.0):
is_exist_overshoot_indices = bool((probability_matrix > overshoot_threshold).any())
if is_exist_overshoot_indices:
for seq_i, seq_prob in enumerate(probability_matrix):
gt_1_mask = seq_prob > overshoot_threshold
if bool(gt_1_mask.any()):
overshoot_value = int(seq_prob[gt_1_mask])
distribute_value = float((overshoot_value - overshoot_threshold)) / float(pad_lens[seq_i] - 1)
seq_prob[~gt_1_mask] = seq_prob[~gt_1_mask] + distribute_value
seq_prob[gt_1_mask] = overshoot_value
return True
else:
return False
def mlm_mask_tokens(self, inputs: torch.Tensor, batch_timestamps, verbose=False):
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
if self.use_random_mlm_probability:
mlm_probability = random.uniform(self.mlm_prob_min, self.mlm_prob_max)
else:
mlm_probability = self.mlm_probability
self.mlm_probability = mlm_probability
assert 0 < mlm_probability < 1
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
if self.mask_type.startswith('part_prob'):
assert self.part_prob_percent is not None
random_value = random.random()
if random_value < self.part_prob_percent:
mask_type = 'posterior_prob'
else:
mask_type = 'normal'
# print(
# f"random_value: {random_value}, set mask_type to {mask_type}, part_prob_value: {self.part_prob_percent}")
else:
mask_type = self.mask_type
if mask_type in {'posterior_prob', 'tf_idf', 'posterior_prob_with_tf_idf_warmup'}:
if mask_type == 'posterior_prob':
probability_matrix, avg_model_predict_prob = _sample_by_model_predict_prob(
self.masker_recorder.prob_tensor, labels)
print(f"Model avg predict prob: {avg_model_predict_prob}, t: {self.mask_softmax_t}")
elif mask_type == 'tf_idf':
probability_matrix = _sample_by_tfidf(self.masker_recorder.idf_tensor, labels)
elif mask_type == 'posterior_prob_with_tf_idf_warmup':
tf_idf_prob = _tf_idf_decay_func(1.0,
self.masker_recorder.train_step,
decay=self.tf_idf_warmup_decay)
self.masker_recorder.tf_idf_warm_up_probs.append(tf_idf_prob)
random_prob = random.random()
if verbose:
print(f"[Warm up by tfidf] step i: {self.masker_recorder.train_step},"
f" tf_idf_prob: {tf_idf_prob},"
f" random_prob: {random_prob}")
if random_prob < tf_idf_prob:
probability_matrix = _sample_by_tfidf(self.masker_recorder.idf_tensor, labels)
else:
probability_matrix, avg_model_predict_prob = _sample_by_model_predict_prob(
self.masker_recorder.prob_tensor, labels)
else:
raise NotImplementedError
        # Exclude PAD positions, then softmax and scale so that each sequence's
        # expected number of masked tokens is mlm_probability * (non-pad length).
pad_token_indices = torch.where(labels == self.tokenizer.pad_token_id)
        # TODO: computing the non-PAD lengths this way is clumsy, but there is
        # currently no obvious way to avoid the for loop.
pad_lens = self._compute_pad_len(labels)
probability_matrix[pad_token_indices] = float('-inf')
probability_matrix = torch.softmax(probability_matrix / self.mask_softmax_t, dim=1)
probability_matrix = probability_matrix * mlm_probability * pad_lens
# is_overshoot = self._handle_prob_overshoot(probability_matrix,
# pad_lens,
# overshoot_threshold=1.0)
is_overshoot = False
if is_overshoot:
self.masker_recorder.overshoot_count += 1
probability_matrix[probability_matrix >= 1.0] = 1.0
probability_matrix[probability_matrix <= 0.0] = 0.0
# ----------------------------------------------------------------------------------------------------------
# Print for debug
# ----------------------------------------------------------------------------------------------------------
if verbose:
non_pad_token_indices = torch.where(labels != self.tokenizer.pad_token_id)
print(f"[Probability Matrix] min-{torch.min(probability_matrix[non_pad_token_indices])},"
f"max-{torch.max(probability_matrix[non_pad_token_indices])},"
f"avg-{torch.mean(probability_matrix[non_pad_token_indices])},"
f"softmax_t: {self.mask_softmax_t}")
print_masked_indices = torch.bernoulli(probability_matrix).bool()
for pad_len_i, pad_len in enumerate(pad_lens):
print(
f"[Mask ratio-{pad_len_i}]: "
f"{collections.Counter(print_masked_indices[pad_len_i].tolist())[True] / int(pad_len)}")
# ----------------------------------------------------------------------------------------------------------
        try:
            masked_indices = torch.bernoulli(probability_matrix).bool()
        except RuntimeError:
            # Debug aid: drop into the debugger if any probability is out of range.
            ipdb.set_trace()
elif mask_type == 'lowest_prob':
# # ----------------------------------------------------------------------------------------------------------
# # OLD version
# # ----------------------------------------------------------------------------------------------------------
# RANDOM_RATIO = 0.0
# dynamic_mask_predict_prob = self.masker_recorder.prob_tensor.clone()
# # device = dynamic_mask_predict_prob.to(dynamic_mask_predict_prob.device)
# probability_matrix = torch.zeros(labels.shape)
# seq_len = probability_matrix.shape[1]
# pad_start_indices = []
# for label_i, label in enumerate(labels):
# padding_indices = torch.where(label == self.tokenizer.pad_token_id)[0]
# if padding_indices.shape[0] == 0:
# pad_start_index = len(label)
# else:
# pad_start_index = int(padding_indices[0])
# pad_start_indices.append(pad_start_index)
# probability_matrix[label_i] = dynamic_mask_predict_prob[label]
# probability_matrix[label_i][padding_indices] = float('inf')
#
# # label_prob = dynamic_mask_predict_prob[label]
# # label_prob[padding_indices] = float('inf')
# # ipdb.set_trace()
# # label_prob = (1 - RANDOM_RATIO) * label_prob + RANDOM_RATIO * torch.rand((len(label_prob, ))).to(
# # label_prob.device)
# # top_percent_label_indices = torch.argsort(label_prob)[:int(len(label_prob) * self.mlm_probability)]
# # masked_index = torch.zeros_like(label_prob, dtype=int)
# # masked_index[top_percent_label_indices] =
# # masked_index = masked_index.bool()
# # masked_indices.append(masked_index)
# # masked_indices = torch.stack(masked_indices) # batch_size x max_seq_length
#
# probability_matrix = (1 - RANDOM_RATIO) * probability_matrix + RANDOM_RATIO * torch.rand_like(
# probability_matrix)
# top_percent_label_indices = torch.argsort(probability_matrix)[:, :int(seq_len * self.mlm_probability)]
# masked_indices = torch.zeros_like(probability_matrix, dtype=int)
# for masked_index_i, masked_index in enumerate(masked_indices):
# top_percent_label_index = top_percent_label_indices[masked_index_i]
# top_percent_label_index = top_percent_label_index[
# top_percent_label_index < pad_start_indices[masked_index_i]]
# masked_index[top_percent_label_index] = 1
# masked_indices = masked_indices.bool()
# # ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# NEW version
# ----------------------------------------------------------------------------------------------------------
RANDOM_RATIO = 1e-6 # 1e-6
dynamic_mask_predict_prob = self.masker_recorder.prob_tensor.clone()
batch_mask_predict_prob = dynamic_mask_predict_prob.expand(labels.shape[0],
dynamic_mask_predict_prob.shape[0]).to(
labels.device)
probability_matrix = batch_mask_predict_prob.gather(1, labels)
# seq_len = probability_matrix.shape[1]
pad_token_indices = torch.where(labels == self.tokenizer.pad_token_id)
probability_matrix[pad_token_indices] = float('inf') # batch_size x max_seq_len
probability_matrix = (1 - RANDOM_RATIO) * probability_matrix + RANDOM_RATIO * torch.rand_like(
probability_matrix)
pad_lens = self._compute_pad_len(labels)
top_percent_label_indices = []
argsort_probability_matrix = torch.argsort(probability_matrix)
for pad_len_i, pad_len in enumerate(pad_lens):
top_percent_label_indices.append(
argsort_probability_matrix[pad_len_i][:int(pad_len * self.mlm_probability)])
# top_percent_label_indices = torch.argsort(probability_matrix)[:, :int(seq_len * self.mlm_probability)]
temp_indices = torch.cat([torch.full(x.shape, i) for i, x in enumerate(top_percent_label_indices)]).long()
top_percent_label_fancy_index = (temp_indices, torch.cat(top_percent_label_indices))
masked_indices = torch.zeros_like(probability_matrix, dtype=int)
masked_indices[top_percent_label_fancy_index] = 1
masked_indices[pad_token_indices] = 0
masked_indices = masked_indices.bool()
# ----------------------------------------------------------------------------------------------------------
# # ----------------------------------------------------------------------------------------------------------
# # Compute softmax version & compare
# # ----------------------------------------------------------------------------------------------------------
# mask_softmax_t = 0.00001
# probability_matrix_softmax = batch_mask_predict_prob.gather(1, labels)
# probability_matrix_softmax = torch.softmax(probability_matrix_softmax / mask_softmax_t, dim=1)
# probability_matrix_softmax = probability_matrix_softmax * mlm_probability * pad_lens
# masked_indices_softmax = torch.bernoulli(probability_matrix_softmax).bool()
#
# for pad_len_i, pad_len in enumerate(pad_lens):
# print("-" * 78)
# print(
# f"[lowest_prob][Mask ratio-{pad_len_i}]: "
# f"{collections.Counter(masked_indices[pad_len_i].tolist())[True] / int(pad_len)}")
# print(
# f"[Softmax][Mask Mask-{pad_len_i}]: "
# f"{collections.Counter(masked_indices_softmax[pad_len_i].tolist())[True] / int(pad_len)}")
# # ----------------------------------------------------------------------------------------------------------
elif mask_type == 'normal':
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
pad_lens = None
masked_indices = torch.bernoulli(probability_matrix).bool() # batch_size x max_seq_length
else:
raise NotImplementedError
if self.masker_recorder is not None:
if self.masker_recorder.record_snapshot:
self.masker_recorder.step_mask_probabilities.extend(
probability_matrix[probability_matrix > 0.0].tolist())
if self.mask_type == 'part_prob' and mask_type == 'posterior_prob':
keep_N = 16
record_point_N = 200 # For debug
max_sample_per_step = 32
if self.masker_recorder.train_step % max(int(self.masker_recorder.total_steps / record_point_N),
10) == 0:
train_step_counts = collections.Counter(
self.masker_recorder.step_sample_mask_distributions['train_step'])
current_step_count = train_step_counts[self.masker_recorder.train_step]
if current_step_count >= max_sample_per_step:
pass
else:
for label, mask_prob in zip(labels[:keep_N], probability_matrix[:keep_N]):
label = label[label != 1]
mask_prob = mask_prob[:len(label)]
self.masker_recorder.step_sample_mask_distributions['train_step'].append(
self.masker_recorder.train_step)
self.masker_recorder.step_sample_mask_distributions['tokens'].append(
tuple(label.tolist()))
self.masker_recorder.step_sample_mask_distributions['mask_prob'].append(
tuple(mask_prob.tolist()))
self.masker_recorder.step_sample_mask_distributions['softmax_t'].append(
self.mask_softmax_t)
self.masker_recorder.step_sample_mask_distributions['avg_model_prob'].append(
max(avg_model_predict_prob, 1e-10))
# # save label maskes
# self.masker_recorder
if self.is_record_mask_ratio:
if pad_lens is None:
pad_lens = self._compute_pad_len(labels)
for masked_index_i, masked_index in enumerate(masked_indices):
mask_ratio = collections.Counter(masked_index.tolist())[True] / int(pad_lens[masked_index_i])
self.masker_recorder.mask_ratios.append(mask_ratio)
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        # Sample replacements over the vocabulary size (max_len is a sequence length, not a vocab size).
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# Compute Attention Mask
# ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
attention_mask = torch.ones_like(inputs)
attention_mask[inputs.eq(self.tokenizer.pad_token_id)] = 0
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels, batch_timestamps, attention_mask
def clm_mask_tokens(self, inputs: torch.Tensor, batch_timestamps):
"""
Parameters
----------
inputs: tensor, shape: batch_size x max_seq_len
example:
tensor([[15086, 8773, 10116, ..., 1, 1, 1],
[13689, 1683, 1613, ..., 1, 1, 1]])
batch_timestamps: List
Returns
-------
"""
inputs_clone = inputs.clone()
input_lens = [len(x[x != 1]) for x in inputs]
new_inputs = []
new_labels = []
attention_masks = []
for i, input_len in enumerate(input_lens):
if input_len <= self.CLM_MIN_LEN:
continue
clm_samples = inputs_clone[i].repeat(self.clm_sample_n, 1)
sample_pad_mask = torch.ones(self.clm_sample_n, inputs_clone.shape[1]).bool()
sample_pad_mask_view = sample_pad_mask.view(-1)
            # The minimum is a quarter of the current sample length, or 32.
clm_sample_len = random.sample(range(self.CLM_MIN_LEN, input_len), self.clm_sample_n)
unmask_view_indices = []
for i, x in enumerate(clm_sample_len):
unmask_view_indices.extend(list(range(i * sample_pad_mask.shape[1], i * sample_pad_mask.shape[1] + x)))
sample_pad_mask_view[unmask_view_indices] = False
clm_samples[sample_pad_mask] = self.tokenizer.pad_token_id
# set labels
cls_labels = clm_samples.clone()
cls_label_mask = torch.zeros_like(cls_labels).bool()
cls_label_mask = ~torch.scatter(cls_label_mask, 1, (torch.tensor(clm_sample_len) - 1).unsqueeze(1), True)
cls_labels[cls_label_mask] = -100
clm_samples[~cls_label_mask] = self.tokenizer.pad_token_id
# set attentions
attention_pad_mask = sample_pad_mask.clone()
attention_pad_mask = ~torch.scatter(attention_pad_mask, 1, (torch.tensor(clm_sample_len) - 1).unsqueeze(1),
True)
attention_pad_mask = attention_pad_mask.long()
# some assertions
temp_assert_index = len(clm_samples[0][clm_samples[0] != 1])
assert cls_labels[0][temp_assert_index] != -100
assert clm_samples[0][temp_assert_index] == self.tokenizer.pad_token_id
assert clm_samples[0][temp_assert_index - 1] != self.tokenizer.pad_token_id, ipdb.set_trace()
assert attention_pad_mask[0][temp_assert_index] == 0 # Mask for label position
assert attention_pad_mask[0][temp_assert_index - 1] == 1 # Unmask for previous position
new_inputs.append(clm_samples)
new_labels.append(cls_labels)
attention_masks.append(attention_pad_mask)
new_inputs = torch.cat(new_inputs)
new_labels = torch.cat(new_labels)
attention_masks = torch.cat(attention_masks)
return new_inputs, new_labels, batch_timestamps, attention_masks
|
// THIS FILE IS AUTO GENERATED
var GenIcon = require('../lib').GenIcon
module.exports.GiMatterStates = function GiMatterStates (props) {
return GenIcon({"tag":"svg","attr":{"viewBox":"0 0 512 512"},"child":[{"tag":"path","attr":{"d":"M288.3 13.4c-12.3-.01-23 6.49-27.3 15.31l-2.9 5.95-6.6-1.01c-5.5-.85-11.3-1.32-17.1-1.32-18.8 0-35.7 4.5-47.3 11.14-11.5 6.64-16.9 14.59-16.9 22 0 7.42 5.4 15.37 16.9 22 11.6 6.64 28.5 11.13 47.3 11.13 7.5-.02 14.9-.74 21.8-2.13l6.3-1.23 3.2 5.46c5.6 9.2 23.7 18.2 44.7 18.2 13.9 0 26.4-3.6 34.8-8.8 8.4-5.2 12.2-11.23 12.2-16.9 0-5.09-3-10.48-10-15.44-6.9-4.97-17.6-8.87-30-9.95l-18.3-1.59 12.5-13.49c4.1-4.41 6.1-9.6 6.1-14.87 0-12.88-12.4-24.46-29.4-24.46zM152.2 65.46C84.8 102.3 39 173.9 39 256c0 32.1 7 62.6 19.54 90 5.95-7.9 10.48-13.4 12.67-16C62.05 307.2 57 282.2 57 256c0-74 40.27-138.5 100.1-172.78-3.1-5.39-4.9-11.34-4.9-17.74v-.02zm210.7 1.75c7.5 6.93 12.5 15.9 12.5 25.99 0 1.14-.1 2.27-.2 3.38C423.7 132.9 455 190.7 455 256c0 23.7-4.2 46.5-11.8 67.6l18.3 2.2c7.4-21.9 11.5-45.4 11.5-69.8 0-80.8-44.4-151.5-110.1-188.79zm58.6 270.49c-11.1.2-23 1.1-35.9 3.1l-.5.1-.5.1c-25.8 7.3-37.1 15.2-46.4 24.7l-2.2 2.2-.3 3.1c-3.4 29.6-5.8 60 0 91.8l1.3 6.8 6.8.6c30.6 2.6 58.7 1.4 86.2 0l1-.1.9-.2c18.5-5.2 34.4-12.8 46.4-24.6l1.7-1.6.6-2.2c10-33.4 3.4-63.8.4-92.7l-.8-7.4-7.3-.7c-16.2-1.5-32.8-3.2-51.4-3zM79.62 348.2c-4.94 6.1-11.43 14.5-18.58 25.6-12.29 18.9-22.53 42.4-20.51 54.9 1.43 8.8 6.97 19.6 14.51 27.6 7.54 7.9 16.69 12.8 24.58 12.8 7.87 0 17.03-4.9 24.58-12.8 7.5-8 13.1-18.8 14.5-27.6 2-12.5-8.2-36-20.5-54.9-7.15-11-13.64-19.5-18.58-25.6zM427 355.7c1.7 0 3.3 0 5 .1 5.4.1 10.7.4 16 .8-5.9 3.4-12.1 6.8-19.5 9.9l-2.4.2c-19.5 1.4-37.7.3-55.4-2 4.7-2 10.5-4 18.3-6.2 13.8-2.2 26.3-2.9 38-2.8zm38 11.6c2.8 22.9 5 44.5-1 66.6-7 6.3-16 11.4-27 15.4 3-22.5 2-44.8-.5-66.6 11.3-5 20.2-10.2 28.5-15.4zm-112.2 13.1c20.7 3.2 42.3 5.4 65.8 4.5 2.5 23 3.5 45.6-.4 67.8-21.5 1-43.1 1.5-65.8-.1-3.5-24-2.2-47.9.4-72.2zm-216.4 34.7c.9 5.5 1 11 .1 16.4-.3 1.6-.6 3.3-1 4.9C170 459.5 211.4 473 256 473c21.6 0 42.5-3.2 62.3-9.1l-2-18.2c-19 6-39.3 9.3-60.3 9.3-44.9 0-86.3-14.8-119.6-39.9z"}}]})(props);
};
|
#!/usr/bin/env python3
import argparse
import cubePCA
from astropy.io import fits as pyfits
from tqdm import tqdm
__author__ = "Bernd Husemann"
__copyright__ = "Copyright 2020, Bernd Husemann"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Bernd Husemann"
__email__ = "berndhusemann@gmx.de"
__status__ = "Production"
def main():
parser = argparse.ArgumentParser(description="Script to subtract sky residuals from a data cube by creating a PCA spectral library")
parser.add_argument('input_cube', metavar='CUBE_IN', type=str, help='Input FITS datacube from which sky spectra are selected')
parser.add_argument('output_cube', metavar='CUBE_OUT', type=str, help='Output FITS datacube with sky residuals subtracted')
parser.add_argument('sky_mask', metavar='MASK', type=str, help='sky region mask')
    parser.add_argument('-m', '--wave_mask', type=str, default='', help='File name of the wavelength selection for the PCA subtraction. If not given, a default wavelength set is used. An example is given in the wave.txt file.')
    parser.add_argument('-e', '--extension', type=int, default=1, help='Extension from which the data should be taken (Default: 1)')
parser.add_argument('-c', '--components', type=int, default=40, help='Number of PCA components to be used (Default: 40)')
    parser.add_argument('-s', '--spectra', type=int, default=20000, help='Maximum random subset of spectra used for the PCA analysis if the number of selected spectra exceeds this value (Default: 20000)')
    parser.add_argument('-f', '--filter_width', type=int, default=50, help='Size of the median filter in wavelength direction used to remove the continuum signal before sky residual subtraction (Default: 50)')
    parser.add_argument('-p', '--processes', type=str, default='auto', help='Number of processes used for multiprocessing; "auto" uses the maximum available, otherwise give a number (Default: auto)')
parser.add_argument('--verbose', action='store_true', help='Set if infos are printed to the command line')
args = parser.parse_args()
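    # Example invocation (script and file names hypothetical):
    #   python run_cubePCA.py input_cube.fits cleaned_cube.fits sky_mask.fits -c 30 --verbose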
if args.verbose:
print('Opening data cube {} for processing '.format(args.input_cube))
cube = cubePCA.IFUCube(args.input_cube,extension=args.extension)
if args.verbose:
print('Opening sky mask {} for processing '.format(args.sky_mask))
mask = cubePCA.MASKimg(args.sky_mask)
cube.getNANMask()
if args.verbose:
        print('Creating PCA spectral library. This may take a while... ')
PCA_out,sky = cube.create_PCA_sky(mask, cont_filt=args.filter_width, spectra=args.spectra)
if args.verbose:
print('Start subtracting sky line residuals.')
pbar = tqdm(total=cube.getSpax())
else:
pbar = None
cube.subtract_PCA_sky(PCA_out, cont_filt=args.filter_width, components=args.components, file_wavemask= args.wave_mask, max_cpu=args.processes, pbar=pbar, verbose=args.verbose)
if args.verbose:
print('Store cleaned cube at {}.'.format(args.output_cube))
cube.writeFits(args.output_cube)
if args.verbose:
print('Done')
if __name__ == '__main__':
main()
|
/*globals describe, before, beforeEach, afterEach, it*/
var testUtils = require('../../utils'),
should = require('should'),
when = require('when'),
_ = require('underscore'),
errors = require('../../../server/errorHandling'),
sinon = require('sinon'),
uuid = require('node-uuid'),
// Stuff we are testing
Models = require('../../../server/models');
describe('User Model', function run() {
var UserModel = Models.User;
before(function (done) {
testUtils.clearData().then(function () {
done();
}, done);
});
afterEach(function (done) {
testUtils.clearData().then(function () {
done();
}, done);
});
describe('Registration', function runRegistration() {
beforeEach(function (done) {
testUtils.initData().then(function () {
done();
}, done);
});
it('can add first', function (done) {
var userData = testUtils.DataGenerator.forModel.users[0],
gravatarStub = sinon.stub(UserModel, 'gravatarLookup', function (userData) {
return when.resolve(userData);
});
UserModel.add(userData).then(function (createdUser) {
should.exist(createdUser);
createdUser.has('uuid').should.equal(true);
createdUser.attributes.password.should.not.equal(userData.password, "password was hashed");
createdUser.attributes.email.should.eql(userData.email, "email address correct");
gravatarStub.restore();
done();
}).then(null, done);
});
it('does NOT lowercase email', function (done) {
var userData = testUtils.DataGenerator.forModel.users[2],
gravatarStub = sinon.stub(UserModel, 'gravatarLookup', function (userData) {
return when.resolve(userData);
});
UserModel.add(userData).then(function (createdUser) {
should.exist(createdUser);
createdUser.has('uuid').should.equal(true);
createdUser.attributes.email.should.eql(userData.email, "email address correct");
gravatarStub.restore();
done();
}).then(null, done);
});
it('can find gravatar', function (done) {
var userData = testUtils.DataGenerator.forModel.users[4],
gravatarStub = sinon.stub(UserModel, 'gravatarLookup', function (userData) {
                    userData.image = 'http://www.gravatar.com/avatar/2fab21a4c4ed88e76add10650c73bae1?d=404';
return when.resolve(userData);
});
UserModel.add(userData).then(function (createdUser) {
should.exist(createdUser);
createdUser.has('uuid').should.equal(true);
createdUser.attributes.image.should.eql('http://www.gravatar.com/avatar/2fab21a4c4ed88e76add10650c73bae1?d=404', 'Gravatar found');
gravatarStub.restore();
done();
}).then(null, done);
});
it('can handle no gravatar', function (done) {
var userData = testUtils.DataGenerator.forModel.users[0],
gravatarStub = sinon.stub(UserModel, 'gravatarLookup', function (userData) {
return when.resolve(userData);
});
UserModel.add(userData).then(function (createdUser) {
should.exist(createdUser);
createdUser.has('uuid').should.equal(true);
should.not.exist(createdUser.image);
gravatarStub.restore();
done();
}).then(null, done);
});
it('can find by email and is case insensitive', function (done) {
var userData = testUtils.DataGenerator.forModel.users[2],
email = testUtils.DataGenerator.forModel.users[2].email;
UserModel.add(userData).then(function () {
// Test same case
return UserModel.getByEmail(email).then(function (user) {
should.exist(user);
user.attributes.email.should.eql(email);
});
}).then(function () {
// Test entered in lowercase
return UserModel.getByEmail(email.toLowerCase()).then(function (user) {
should.exist(user);
user.attributes.email.should.eql(email);
});
}).then(function () {
// Test entered in uppercase
return UserModel.getByEmail(email.toUpperCase()).then(function (user) {
should.exist(user);
user.attributes.email.should.eql(email);
});
}).then(function () {
// Test incorrect email address - swapped capital O for number 0
return UserModel.getByEmail('jb0gendAth@example.com').then(null, function (error) {
should.exist(error);
error.message.should.eql('NotFound');
});
}).then(function () {
done();
}).then(null, done);
});
});
describe('Basic Operations', function () {
beforeEach(function (done) {
testUtils.initData()
.then(function () {
return when(testUtils.insertDefaultUser());
})
.then(function () {
done();
}, done);
});
it('can\'t add second', function (done) {
var userData = testUtils.DataGenerator.forModel.users[1];
return UserModel.add(userData).then(done, function (failure) {
failure.message.should.eql('A user is already registered. Only one user for now!');
done();
}).then(null, done);
});
it('can browse', function (done) {
UserModel.browse().then(function (results) {
should.exist(results);
results.length.should.be.above(0);
done();
}).then(null, done);
});
it('can read', function (done) {
var firstUser;
UserModel.browse().then(function (results) {
should.exist(results);
results.length.should.be.above(0);
firstUser = results.models[0];
return UserModel.read({email: firstUser.attributes.email});
}).then(function (found) {
should.exist(found);
found.attributes.name.should.equal(firstUser.attributes.name);
done();
}).then(null, done);
});
it('can edit', function (done) {
var firstUser;
UserModel.browse().then(function (results) {
should.exist(results);
results.length.should.be.above(0);
firstUser = results.models[0];
return UserModel.edit({id: firstUser.id, website: "some.newurl.com"});
}).then(function (edited) {
should.exist(edited);
edited.attributes.website.should.equal('some.newurl.com');
done();
}).then(null, done);
});
it("can get effective permissions", function (done) {
UserModel.effectivePermissions(1).then(function (effectivePermissions) {
should.exist(effectivePermissions);
effectivePermissions.length.should.be.above(0);
done();
}).then(null, done);
});
it('can delete', function (done) {
var firstUserId;
UserModel.browse().then(function (results) {
should.exist(results);
results.length.should.be.above(0);
firstUserId = results.models[0].id;
return UserModel.destroy(firstUserId);
}).then(function () {
return UserModel.browse();
}).then(function (newResults) {
var ids, hasDeletedId;
if (newResults.length < 1) {
// Bug out if we only had one user and deleted it.
return done();
}
ids = _.pluck(newResults.models, "id");
hasDeletedId = _.any(ids, function (id) {
return id === firstUserId;
});
hasDeletedId.should.equal(false);
done();
}).then(null, done);
});
it('can generate reset token', function (done) {
// Expires in one minute
var expires = Date.now() + 60000,
dbHash = uuid.v4();
UserModel.browse().then(function (results) {
return UserModel.generateResetToken(results.models[0].attributes.email, expires, dbHash);
}).then(function (token) {
should.exist(token);
token.length.should.be.above(0);
done();
}).then(null, done);
});
it('can validate a reset token', function (done) {
// Expires in one minute
var expires = Date.now() + 60000,
dbHash = uuid.v4();
UserModel.browse().then(function (results) {
return UserModel.generateResetToken(results.models[0].attributes.email, expires, dbHash);
}).then(function (token) {
return UserModel.validateToken(token, dbHash);
}).then(function () {
done();
}).then(null, done);
});
it('can reset a password with a valid token', function (done) {
// Expires in one minute
var origPassword,
expires = Date.now() + 60000,
dbHash = uuid.v4();
            UserModel.browse().then(function (results) {
                var firstUser = results.models[0];
                // Assign the outer variable; redeclaring origPassword here would shadow
                // it and leave the outer one undefined for the later assertion.
                origPassword = firstUser.attributes.password;
should.exist(origPassword);
return UserModel.generateResetToken(firstUser.attributes.email, expires, dbHash);
}).then(function (token) {
return UserModel.resetPassword(token, 'newpassword', 'newpassword', dbHash);
}).then(function (resetUser) {
var resetPassword = resetUser.get('password');
should.exist(resetPassword);
resetPassword.should.not.equal(origPassword);
done();
}).then(null, done);
});
it('doesn\'t allow expired timestamp tokens', function (done) {
var email,
// Expired one minute ago
expires = Date.now() - 60000,
dbHash = uuid.v4();
UserModel.browse().then(function (results) {
// Store email for later
email = results.models[0].attributes.email;
return UserModel.generateResetToken(email, expires, dbHash);
}).then(function (token) {
return UserModel.validateToken(token, dbHash);
}).then(function () {
throw new Error("Allowed expired token");
}, function (err) {
should.exist(err);
err.message.should.equal("Expired token");
done();
});
});
it('doesn\'t allow tampered timestamp tokens', function (done) {
// Expired one minute ago
var expires = Date.now() - 60000,
dbHash = uuid.v4();
UserModel.browse().then(function (results) {
return UserModel.generateResetToken(results.models[0].attributes.email, expires, dbHash);
}).then(function (token) {
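                // The token base64-decodes to three '|'-separated fields with the expiry
                // first; splice in a future expiry without recomputing the signature so
                // validation must reject the tampered value.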
var tokenText = new Buffer(token, 'base64').toString('ascii'),
parts = tokenText.split('|'),
fakeExpires,
fakeToken;
fakeExpires = Date.now() + 60000;
fakeToken = [String(fakeExpires), parts[1], parts[2]].join('|');
fakeToken = new Buffer(fakeToken).toString('base64');
return UserModel.validateToken(fakeToken, dbHash);
}).then(function () {
throw new Error("allowed invalid token");
}, function (err) {
should.exist(err);
err.message.should.equal("Invalid token");
done();
});
});
});
});
|
<%_ if (generate_api_doc) { _%>
/**
* @api {DELETE} /api/<%= schema.identifiers.plural.snake %>/:id Destroy
* @APIname Destroy
* @APIgroup <%= schema.identifiers.singular.pascal %> Controller
* @apidescription Destroy a single <%= schema.identifiers.singular.label %>
* @apiSuccess {json} The destroyed <%= schema.identifiers.singular.label %>
* @apiError (Error) 500 Internal server error
*/
<%_ } else { _%>
// DELETE /api/<%= schema.identifiers.plural.snake %>/:id Destroy
<%_ } _%>
module.exports.delete = (req, res, next) => {
return <%= schema.identifiers.singular.pascal %>.remove({ _id: req.params.id })
.then((response) => {
return res
.status(200)
.send(response)
.end();
})
// .catch( err => next(boom.badImplementation(err)));
};
|
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 2004-2015 The OpenLDAP Foundation.
* Portions Copyright 2004 Pierangelo Masarati.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/* ACKNOWLEDGEMENTS:
* This work was initially developed by Pierangelo Masarati for inclusion
* in OpenLDAP Software.
*/
#include "portable.h"
#include <stdio.h>
#include <ac/stdlib.h>
#include <ac/ctype.h>
#include <ac/string.h>
#include <ac/socket.h>
#include <ac/unistd.h>
#include <lber.h>
#include <ldif.h>
#include <lutil.h>
#include "slapcommon.h"
static int
do_check( Connection *c, Operation *op, struct berval *id )
{
struct berval authcdn;
int rc;
rc = slap_sasl_getdn( c, op, id, realm, &authcdn, SLAP_GETDN_AUTHCID );
if ( rc != LDAP_SUCCESS ) {
fprintf( stderr, "ID: <%s> check failed %d (%s)\n",
id->bv_val, rc,
ldap_err2string( rc ) );
rc = 1;
} else {
if ( !BER_BVISNULL( &authzID ) ) {
rc = slap_sasl_authorized( op, &authcdn, &authzID );
fprintf( stderr,
"ID: <%s>\n"
"authcDN: <%s>\n"
"authzDN: <%s>\n"
"authorization %s\n",
id->bv_val,
authcdn.bv_val,
authzID.bv_val,
rc == LDAP_SUCCESS ? "OK" : "failed" );
} else {
fprintf( stderr, "ID: <%s> check succeeded\n"
"authcID: <%s>\n",
id->bv_val,
authcdn.bv_val );
op->o_tmpfree( authcdn.bv_val, op->o_tmpmemctx );
}
rc = 0;
}
return rc;
}
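/*
 * Sketch of the flow: do_check() maps the SASL authcID to a DN and, when an
 * authzID was supplied, additionally verifies that the authenticated identity
 * is authorized to assume it.
 */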
int
slapauth( int argc, char **argv )
{
int rc = EXIT_SUCCESS;
const char *progname = "slapauth";
Connection conn = {0};
OperationBuffer opbuf;
Operation *op;
void *thrctx;
slap_tool_init( progname, SLAPAUTH, argc, argv );
argv = &argv[ optind ];
argc -= optind;
thrctx = ldap_pvt_thread_pool_context();
connection_fake_init( &conn, &opbuf, thrctx );
op = &opbuf.ob_op;
conn.c_sasl_bind_mech = mech;
if ( !BER_BVISNULL( &authzID ) ) {
struct berval authzdn;
rc = slap_sasl_getdn( &conn, op, &authzID, NULL, &authzdn,
SLAP_GETDN_AUTHZID );
if ( rc != LDAP_SUCCESS ) {
fprintf( stderr, "authzID: <%s> check failed %d (%s)\n",
authzID.bv_val, rc,
ldap_err2string( rc ) );
rc = 1;
BER_BVZERO( &authzID );
goto destroy;
}
authzID = authzdn;
}
if ( !BER_BVISNULL( &authcID ) ) {
if ( !BER_BVISNULL( &authzID ) || argc == 0 ) {
rc = do_check( &conn, op, &authcID );
goto destroy;
}
for ( ; argc--; argv++ ) {
struct berval authzdn;
ber_str2bv( argv[ 0 ], 0, 0, &authzID );
rc = slap_sasl_getdn( &conn, op, &authzID, NULL, &authzdn,
SLAP_GETDN_AUTHZID );
if ( rc != LDAP_SUCCESS ) {
fprintf( stderr, "authzID: <%s> check failed %d (%s)\n",
authzID.bv_val, rc,
ldap_err2string( rc ) );
rc = -1;
BER_BVZERO( &authzID );
if ( !continuemode ) {
goto destroy;
}
}
authzID = authzdn;
rc = do_check( &conn, op, &authcID );
op->o_tmpfree( authzID.bv_val, op->o_tmpmemctx );
BER_BVZERO( &authzID );
if ( rc && !continuemode ) {
goto destroy;
}
}
goto destroy;
}
for ( ; argc--; argv++ ) {
struct berval id;
ber_str2bv( argv[ 0 ], 0, 0, &id );
rc = do_check( &conn, op, &id );
if ( rc && !continuemode ) {
goto destroy;
}
}
destroy:;
if ( !BER_BVISNULL( &authzID ) ) {
op->o_tmpfree( authzID.bv_val, op->o_tmpmemctx );
}
if ( slap_tool_destroy())
rc = EXIT_FAILURE;
return rc;
}
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ForkBlockchain = void 0;
const block_1 = require("@ethereumjs/block");
const ethereumjs_util_1 = require("ethereumjs-util");
const errors_1 = require("../../../core/providers/errors");
const BlockchainBase_1 = require("../BlockchainBase");
const output_1 = require("../output");
const ReadOnlyValidEIP2930Transaction_1 = require("../transactions/ReadOnlyValidEIP2930Transaction");
const ReadOnlyValidTransaction_1 = require("../transactions/ReadOnlyValidTransaction");
const ReadOnlyValidEIP1559Transaction_1 = require("../transactions/ReadOnlyValidEIP1559Transaction");
const rpcToBlockData_1 = require("./rpcToBlockData");
const rpcToTxData_1 = require("./rpcToTxData");
/* eslint-disable @nomiclabs/hardhat-internal-rules/only-hardhat-error */
class ForkBlockchain extends BlockchainBase_1.BlockchainBase {
constructor(_jsonRpcClient, _forkBlockNumber, common) {
super(common);
this._jsonRpcClient = _jsonRpcClient;
this._forkBlockNumber = _forkBlockNumber;
this._latestBlockNumber = this._forkBlockNumber;
}
getLatestBlockNumber() {
return this._latestBlockNumber;
}
async getBlock(blockHashOrNumber) {
if ((typeof blockHashOrNumber === "number" || ethereumjs_util_1.BN.isBN(blockHashOrNumber)) &&
this._data.isReservedBlock(new ethereumjs_util_1.BN(blockHashOrNumber))) {
this._data.fulfillBlockReservation(new ethereumjs_util_1.BN(blockHashOrNumber));
}
let block;
if (Buffer.isBuffer(blockHashOrNumber)) {
block = await this._getBlockByHash(blockHashOrNumber);
return block !== null && block !== void 0 ? block : null;
}
block = await this._getBlockByNumber(new ethereumjs_util_1.BN(blockHashOrNumber));
return block !== null && block !== void 0 ? block : null;
}
async addBlock(block) {
const blockNumber = new ethereumjs_util_1.BN(block.header.number);
if (!blockNumber.eq(this._latestBlockNumber.addn(1))) {
throw new Error(`Invalid block number ${blockNumber.toNumber()}. Expected ${this._latestBlockNumber
.addn(1)
.toNumber()}`);
}
// When forking a network whose consensus is not the classic PoW,
// we can't calculate the hash correctly.
// Thus, we avoid this check for the first block after the fork.
if (blockNumber.gt(this._forkBlockNumber.addn(1))) {
const parent = await this.getLatestBlock();
if (!block.header.parentHash.equals(parent.hash())) {
throw new Error("Invalid parent hash");
}
}
this._latestBlockNumber = this._latestBlockNumber.addn(1);
const totalDifficulty = await this._computeTotalDifficulty(block);
this._data.addBlock(block, totalDifficulty);
return block;
}
reserveBlocks(count, interval, previousBlockStateRoot, previousBlockTotalDifficulty) {
super.reserveBlocks(count, interval, previousBlockStateRoot, previousBlockTotalDifficulty);
this._latestBlockNumber = this._latestBlockNumber.add(count);
}
deleteLaterBlocks(block) {
const blockNumber = new ethereumjs_util_1.BN(block.header.number);
const savedBlock = this._data.getBlockByNumber(blockNumber);
if (savedBlock === undefined || !savedBlock.hash().equals(block.hash())) {
throw new Error("Invalid block");
}
const nextBlockNumber = blockNumber.addn(1);
if (this._forkBlockNumber.gte(nextBlockNumber)) {
throw new Error("Cannot delete remote block");
}
this._delBlock(nextBlockNumber);
}
async getTotalDifficulty(blockHash) {
let td = this._data.getTotalDifficulty(blockHash);
if (td !== undefined) {
return td;
}
const block = await this.getBlock(blockHash);
if (block === null) {
throw new Error("Block not found");
}
td = this._data.getTotalDifficulty(blockHash);
if (td === undefined) {
throw new Error("This should never happen");
}
return td;
}
async getTransaction(transactionHash) {
const tx = this.getLocalTransaction(transactionHash);
if (tx === undefined) {
const remote = await this._jsonRpcClient.getTransactionByHash(transactionHash);
return this._processRemoteTransaction(remote);
}
return tx;
}
async getBlockByTransactionHash(transactionHash) {
let block = this._data.getBlockByTransactionHash(transactionHash);
if (block === undefined) {
const remote = await this._jsonRpcClient.getTransactionByHash(transactionHash);
this._processRemoteTransaction(remote);
if (remote !== null && remote.blockHash !== null) {
await this.getBlock(remote.blockHash);
block = this._data.getBlockByTransactionHash(transactionHash);
}
}
return block !== null && block !== void 0 ? block : null;
}
async getTransactionReceipt(transactionHash) {
const local = this._data.getTransactionReceipt(transactionHash);
if (local !== undefined) {
return local;
}
const remote = await this._jsonRpcClient.getTransactionReceipt(transactionHash);
if (remote !== null) {
const receipt = await this._processRemoteReceipt(remote);
return receipt !== null && receipt !== void 0 ? receipt : null;
}
return null;
}
getForkBlockNumber() {
return this._forkBlockNumber;
}
async getLogs(filterParams) {
if (filterParams.fromBlock.lte(this._forkBlockNumber)) {
let toBlock = filterParams.toBlock;
let localLogs = [];
if (toBlock.gt(this._forkBlockNumber)) {
toBlock = this._forkBlockNumber;
localLogs = this._data.getLogs(Object.assign(Object.assign({}, filterParams), { fromBlock: this._forkBlockNumber.addn(1) }));
}
const remoteLogs = await this._jsonRpcClient.getLogs({
fromBlock: filterParams.fromBlock,
toBlock,
address: filterParams.addresses.length === 1
? filterParams.addresses[0]
: filterParams.addresses,
topics: filterParams.normalizedTopics,
});
return remoteLogs.map(output_1.toRpcLogOutput).concat(localLogs);
}
return this._data.getLogs(filterParams);
}
async _getBlockByHash(blockHash) {
const block = this._data.getBlockByHash(blockHash);
if (block !== undefined) {
return block;
}
const rpcBlock = await this._jsonRpcClient.getBlockByHash(blockHash, true);
return this._processRemoteBlock(rpcBlock);
}
async _getBlockByNumber(blockNumber) {
if (blockNumber.gt(this._latestBlockNumber)) {
return undefined;
}
const block = await super.getBlock(blockNumber);
if (block !== null) {
return block;
}
const rpcBlock = await this._jsonRpcClient.getBlockByNumber(blockNumber, true);
return this._processRemoteBlock(rpcBlock);
}
async _processRemoteBlock(rpcBlock) {
if (rpcBlock === null ||
rpcBlock.hash === null ||
rpcBlock.number === null ||
rpcBlock.number.gt(this._forkBlockNumber)) {
return undefined;
}
        // We copy the common and set its hardfork to London or Berlin,
        // depending on whether the remote block had EIP-1559 activated.
        // The reason for this is that ethereumjs throws if a base fee is
        // present for an older hardfork, and sets a default one for London.
const common = this._common.copy();
if (rpcBlock.baseFeePerGas !== undefined) {
common.setHardfork("london"); // TODO: consider changing this to "latest hardfork"
}
else {
common.setHardfork("berlin");
}
        // We don't include the transactions here so that we can add our own
        // custom tx objects below; otherwise they would be recreated with the
        // upstream classes.
const blockData = (0, rpcToBlockData_1.rpcToBlockData)(Object.assign(Object.assign({}, rpcBlock), { transactions: [] }));
const block = block_1.Block.fromBlockData(blockData, {
common,
// We use freeze false here because we add the transactions manually
freeze: false,
});
for (const transaction of rpcBlock.transactions) {
let tx;
if (transaction.type === undefined || transaction.type.eqn(0)) {
tx = new ReadOnlyValidTransaction_1.ReadOnlyValidTransaction(new ethereumjs_util_1.Address(transaction.from), (0, rpcToTxData_1.rpcToTxData)(transaction));
}
else if (transaction.type.eqn(1)) {
tx = new ReadOnlyValidEIP2930Transaction_1.ReadOnlyValidEIP2930Transaction(new ethereumjs_util_1.Address(transaction.from), (0, rpcToTxData_1.rpcToTxData)(transaction));
}
else if (transaction.type.eqn(2)) {
tx = new ReadOnlyValidEIP1559Transaction_1.ReadOnlyValidEIP1559Transaction(new ethereumjs_util_1.Address(transaction.from), (0, rpcToTxData_1.rpcToTxData)(transaction));
}
else {
throw new errors_1.InternalError(`Unknown transaction type ${transaction.type}`);
}
block.transactions.push(tx);
}
this._data.addBlock(block, rpcBlock.totalDifficulty);
return block;
}
_delBlock(blockNumber) {
if (blockNumber.lte(this._forkBlockNumber)) {
throw new Error("Cannot delete remote block");
}
super._delBlock(blockNumber);
this._latestBlockNumber = blockNumber.subn(1);
}
_processRemoteTransaction(rpcTransaction) {
if (rpcTransaction === null ||
rpcTransaction.blockNumber === null ||
rpcTransaction.blockNumber.gt(this._forkBlockNumber)) {
return undefined;
}
const transaction = new ReadOnlyValidTransaction_1.ReadOnlyValidTransaction(new ethereumjs_util_1.Address(rpcTransaction.from), (0, rpcToTxData_1.rpcToTxData)(rpcTransaction));
this._data.addTransaction(transaction);
return transaction;
}
async _processRemoteReceipt(txReceipt) {
if (txReceipt === null || txReceipt.blockNumber.gt(this._forkBlockNumber)) {
return undefined;
}
const tx = await this.getTransaction(txReceipt.transactionHash);
const receipt = (0, output_1.remoteReceiptToRpcReceiptOutput)(txReceipt, tx, (0, output_1.shouldShowTransactionTypeForHardfork)(this._common), (0, output_1.shouldShowEffectiveGasPriceForHardfork)(this._common));
this._data.addTransactionReceipt(receipt);
return receipt;
}
}
exports.ForkBlockchain = ForkBlockchain;
//# sourceMappingURL=ForkBlockchain.js.map
|
// @flow
import path from "path";
import gensync, { type Handler } from "gensync";
import Plugin from "./plugin";
import { mergeOptions } from "./util";
import { createItemFromDescriptor } from "./item";
import {
buildRootChain,
type ConfigContext,
type FileHandling,
} from "./config-chain";
import { getEnv } from "./helpers/environment";
import {
validate,
type ValidatedOptions,
type NormalizedOptions,
type RootMode,
} from "./validation/options";
import {
findConfigUpwards,
resolveShowConfigPath,
ROOT_CONFIG_FILENAMES,
type ConfigFile,
type IgnoreFile,
} from "./files";
import { resolveTargets } from "./resolve-targets";
function* resolveRootMode(
rootDir: string,
rootMode: RootMode,
): Handler<string> {
switch (rootMode) {
case "root":
return rootDir;
case "upward-optional": {
const upwardRootDir = yield* findConfigUpwards(rootDir);
return upwardRootDir === null ? rootDir : upwardRootDir;
}
case "upward": {
const upwardRootDir = yield* findConfigUpwards(rootDir);
if (upwardRootDir !== null) return upwardRootDir;
throw Object.assign(
(new Error(
`Babel was run with rootMode:"upward" but a root could not ` +
`be found when searching upward from "${rootDir}".\n` +
`One of the following config files must be in the directory tree: ` +
`"${ROOT_CONFIG_FILENAMES.join(", ")}".`,
): any),
{
code: "BABEL_ROOT_NOT_FOUND",
dirname: rootDir,
},
);
}
default:
throw new Error(`Assertion failure - unknown rootMode value.`);
}
}
type PrivPartialConfig = {
options: NormalizedOptions,
context: ConfigContext,
fileHandling: FileHandling,
ignore: IgnoreFile | void,
babelrc: ConfigFile | void,
config: ConfigFile | void,
files: Set<string>,
};
export default function* loadPrivatePartialConfig(
inputOpts: mixed,
): Handler<PrivPartialConfig | null> {
if (
inputOpts != null &&
(typeof inputOpts !== "object" || Array.isArray(inputOpts))
) {
throw new Error("Babel options must be an object, null, or undefined");
}
const args = inputOpts ? validate("arguments", inputOpts) : {};
const {
envName = getEnv(),
cwd = ".",
root: rootDir = ".",
rootMode = "root",
caller,
cloneInputAst = true,
} = args;
const absoluteCwd = path.resolve(cwd);
const absoluteRootDir = yield* resolveRootMode(
path.resolve(absoluteCwd, rootDir),
rootMode,
);
const filename =
typeof args.filename === "string"
? path.resolve(cwd, args.filename)
: undefined;
const showConfigPath = yield* resolveShowConfigPath(absoluteCwd);
const context: ConfigContext = {
filename,
cwd: absoluteCwd,
root: absoluteRootDir,
envName,
caller,
showConfig: showConfigPath === filename,
};
const configChain = yield* buildRootChain(args, context);
if (!configChain) return null;
const merged: ValidatedOptions = {
assumptions: {},
};
configChain.options.forEach(opts => {
mergeOptions((merged: any), opts);
});
const options: NormalizedOptions = {
...merged,
targets: resolveTargets(merged, absoluteRootDir),
// Tack the passes onto the object itself so that, if this object is
// passed back to Babel a second time, it will be in the right structure
// to not change behavior.
cloneInputAst,
babelrc: false,
configFile: false,
browserslistConfigFile: false,
passPerPreset: false,
envName: context.envName,
cwd: context.cwd,
root: context.root,
rootMode: "root",
filename:
typeof context.filename === "string" ? context.filename : undefined,
plugins: configChain.plugins.map(descriptor =>
createItemFromDescriptor(descriptor),
),
presets: configChain.presets.map(descriptor =>
createItemFromDescriptor(descriptor),
),
};
return {
options,
context,
fileHandling: configChain.fileHandling,
ignore: configChain.ignore,
babelrc: configChain.babelrc,
config: configChain.config,
files: configChain.files,
};
}
type LoadPartialConfigOpts = {
showIgnoredFiles?: boolean,
...
};
export const loadPartialConfig = gensync<[any], PartialConfig | null>(
function* (opts?: LoadPartialConfigOpts): Handler<PartialConfig | null> {
let showIgnoredFiles = false;
// We only extract showIgnoredFiles if opts is an object, so that
// loadPrivatePartialConfig can throw the appropriate error if it's not.
if (typeof opts === "object" && opts !== null && !Array.isArray(opts)) {
({ showIgnoredFiles, ...opts } = opts);
}
const result: ?PrivPartialConfig = yield* loadPrivatePartialConfig(opts);
if (!result) return null;
const { options, babelrc, ignore, config, fileHandling, files } = result;
if (fileHandling === "ignored" && !showIgnoredFiles) {
return null;
}
(options.plugins || []).forEach(item => {
if (item.value instanceof Plugin) {
throw new Error(
"Passing cached plugin instances is not supported in " +
"babel.loadPartialConfig()",
);
}
});
return new PartialConfig(
options,
babelrc ? babelrc.filepath : undefined,
ignore ? ignore.filepath : undefined,
config ? config.filepath : undefined,
fileHandling,
files,
);
},
);
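// A usage sketch (hedged): this mirrors the sync `loadPartialConfig` that
// @babel/core exposes publicly, e.g.
//   const partial = loadPartialConfig({ filename: "/app/src/index.js" });
//   if (partial !== null && !partial.hasFilesystemConfig()) {
//     // no babel.config.js / .babelrc was found; fall back to programmatic options
//   }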
export type { PartialConfig };
class PartialConfig {
/**
* These properties are public, so any changes to them should be considered
* a breaking change to Babel's API.
*/
options: NormalizedOptions;
babelrc: string | void;
babelignore: string | void;
config: string | void;
fileHandling: FileHandling;
files: Set<string>;
constructor(
options: NormalizedOptions,
babelrc: string | void,
ignore: string | void,
config: string | void,
fileHandling: FileHandling,
files: Set<string>,
) {
this.options = options;
this.babelignore = ignore;
this.babelrc = babelrc;
this.config = config;
this.fileHandling = fileHandling;
this.files = files;
// Freeze since this is a public API and it should be extremely obvious that
// reassigning properties on here does nothing.
Object.freeze(this);
}
/**
* Returns true if there is a config file in the filesystem for this config.
*/
hasFilesystemConfig(): boolean {
return this.babelrc !== undefined || this.config !== undefined;
}
}
Object.freeze(PartialConfig.prototype);
|
function solve(input) {
    const distanceInMeters = input;
    const distanceInKm = distanceInMeters / 1000;
    console.log(distanceInKm.toFixed(2));
}
solve(1852);
|
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import SimpleITK as sitk
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
def compute_volume(label):
"""Computes the volume in cubic centimetres
Args:
label (SimpleITK.Image): A binary mask.
Returns:
float: The volume (in cubic centimetres)
"""
    return sitk.GetArrayFromImage(label).sum() * np.prod(label.GetSpacing()) / 1000
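
# A minimal usage sketch of compute_volume (assumes only SimpleITK and NumPy,
# which are already imported above). A 10 x 10 x 10 block of voxels at
# 1 x 1 x 1 mm spacing is 1000 mm^3, i.e. exactly 1 cubic centimetre.
def _example_compute_volume():
    arr = np.zeros((20, 20, 20), dtype=np.uint8)
    arr[5:15, 5:15, 5:15] = 1  # 1000 foreground voxels of 1 mm^3 each
    mask = sitk.GetImageFromArray(arr)
    mask.SetSpacing((1.0, 1.0, 1.0))
    print(compute_volume(mask))  # -> 1.0 (cm^3)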
def compute_surface_metrics(label_a, label_b, verbose=False):
"""Compute surface distance metrics between two labels. Surface metrics computed are:
hausdorffDistance, meanSurfaceDistance, medianSurfaceDistance, maximumSurfaceDistance,
sigmaSurfaceDistance
Args:
label_a (sitk.Image): A mask to compare
label_b (sitk.Image): Another mask to compare
verbose (bool, optional): Whether to print verbose output. Defaults to False.
Returns:
dict: Dictionary object containing surface distance metrics
"""
hausdorff_distance = sitk.HausdorffDistanceImageFilter()
hausdorff_distance.Execute(label_a, label_b)
hd = hausdorff_distance.GetHausdorffDistance()
mean_sd_list = []
max_sd_list = []
std_sd_list = []
median_sd_list = []
num_points = []
for (la, lb) in ((label_a, label_b), (label_b, label_a)):
label_intensity_stat = sitk.LabelIntensityStatisticsImageFilter()
reference_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(la, squaredDistance=False, useImageSpacing=True)
)
moving_label_contour = sitk.LabelContour(lb)
label_intensity_stat.Execute(moving_label_contour, reference_distance_map)
mean_sd_list.append(label_intensity_stat.GetMean(1))
max_sd_list.append(label_intensity_stat.GetMaximum(1))
std_sd_list.append(label_intensity_stat.GetStandardDeviation(1))
median_sd_list.append(label_intensity_stat.GetMedian(1))
num_points.append(label_intensity_stat.GetNumberOfPixels(1))
if verbose:
print(" Boundary points: {0} {1}".format(num_points[0], num_points[1]))
mean_surf_dist = np.dot(mean_sd_list, num_points) / np.sum(num_points)
max_surf_dist = np.max(max_sd_list)
    # Pooled standard deviation over both directions:
    # sqrt( sum_i n_i * (s_i^2 + (m_i - m)^2) / sum_i n_i )
    std_surf_dist = np.sqrt(
        np.dot(
            num_points,
            np.add(np.square(std_sd_list), np.square(np.subtract(mean_sd_list, mean_surf_dist))),
        )
        / np.sum(num_points)
    )
median_surf_dist = np.mean(median_sd_list)
result = {}
result["hausdorffDistance"] = hd
result["meanSurfaceDistance"] = mean_surf_dist
result["medianSurfaceDistance"] = median_surf_dist
result["maximumSurfaceDistance"] = max_surf_dist
result["sigmaSurfaceDistance"] = std_surf_dist
return result
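
# A quick numeric check of the pooled statistics used above (a sketch with
# made-up distance samples, not part of the metric API): combining the
# per-direction mean/std/count triples must match computing the statistics
# on the concatenated distances directly.
def _example_pooled_surface_stats():
    rng = np.random.default_rng(42)
    d_ab = rng.gamma(2.0, 1.5, size=400)  # stand-in for A->B surface distances
    d_ba = rng.gamma(2.0, 2.5, size=250)  # stand-in for B->A surface distances
    means = [d_ab.mean(), d_ba.mean()]
    stds = [d_ab.std(), d_ba.std()]
    counts = [d_ab.size, d_ba.size]
    pooled_mean = np.dot(means, counts) / np.sum(counts)
    pooled_std = np.sqrt(
        np.dot(counts, np.add(np.square(stds), np.square(np.subtract(means, pooled_mean))))
        / np.sum(counts)
    )
    both = np.concatenate([d_ab, d_ba])
    assert np.isclose(pooled_mean, both.mean())
    assert np.isclose(pooled_std, both.std())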
def compute_volume_metrics(label_a, label_b):
"""Compute volume metrics between two labels. Volume metrics computed are:
DSC, volumeOverlap fractionOverlap truePositiveFraction trueNegativeFraction
falsePositiveFraction falseNegativeFraction
Args:
label_a (sitk.Image): A mask to compare
label_b (sitk.Image): Another mask to compare
Returns:
dict: Dictionary object containing volume metrics
"""
arr_a = sitk.GetArrayFromImage(label_a).astype(bool)
arr_b = sitk.GetArrayFromImage(label_b).astype(bool)
arr_intersection = arr_a & arr_b
arr_union = arr_a | arr_b
    voxel_volume = np.prod(label_a.GetSpacing()) / 1000.0  # Conversion to cm^3
# 2|A & B|/(|A|+|B|)
dsc = (2.0 * arr_intersection.sum()) / (arr_a.sum() + arr_b.sum())
# |A & B|/|A | B|
frac_overlap = arr_intersection.sum() / arr_union.sum().astype(float)
vol_overlap = arr_intersection.sum() * voxel_volume
true_pos = arr_intersection.sum()
true_neg = (np.invert(arr_a) & np.invert(arr_b)).sum()
false_pos = arr_b.sum() - true_pos
false_neg = arr_a.sum() - true_pos
true_pos_frac = (1.0 * true_pos) / (true_pos + false_neg)
true_neg_frac = (1.0 * true_neg) / (true_neg + false_pos)
false_pos_frac = (1.0 * false_pos) / (true_neg + false_pos)
false_neg_frac = (1.0 * false_neg) / (true_pos + false_neg)
result = {}
result["DSC"] = dsc
result["volumeOverlap"] = vol_overlap
result["fractionOverlap"] = frac_overlap
result["truePositiveFraction"] = true_pos_frac
result["trueNegativeFraction"] = true_neg_frac
result["falsePositiveFraction"] = false_pos_frac
result["falseNegativeFraction"] = false_neg_frac
return result
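
# A worked check of the overlap formulas above on tiny boolean masks
# (a sketch; the numbers are chosen to be easy to verify by hand):
# |A| = 4, |B| = 4, |A & B| = 2 and |A | B| = 6, so
# DSC = 2*2 / (4+4) = 0.5 and fractionOverlap = 2/6 ~= 0.333.
def _example_volume_metrics():
    arr_a = np.zeros((1, 4, 4), dtype=np.uint8)
    arr_b = np.zeros((1, 4, 4), dtype=np.uint8)
    arr_a[0, 0, 0:4] = 1  # a row of 4 voxels
    arr_b[0, 0, 2:4] = 1  # overlaps the last 2 voxels of that row...
    arr_b[0, 1, 0:2] = 1  # ...plus 2 voxels of its own
    metrics = compute_volume_metrics(
        sitk.GetImageFromArray(arr_a), sitk.GetImageFromArray(arr_b)
    )
    print(metrics["DSC"], metrics["fractionOverlap"])  # 0.5 0.333...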
def compute_metric_dsc(label_a, label_b, auto_crop=True):
"""Compute the Dice Similarity Coefficient between two labels
Args:
label_a (sitk.Image): A mask to compare
label_b (sitk.Image): Another mask to compare
Returns:
float: The Dice Similarity Coefficient
"""
if auto_crop:
largest_region = (label_a + label_b) > 0
crop_box_size, crop_box_index = label_to_roi(largest_region)
label_a = crop_to_roi(label_a, size=crop_box_size, index=crop_box_index)
label_b = crop_to_roi(label_b, size=crop_box_size, index=crop_box_index)
arr_a = sitk.GetArrayFromImage(label_a).astype(bool)
arr_b = sitk.GetArrayFromImage(label_b).astype(bool)
return 2 * ((arr_a & arr_b).sum()) / (arr_a.sum() + arr_b.sum())
def compute_metric_specificity(label_a, label_b, auto_crop=True):
"""Compute the specificity between two labels
Args:
label_a (sitk.Image): A mask to compare
label_b (sitk.Image): Another mask to compare
Returns:
float: The specificity between the two labels
"""
if auto_crop:
largest_region = (label_a + label_b) > 0
crop_box_size, crop_box_index = label_to_roi(largest_region)
label_a = crop_to_roi(label_a, size=crop_box_size, index=crop_box_index)
label_b = crop_to_roi(label_b, size=crop_box_size, index=crop_box_index)
arr_a = sitk.GetArrayFromImage(label_a).astype(bool)
arr_b = sitk.GetArrayFromImage(label_b).astype(bool)
arr_intersection = arr_a & arr_b
true_pos = arr_intersection.sum()
true_neg = (np.invert(arr_a) & np.invert(arr_b)).sum()
false_pos = arr_b.sum() - true_pos
return float((1.0 * true_neg) / (true_neg + false_pos))
def compute_metric_sensitivity(label_a, label_b, auto_crop=True):
"""Compute the sensitivity between two labels
Args:
label_a (sitk.Image): A mask to compare
label_b (sitk.Image): Another mask to compare
Returns:
float: The sensitivity between the two labels
"""
if auto_crop:
largest_region = (label_a + label_b) > 0
crop_box_size, crop_box_index = label_to_roi(largest_region)
label_a = crop_to_roi(label_a, size=crop_box_size, index=crop_box_index)
label_b = crop_to_roi(label_b, size=crop_box_size, index=crop_box_index)
arr_a = sitk.GetArrayFromImage(label_a).astype(bool)
arr_b = sitk.GetArrayFromImage(label_b).astype(bool)
arr_intersection = arr_a & arr_b
true_pos = arr_intersection.sum()
false_neg = arr_a.sum() - true_pos
return float((1.0 * true_pos) / (true_pos + false_neg))
def compute_metric_masd(label_a, label_b, auto_crop=True):
"""Compute the mean absolute distance between two labels
Args:
label_a (sitk.Image): A mask to compare
label_b (sitk.Image): Another mask to compare
Returns:
float: The mean absolute surface distance
"""
if auto_crop:
largest_region = (label_a + label_b) > 0
crop_box_size, crop_box_index = label_to_roi(largest_region)
label_a = crop_to_roi(label_a, size=crop_box_size, index=crop_box_index)
label_b = crop_to_roi(label_b, size=crop_box_size, index=crop_box_index)
mean_sd_list = []
num_points = []
for (la, lb) in ((label_a, label_b), (label_b, label_a)):
label_intensity_stat = sitk.LabelIntensityStatisticsImageFilter()
reference_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(la, squaredDistance=False, useImageSpacing=True)
)
moving_label_contour = sitk.LabelContour(lb)
label_intensity_stat.Execute(moving_label_contour, reference_distance_map)
mean_sd_list.append(label_intensity_stat.GetMean(1))
num_points.append(label_intensity_stat.GetNumberOfPixels(1))
mean_surf_dist = np.dot(mean_sd_list, num_points) / np.sum(num_points)
return float(mean_surf_dist)
def compute_metric_hd(label_a, label_b, auto_crop=True):
"""Compute the Hausdorff distance between two labels
Args:
label_a (sitk.Image): A mask to compare
label_b (sitk.Image): Another mask to compare
Returns:
float: The maximum Hausdorff distance
"""
if auto_crop:
largest_region = (label_a + label_b) > 0
crop_box_size, crop_box_index = label_to_roi(largest_region)
label_a = crop_to_roi(label_a, size=crop_box_size, index=crop_box_index)
label_b = crop_to_roi(label_b, size=crop_box_size, index=crop_box_index)
hausdorff_distance = sitk.HausdorffDistanceImageFilter()
hausdorff_distance.Execute(label_a, label_b)
hausdorff_distance_value = hausdorff_distance.GetHausdorffDistance()
return hausdorff_distance_value
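
# An end-to-end sketch of the per-metric helpers above on two overlapping
# synthetic boxes (assumes this module's imports; with the 2-voxel shift the
# expected DSC and sensitivity are both 800/1000 = 0.8).
def _example_metric_helpers():
    arr_a = np.zeros((30, 30, 30), dtype=np.uint8)
    arr_b = np.zeros((30, 30, 30), dtype=np.uint8)
    arr_a[10:20, 10:20, 10:20] = 1
    arr_b[12:22, 10:20, 10:20] = 1  # the same 10^3 box shifted by 2 voxels
    label_a = sitk.GetImageFromArray(arr_a)
    label_b = sitk.GetImageFromArray(arr_b)
    print("DSC:        ", compute_metric_dsc(label_a, label_b))
    print("Sensitivity:", compute_metric_sensitivity(label_a, label_b))
    print("Specificity:", compute_metric_specificity(label_a, label_b))
    print("MASD:       ", compute_metric_masd(label_a, label_b))
    print("HD:         ", compute_metric_hd(label_a, label_b))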
|
# -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
import ddkit
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/ecp-keras.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/ecp_classes_op2.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
# "selected_objects": [2, 5, 7]
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
        except Exception:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
        # Reorder boxes from (top, left, bottom, right) to (left, top, right, bottom).
        out_boxes = [[box[1], box[0], box[3], box[2]] for box in out_boxes]
image = ddkit.draw_detections(image, out_boxes, out_classes, self.class_names, out_scores)
        end = timer()
        elapsed_time = end - start
        print(elapsed_time)
rgb_image = image.convert('RGB')
rgb_image.save("result.jpg")
return image # image type : <class 'PIL.JpegImagePlugin.JpegImageFile'>
def close_session(self):
self.sess.close()
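
# A minimal usage sketch (hedged): assumes the default weight/anchor/class
# files configured in _defaults exist on disk. Kept as a comment so that
# importing this module never tries to load the model.
#
#   yolo = YOLO(score=0.4)                  # override any _defaults via kwargs
#   img = Image.open("test.jpg")            # "test.jpg" is a hypothetical input
#   result = yolo.detect_image(img)         # also saves "result.jpg"
#   yolo.close_session()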
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
# video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_FourCC = cv2.VideoWriter_fourcc(*'MP4V')
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
if not return_value:
break
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
# cv2.namedWindow("result", cv2.WINDOW_NORMAL)
# cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
    if isOutput:
        out.release()
    vid.release()
yolo.close_session()
|
/*
FreeRTOS V8.2.1 - Copyright (C) 2015 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
#ifndef QUEUE_H
#define QUEUE_H
#ifndef INC_FREERTOS_H
#error "include FreeRTOS.h" must appear in source files before "include queue.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* Type by which queues are referenced. For example, a call to xQueueCreate()
 * returns a QueueHandle_t variable that can then be used as a parameter to
* xQueueSend(), xQueueReceive(), etc.
*/
typedef void * QueueHandle_t;
/**
* Type by which queue sets are referenced. For example, a call to
* xQueueCreateSet() returns an xQueueSet variable that can then be used as a
* parameter to xQueueSelectFromSet(), xQueueAddToSet(), etc.
*/
typedef void * QueueSetHandle_t;
/**
* Queue sets can contain both queues and semaphores, so the
* QueueSetMemberHandle_t is defined as a type to be used where a parameter or
 * return value can be either a QueueHandle_t or a SemaphoreHandle_t.
*/
typedef void * QueueSetMemberHandle_t;
/* For internal use only. */
#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 )
#define queueOVERWRITE ( ( BaseType_t ) 2 )
/* For internal use only. These definitions *must* match those in queue.c. */
#define queueQUEUE_TYPE_BASE ( ( uint8_t ) 0U )
#define queueQUEUE_TYPE_SET ( ( uint8_t ) 0U )
#define queueQUEUE_TYPE_MUTEX ( ( uint8_t ) 1U )
#define queueQUEUE_TYPE_COUNTING_SEMAPHORE ( ( uint8_t ) 2U )
#define queueQUEUE_TYPE_BINARY_SEMAPHORE ( ( uint8_t ) 3U )
#define queueQUEUE_TYPE_RECURSIVE_MUTEX ( ( uint8_t ) 4U )
/**
* queue. h
* <pre>
QueueHandle_t xQueueCreate(
UBaseType_t uxQueueLength,
UBaseType_t uxItemSize
);
* </pre>
*
* Creates a new queue instance. This allocates the storage required by the
* new queue and returns a handle for the queue.
*
* @param uxQueueLength The maximum number of items that the queue can contain.
*
* @param uxItemSize The number of bytes each item in the queue will require.
* Items are queued by copy, not by reference, so this is the number of bytes
* that will be copied for each posted item. Each item on the queue must be
* the same size.
*
 * @return If the queue is successfully created then a handle to the newly
* created queue is returned. If the queue cannot be created then 0 is
* returned.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
};
void vATask( void *pvParameters )
{
QueueHandle_t xQueue1, xQueue2;
// Create a queue capable of containing 10 uint32_t values.
xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
if( xQueue1 == 0 )
{
// Queue was not created and must not be used.
}
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
if( xQueue2 == 0 )
{
// Queue was not created and must not be used.
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueCreate xQueueCreate
* \ingroup QueueManagement
*/
#define xQueueCreate( uxQueueLength, uxItemSize ) xQueueGenericCreate( uxQueueLength, uxItemSize, queueQUEUE_TYPE_BASE )
/**
* queue. h
* <pre>
 BaseType_t xQueueSendToFront(
QueueHandle_t xQueue,
const void *pvItemToQueue,
TickType_t xTicksToWait
);
* </pre>
*
* This is a macro that calls xQueueGenericSend().
*
* Post an item to the front of a queue. The item is queued by copy, not by
* reference. This function must not be called from an interrupt service
* routine. See xQueueSendFromISR () for an alternative which may be used
* in an ISR.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param xTicksToWait The maximum amount of time the task should block
* waiting for space to become available on the queue, should it already
* be full. The call will return immediately if this is set to 0 and the
* queue is full. The time is defined in tick periods so the constant
* portTICK_PERIOD_MS should be used to convert to real time if this is required.
*
* @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
} xMessage;
uint32_t ulVar = 10UL;
void vATask( void *pvParameters )
{
QueueHandle_t xQueue1, xQueue2;
struct AMessage *pxMessage;
// Create a queue capable of containing 10 uint32_t values.
xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
// ...
if( xQueue1 != 0 )
{
	// Send a uint32_t. Wait for 10 ticks for space to become
// available if necessary.
if( xQueueSendToFront( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
{
// Failed to post the message, even after 10 ticks.
}
}
if( xQueue2 != 0 )
{
// Send a pointer to a struct AMessage object. Don't block if the
// queue is already full.
pxMessage = & xMessage;
xQueueSendToFront( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueSend xQueueSend
* \ingroup QueueManagement
*/
#define xQueueSendToFront( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT )
/**
* queue. h
* <pre>
BaseType_t xQueueSendToBack(
QueueHandle_t xQueue,
const void *pvItemToQueue,
TickType_t xTicksToWait
);
* </pre>
*
* This is a macro that calls xQueueGenericSend().
*
* Post an item to the back of a queue. The item is queued by copy, not by
* reference. This function must not be called from an interrupt service
* routine. See xQueueSendFromISR () for an alternative which may be used
* in an ISR.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param xTicksToWait The maximum amount of time the task should block
* waiting for space to become available on the queue, should it already
* be full. The call will return immediately if this is set to 0 and the queue
* is full. The time is defined in tick periods so the constant
* portTICK_PERIOD_MS should be used to convert to real time if this is required.
*
* @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
} xMessage;
uint32_t ulVar = 10UL;
void vATask( void *pvParameters )
{
QueueHandle_t xQueue1, xQueue2;
struct AMessage *pxMessage;
// Create a queue capable of containing 10 uint32_t values.
xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
// ...
if( xQueue1 != 0 )
{
	// Send a uint32_t. Wait for 10 ticks for space to become
// available if necessary.
if( xQueueSendToBack( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
{
// Failed to post the message, even after 10 ticks.
}
}
if( xQueue2 != 0 )
{
// Send a pointer to a struct AMessage object. Don't block if the
// queue is already full.
pxMessage = & xMessage;
xQueueSendToBack( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueSend xQueueSend
* \ingroup QueueManagement
*/
#define xQueueSendToBack( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
/**
* queue. h
* <pre>
BaseType_t xQueueSend(
QueueHandle_t xQueue,
const void * pvItemToQueue,
TickType_t xTicksToWait
);
* </pre>
*
* This is a macro that calls xQueueGenericSend(). It is included for
* backward compatibility with versions of FreeRTOS.org that did not
* include the xQueueSendToFront() and xQueueSendToBack() macros. It is
* equivalent to xQueueSendToBack().
*
* Post an item on a queue. The item is queued by copy, not by reference.
* This function must not be called from an interrupt service routine.
* See xQueueSendFromISR () for an alternative which may be used in an ISR.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param xTicksToWait The maximum amount of time the task should block
* waiting for space to become available on the queue, should it already
* be full. The call will return immediately if this is set to 0 and the
* queue is full. The time is defined in tick periods so the constant
* portTICK_PERIOD_MS should be used to convert to real time if this is required.
*
* @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
} xMessage;
uint32_t ulVar = 10UL;
void vATask( void *pvParameters )
{
QueueHandle_t xQueue1, xQueue2;
struct AMessage *pxMessage;
// Create a queue capable of containing 10 uint32_t values.
xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
// ...
if( xQueue1 != 0 )
{
	// Send a uint32_t. Wait for 10 ticks for space to become
// available if necessary.
if( xQueueSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
{
// Failed to post the message, even after 10 ticks.
}
}
if( xQueue2 != 0 )
{
// Send a pointer to a struct AMessage object. Don't block if the
// queue is already full.
pxMessage = & xMessage;
xQueueSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueSend xQueueSend
* \ingroup QueueManagement
*/
#define xQueueSend( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
/**
* queue. h
* <pre>
BaseType_t xQueueOverwrite(
QueueHandle_t xQueue,
const void * pvItemToQueue
);
* </pre>
*
* Only for use with queues that have a length of one - so the queue is either
* empty or full.
*
* Post an item on a queue. If the queue is already full then overwrite the
* value held in the queue. The item is queued by copy, not by reference.
*
* This function must not be called from an interrupt service routine.
* See xQueueOverwriteFromISR () for an alternative which may be used in an ISR.
*
* @param xQueue The handle of the queue to which the data is being sent.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @return xQueueOverwrite() is a macro that calls xQueueGenericSend(), and
* therefore has the same return values as xQueueSendToFront(). However, pdPASS
* is the only value that can be returned because xQueueOverwrite() will write
* to the queue even when the queue is already full.
*
* Example usage:
<pre>
void vFunction( void *pvParameters )
{
QueueHandle_t xQueue;
uint32_t ulVarToSend, ulValReceived;
// Create a queue to hold one uint32_t value. It is strongly
// recommended *not* to use xQueueOverwrite() on queues that can
// contain more than one value, and doing so will trigger an assertion
// if configASSERT() is defined.
xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
// Write the value 10 to the queue using xQueueOverwrite().
ulVarToSend = 10;
xQueueOverwrite( xQueue, &ulVarToSend );
// Peeking the queue should now return 10, but leave the value 10 in
// the queue. A block time of zero is used as it is known that the
// queue holds a value.
ulValReceived = 0;
xQueuePeek( xQueue, &ulValReceived, 0 );
if( ulValReceived != 10 )
{
// Error unless the item was removed by a different task.
}
// The queue is still full. Use xQueueOverwrite() to overwrite the
// value held in the queue with 100.
ulVarToSend = 100;
xQueueOverwrite( xQueue, &ulVarToSend );
// This time read from the queue, leaving the queue empty once more.
// A block time of 0 is used again.
xQueueReceive( xQueue, &ulValReceived, 0 );
// The value read should be the last value written, even though the
// queue was already full when the value was written.
if( ulValReceived != 100 )
{
// Error!
}
// ...
}
</pre>
* \defgroup xQueueOverwrite xQueueOverwrite
* \ingroup QueueManagement
*/
#define xQueueOverwrite( xQueue, pvItemToQueue ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), 0, queueOVERWRITE )
/**
* queue. h
* <pre>
BaseType_t xQueueGenericSend(
QueueHandle_t xQueue,
const void * pvItemToQueue,
 TickType_t xTicksToWait,
BaseType_t xCopyPosition
);
* </pre>
*
* It is preferred that the macros xQueueSend(), xQueueSendToFront() and
* xQueueSendToBack() are used in place of calling this function directly.
*
* Post an item on a queue. The item is queued by copy, not by reference.
* This function must not be called from an interrupt service routine.
* See xQueueSendFromISR () for an alternative which may be used in an ISR.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param xTicksToWait The maximum amount of time the task should block
* waiting for space to become available on the queue, should it already
* be full. The call will return immediately if this is set to 0 and the
* queue is full. The time is defined in tick periods so the constant
* portTICK_PERIOD_MS should be used to convert to real time if this is required.
*
* @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
* item at the back of the queue, or queueSEND_TO_FRONT to place the item
* at the front of the queue (for high priority messages).
*
* @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
} xMessage;
uint32_t ulVar = 10UL;
void vATask( void *pvParameters )
{
QueueHandle_t xQueue1, xQueue2;
struct AMessage *pxMessage;
// Create a queue capable of containing 10 uint32_t values.
xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
// ...
if( xQueue1 != 0 )
{
	// Send a uint32_t. Wait for 10 ticks for space to become
// available if necessary.
if( xQueueGenericSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10, queueSEND_TO_BACK ) != pdPASS )
{
// Failed to post the message, even after 10 ticks.
}
}
if( xQueue2 != 0 )
{
// Send a pointer to a struct AMessage object. Don't block if the
// queue is already full.
pxMessage = & xMessage;
xQueueGenericSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0, queueSEND_TO_BACK );
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueSend xQueueSend
* \ingroup QueueManagement
*/
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
/**
* queue. h
* <pre>
BaseType_t xQueuePeek(
QueueHandle_t xQueue,
void *pvBuffer,
TickType_t xTicksToWait
);</pre>
*
* This is a macro that calls the xQueueGenericReceive() function.
*
* Receive an item from a queue without removing the item from the queue.
* The item is received by copy so a buffer of adequate size must be
* provided. The number of bytes copied into the buffer was defined when
* the queue was created.
*
* Successfully received items remain on the queue so will be returned again
* by the next call, or a call to xQueueReceive().
*
* This macro must not be used in an interrupt service routine. See
* xQueuePeekFromISR() for an alternative that can be called from an interrupt
* service routine.
*
* @param xQueue The handle to the queue from which the item is to be
* received.
*
* @param pvBuffer Pointer to the buffer into which the received item will
* be copied.
*
* @param xTicksToWait The maximum amount of time the task should block
* waiting for an item to receive should the queue be empty at the time
* of the call. The time is defined in tick periods so the constant
* portTICK_PERIOD_MS should be used to convert to real time if this is required.
* xQueuePeek() will return immediately if xTicksToWait is 0 and the queue
* is empty.
*
* @return pdTRUE if an item was successfully received from the queue,
* otherwise pdFALSE.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
} xMessage;
QueueHandle_t xQueue;
// Task to create a queue and post a value.
void vATask( void *pvParameters )
{
struct AMessage *pxMessage;
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
if( xQueue == 0 )
{
// Failed to create the queue.
}
// ...
// Send a pointer to a struct AMessage object. Don't block if the
// queue is already full.
pxMessage = & xMessage;
xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
// ... Rest of task code.
}
// Task to peek the data from the queue.
void vADifferentTask( void *pvParameters )
{
struct AMessage *pxRxedMessage;
if( xQueue != 0 )
{
// Peek a message on the created queue. Block for 10 ticks if a
// message is not immediately available.
if( xQueuePeek( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
{
	// pxRxedMessage now points to the struct AMessage variable posted
// by vATask, but the item still remains on the queue.
}
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueReceive xQueueReceive
* \ingroup QueueManagement
*/
#define xQueuePeek( xQueue, pvBuffer, xTicksToWait ) xQueueGenericReceive( ( xQueue ), ( pvBuffer ), ( xTicksToWait ), pdTRUE )
/**
* queue. h
* <pre>
BaseType_t xQueuePeekFromISR(
QueueHandle_t xQueue,
 void *pvBuffer
);</pre>
*
* A version of xQueuePeek() that can be called from an interrupt service
* routine (ISR).
*
* Receive an item from a queue without removing the item from the queue.
* The item is received by copy so a buffer of adequate size must be
* provided. The number of bytes copied into the buffer was defined when
* the queue was created.
*
* Successfully received items remain on the queue so will be returned again
* by the next call, or a call to xQueueReceive().
*
* @param xQueue The handle to the queue from which the item is to be
* received.
*
* @param pvBuffer Pointer to the buffer into which the received item will
* be copied.
*
* @return pdTRUE if an item was successfully received from the queue,
* otherwise pdFALSE.
*
* \defgroup xQueuePeekFromISR xQueuePeekFromISR
* \ingroup QueueManagement
*/
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
/**
* queue. h
* <pre>
BaseType_t xQueueReceive(
QueueHandle_t xQueue,
void *pvBuffer,
TickType_t xTicksToWait
);</pre>
*
* This is a macro that calls the xQueueGenericReceive() function.
*
* Receive an item from a queue. The item is received by copy so a buffer of
* adequate size must be provided. The number of bytes copied into the buffer
* was defined when the queue was created.
*
* Successfully received items are removed from the queue.
*
* This function must not be used in an interrupt service routine. See
* xQueueReceiveFromISR for an alternative that can.
*
* @param xQueue The handle to the queue from which the item is to be
* received.
*
* @param pvBuffer Pointer to the buffer into which the received item will
* be copied.
*
* @param xTicksToWait The maximum amount of time the task should block
* waiting for an item to receive should the queue be empty at the time
* of the call. xQueueReceive() will return immediately if xTicksToWait
* is zero and the queue is empty. The time is defined in tick periods so the
* constant portTICK_PERIOD_MS should be used to convert to real time if this is
* required.
*
* @return pdTRUE if an item was successfully received from the queue,
* otherwise pdFALSE.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
} xMessage;
QueueHandle_t xQueue;
// Task to create a queue and post a value.
void vATask( void *pvParameters )
{
struct AMessage *pxMessage;
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
if( xQueue == 0 )
{
// Failed to create the queue.
}
// ...
// Send a pointer to a struct AMessage object. Don't block if the
// queue is already full.
pxMessage = & xMessage;
xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
// ... Rest of task code.
}
// Task to receive from the queue.
void vADifferentTask( void *pvParameters )
{
struct AMessage *pxRxedMessage;
if( xQueue != 0 )
{
// Receive a message on the created queue. Block for 10 ticks if a
// message is not immediately available.
if( xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
{
	// pxRxedMessage now points to the struct AMessage variable posted
// by vATask.
}
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueReceive xQueueReceive
* \ingroup QueueManagement
*/
#define xQueueReceive( xQueue, pvBuffer, xTicksToWait ) xQueueGenericReceive( ( xQueue ), ( pvBuffer ), ( xTicksToWait ), pdFALSE )
/**
* queue. h
* <pre>
BaseType_t xQueueGenericReceive(
QueueHandle_t xQueue,
void *pvBuffer,
 TickType_t xTicksToWait,
BaseType_t xJustPeek
);</pre>
*
* It is preferred that the macro xQueueReceive() be used rather than calling
* this function directly.
*
* Receive an item from a queue. The item is received by copy so a buffer of
* adequate size must be provided. The number of bytes copied into the buffer
* was defined when the queue was created.
*
* This function must not be used in an interrupt service routine. See
* xQueueReceiveFromISR for an alternative that can.
*
* @param xQueue The handle to the queue from which the item is to be
* received.
*
* @param pvBuffer Pointer to the buffer into which the received item will
* be copied.
*
* @param xTicksToWait The maximum amount of time the task should block
* waiting for an item to receive should the queue be empty at the time
* of the call. The time is defined in tick periods so the constant
* portTICK_PERIOD_MS should be used to convert to real time if this is required.
* xQueueGenericReceive() will return immediately if the queue is empty and
* xTicksToWait is 0.
*
* @param xJustPeek When set to true, the item received from the queue is not
* actually removed from the queue - meaning a subsequent call to
* xQueueReceive() will return the same item. When set to false, the item
* being received from the queue is also removed from the queue.
*
* @return pdTRUE if an item was successfully received from the queue,
* otherwise pdFALSE.
*
* Example usage:
<pre>
struct AMessage
{
char ucMessageID;
char ucData[ 20 ];
} xMessage;
QueueHandle_t xQueue;
// Task to create a queue and post a value.
void vATask( void *pvParameters )
{
struct AMessage *pxMessage;
// Create a queue capable of containing 10 pointers to AMessage structures.
// These should be passed by pointer as they contain a lot of data.
xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
if( xQueue == 0 )
{
// Failed to create the queue.
}
// ...
// Send a pointer to a struct AMessage object. Don't block if the
// queue is already full.
pxMessage = & xMessage;
xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
// ... Rest of task code.
}
// Task to receive from the queue.
void vADifferentTask( void *pvParameters )
{
struct AMessage *pxRxedMessage;
if( xQueue != 0 )
{
// Receive a message on the created queue. Block for 10 ticks if a
// message is not immediately available.
	if( xQueueGenericReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10, pdFALSE ) )
{
	// pxRxedMessage now points to the struct AMessage variable posted
// by vATask.
}
}
// ... Rest of task code.
}
</pre>
* \defgroup xQueueReceive xQueueReceive
* \ingroup QueueManagement
*/
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeek ) PRIVILEGED_FUNCTION;
/**
* queue. h
* <pre>UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );</pre>
*
* Return the number of messages stored in a queue.
*
* @param xQueue A handle to the queue being queried.
*
* @return The number of messages available in the queue.
*
* \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
* \ingroup QueueManagement
*/
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
/**
* queue. h
* <pre>UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );</pre>
*
* Return the number of free spaces available in a queue. This is equal to the
* number of items that can be sent to the queue before the queue becomes full
* if no items are removed.
*
* @param xQueue A handle to the queue being queried.
*
* @return The number of spaces available in the queue.
*
* \defgroup uxQueueSpacesAvailable uxQueueSpacesAvailable
* \ingroup QueueManagement
*/
UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
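/*
 * Illustrative sketch (not from the original header): for a queue created to
 * hold N items, uxQueueMessagesWaiting() and uxQueueSpacesAvailable() should
 * sum to N, barring concurrent sends or receives between the two calls.  The
 * handle below is hypothetical.
 <pre>
 void vReportQueueUsage( QueueHandle_t xQueue )
 {
 UBaseType_t uxWaiting, uxFree;

	// Items currently stored in the queue.
	uxWaiting = uxQueueMessagesWaiting( xQueue );

	// Items that can still be sent before the queue becomes full.
	uxFree = uxQueueSpacesAvailable( xQueue );

	( void ) uxWaiting;
	( void ) uxFree;
 }
 </pre>
 */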
/**
* queue. h
* <pre>void vQueueDelete( QueueHandle_t xQueue );</pre>
*
* Delete a queue - freeing all the memory allocated for storing of items
* placed on the queue.
*
* @param xQueue A handle to the queue to be deleted.
*
* \defgroup vQueueDelete vQueueDelete
* \ingroup QueueManagement
*/
void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
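/*
 * Illustrative sketch (not from the original header): a queue must not be in
 * use by any task or interrupt at the point it is deleted.  Names below are
 * hypothetical.
 <pre>
 void vCreateUseAndDeleteQueue( void )
 {
 QueueHandle_t xTempQueue;

	xTempQueue = xQueueCreate( 5, sizeof( uint32_t ) );
	if( xTempQueue != NULL )
	{
		// ... use the queue ...

		// Free the memory that was allocated for the queue.
		vQueueDelete( xTempQueue );
	}
 }
 </pre>
 */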
/**
* queue. h
* <pre>
BaseType_t xQueueSendToFrontFromISR(
QueueHandle_t xQueue,
const void *pvItemToQueue,
BaseType_t *pxHigherPriorityTaskWoken
);
</pre>
*
* This is a macro that calls xQueueGenericSendFromISR().
*
* Post an item to the front of a queue. It is safe to use this macro from
* within an interrupt service routine.
*
* Items are queued by copy not reference so it is preferable to only
* queue small items, especially when called from an ISR. In most cases
* it would be preferable to store a pointer to the item being queued.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param pxHigherPriorityTaskWoken xQueueSendToFrontFromISR() will set
* *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
* to unblock, and the unblocked task has a priority higher than the currently
* running task. If xQueueSendToFrontFromISR() sets this value to pdTRUE then
* a context switch should be requested before the interrupt is exited.
*
* @return pdTRUE if the data was successfully sent to the queue, otherwise
* errQUEUE_FULL.
*
* Example usage for buffered IO (where the ISR can obtain more than one value
* per call):
<pre>
void vBufferISR( void )
{
char cIn;
BaseType_t xHigherPriorityTaskWoken;
// We have not woken a task at the start of the ISR.
xHigherPriorityTaskWoken = pdFALSE;
// Loop until the buffer is empty.
do
{
// Obtain a byte from the buffer.
cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
// Post the byte.
xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
} while( portINPUT_BYTE( BUFFER_COUNT ) );
// Now the buffer is empty we can switch context if necessary.
if( xHigherPriorityTaskWoken )
{
taskYIELD ();
}
}
</pre>
*
* \defgroup xQueueSendFromISR xQueueSendFromISR
* \ingroup QueueManagement
*/
#define xQueueSendToFrontFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_FRONT )
/**
* queue. h
* <pre>
BaseType_t xQueueSendToBackFromISR(
QueueHandle_t xQueue,
const void *pvItemToQueue,
BaseType_t *pxHigherPriorityTaskWoken
);
</pre>
*
* This is a macro that calls xQueueGenericSendFromISR().
*
* Post an item to the back of a queue. It is safe to use this macro from
* within an interrupt service routine.
*
* Items are queued by copy not reference so it is preferable to only
* queue small items, especially when called from an ISR. In most cases
* it would be preferable to store a pointer to the item being queued.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param pxHigherPriorityTaskWoken xQueueSendToBackFromISR() will set
* *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
* to unblock, and the unblocked task has a priority higher than the currently
* running task. If xQueueSendToBackFromISR() sets this value to pdTRUE then
* a context switch should be requested before the interrupt is exited.
*
* @return pdTRUE if the data was successfully sent to the queue, otherwise
* errQUEUE_FULL.
*
* Example usage for buffered IO (where the ISR can obtain more than one value
* per call):
<pre>
void vBufferISR( void )
{
char cIn;
BaseType_t xHigherPriorityTaskWoken;
// We have not woken a task at the start of the ISR.
xHigherPriorityTaskWoken = pdFALSE;
// Loop until the buffer is empty.
do
{
// Obtain a byte from the buffer.
cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
// Post the byte.
xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
} while( portINPUT_BYTE( BUFFER_COUNT ) );
// Now the buffer is empty we can switch context if necessary.
if( xHigherPriorityTaskWoken )
{
taskYIELD ();
}
}
</pre>
*
* \defgroup xQueueSendFromISR xQueueSendFromISR
* \ingroup QueueManagement
*/
#define xQueueSendToBackFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
/**
* queue. h
* <pre>
BaseType_t xQueueOverwriteFromISR(
QueueHandle_t xQueue,
const void * pvItemToQueue,
BaseType_t *pxHigherPriorityTaskWoken
);
* </pre>
*
* A version of xQueueOverwrite() that can be used in an interrupt service
* routine (ISR).
*
* Only for use with queues that can hold a single item - so the queue is either
* empty or full.
*
* Post an item on a queue. If the queue is already full then overwrite the
* value held in the queue. The item is queued by copy, not by reference.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param pxHigherPriorityTaskWoken xQueueOverwriteFromISR() will set
* *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
* to unblock, and the unblocked task has a priority higher than the currently
* running task. If xQueueOverwriteFromISR() sets this value to pdTRUE then
* a context switch should be requested before the interrupt is exited.
*
* @return xQueueOverwriteFromISR() is a macro that calls
* xQueueGenericSendFromISR(), and therefore has the same return values as
* xQueueSendToFrontFromISR(). However, pdPASS is the only value that can be
* returned because xQueueOverwriteFromISR() will write to the queue even when
* the queue is already full.
*
* Example usage:
<pre>
QueueHandle_t xQueue;
void vFunction( void *pvParameters )
{
// Create a queue to hold one uint32_t value. It is strongly
// recommended *not* to use xQueueOverwriteFromISR() on queues that can
// contain more than one value, and doing so will trigger an assertion
// if configASSERT() is defined.
xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
}
void vAnInterruptHandler( void )
{
// xHigherPriorityTaskWoken must be set to pdFALSE before it is used.
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
uint32_t ulVarToSend, ulValReceived;
// Write the value 10 to the queue using xQueueOverwriteFromISR().
ulVarToSend = 10;
xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
// The queue is full, but calling xQueueOverwriteFromISR() again will still
// pass because the value held in the queue will be overwritten with the
// new value.
ulVarToSend = 100;
xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
// Reading from the queue will now return 100.
// ...
if( xHigherPriorityTaskWoken == pdTRUE )
{
// Writing to the queue caused a task to unblock and the unblocked task
// has a priority higher than or equal to the priority of the currently
// executing task (the task this interrupt interrupted). Perform a context
// switch so this interrupt returns directly to the unblocked task.
portYIELD_FROM_ISR(); // or portEND_SWITCHING_ISR() depending on the port.
}
}
</pre>
* \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR
* \ingroup QueueManagement
*/
#define xQueueOverwriteFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueOVERWRITE )
/**
* queue. h
* <pre>
BaseType_t xQueueSendFromISR(
QueueHandle_t xQueue,
const void *pvItemToQueue,
BaseType_t *pxHigherPriorityTaskWoken
);
</pre>
*
* This is a macro that calls xQueueGenericSendFromISR(). It is included
* for backward compatibility with versions of FreeRTOS.org that did not
* include the xQueueSendToBackFromISR() and xQueueSendToFrontFromISR()
* macros.
*
* Post an item to the back of a queue. It is safe to use this function from
* within an interrupt service routine.
*
* Items are queued by copy not reference so it is preferable to only
* queue small items, especially when called from an ISR. In most cases
* it would be preferable to store a pointer to the item being queued.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param pxHigherPriorityTaskWoken xQueueSendFromISR() will set
* *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
* to unblock, and the unblocked task has a priority higher than the currently
* running task. If xQueueSendFromISR() sets this value to pdTRUE then
* a context switch should be requested before the interrupt is exited.
*
* @return pdTRUE if the data was successfully sent to the queue, otherwise
* errQUEUE_FULL.
*
* Example usage for buffered IO (where the ISR can obtain more than one value
* per call):
<pre>
void vBufferISR( void )
{
char cIn;
BaseType_t xHigherPriorityTaskWoken;
// We have not woken a task at the start of the ISR.
xHigherPriorityTaskWoken = pdFALSE;
// Loop until the buffer is empty.
do
{
// Obtain a byte from the buffer.
cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
// Post the byte.
xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
} while( portINPUT_BYTE( BUFFER_COUNT ) );
// Now the buffer is empty we can switch context if necessary.
if( xHigherPriorityTaskWoken )
{
// Actual macro used here is port specific.
portYIELD_FROM_ISR ();
}
}
</pre>
*
* \defgroup xQueueSendFromISR xQueueSendFromISR
* \ingroup QueueManagement
*/
#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
/**
* queue. h
* <pre>
BaseType_t xQueueGenericSendFromISR(
QueueHandle_t xQueue,
const void *pvItemToQueue,
BaseType_t *pxHigherPriorityTaskWoken,
BaseType_t xCopyPosition
);
</pre>
*
* It is preferred that the macros xQueueSendFromISR(),
* xQueueSendToFrontFromISR() and xQueueSendToBackFromISR() be used in place
* of calling this function directly. xQueueGiveFromISR() is an
* equivalent for use by semaphores that don't actually copy any data.
*
* Post an item on a queue. It is safe to use this function from within an
* interrupt service routine.
*
* Items are queued by copy not reference so it is preferable to only
* queue small items, especially when called from an ISR. In most cases
* it would be preferable to store a pointer to the item being queued.
*
* @param xQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param pxHigherPriorityTaskWoken xQueueGenericSendFromISR() will set
* *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
* to unblock, and the unblocked task has a priority higher than the currently
* running task. If xQueueGenericSendFromISR() sets this value to pdTRUE then
* a context switch should be requested before the interrupt is exited.
*
* @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
* item at the back of the queue, or queueSEND_TO_FRONT to place the item
* at the front of the queue (for high priority messages).
*
* @return pdTRUE if the data was successfully sent to the queue, otherwise
* errQUEUE_FULL.
*
* Example usage for buffered IO (where the ISR can obtain more than one value
* per call):
<pre>
void vBufferISR( void )
{
char cIn;
BaseType_t xHigherPriorityTaskWokenByPost;
// We have not woken a task at the start of the ISR.
xHigherPriorityTaskWokenByPost = pdFALSE;
// Loop until the buffer is empty.
do
{
// Obtain a byte from the buffer.
cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
// Post each byte.
xQueueGenericSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWokenByPost, queueSEND_TO_BACK );
} while( portINPUT_BYTE( BUFFER_COUNT ) );
// Now the buffer is empty we can switch context if necessary. Note that the
// name of the yield function required is port specific.
if( xHigherPriorityTaskWokenByPost )
{
portYIELD_FROM_ISR();
}
}
</pre>
*
* \defgroup xQueueSendFromISR xQueueSendFromISR
* \ingroup QueueManagement
*/
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
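/*
 * Equivalent of xQueueGenericSendFromISR() used by semaphore "give"
 * operations that do not copy any data - see the note in the comment block
 * above.
 */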
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
/**
* queue. h
* <pre>
BaseType_t xQueueReceiveFromISR(
QueueHandle_t xQueue,
void *pvBuffer,
BaseType_t *pxTaskWoken
);
* </pre>
*
* Receive an item from a queue. It is safe to use this function from within an
* interrupt service routine.
*
* @param xQueue The handle to the queue from which the item is to be
* received.
*
* @param pvBuffer Pointer to the buffer into which the received item will
* be copied.
*
* @param pxTaskWoken A task may be blocked waiting for space to become
* available on the queue. If xQueueReceiveFromISR causes such a task to
* unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will
* remain unchanged.
*
* @return pdTRUE if an item was successfully received from the queue,
* otherwise pdFALSE.
*
* Example usage:
<pre>
QueueHandle_t xQueue;
// Function to create a queue and post some values.
void vAFunction( void *pvParameters )
{
char cValueToPost;
const TickType_t xTicksToWait = ( TickType_t )0xff;
// Create a queue capable of containing 10 characters.
xQueue = xQueueCreate( 10, sizeof( char ) );
if( xQueue == 0 )
{
// Failed to create the queue.
}
// ...
// Post some characters that will be used within an ISR. If the queue
// is full then this task will block for xTicksToWait ticks.
cValueToPost = 'a';
xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
cValueToPost = 'b';
xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
// ... keep posting characters ... this task may block when the queue
// becomes full.
cValueToPost = 'c';
xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
}
// ISR that outputs all the characters received on the queue.
void vISR_Routine( void )
{
BaseType_t xTaskWokenByReceive = pdFALSE;
char cRxedChar;
while( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar, &xTaskWokenByReceive) )
{
// A character was received. Output the character now.
vOutputCharacter( cRxedChar );
// If removing the character from the queue woke the task that was
// posting onto the queue xTaskWokenByReceive will have been set to
// pdTRUE. No matter how many times this loop iterates only one
// task will be woken.
}
if( xTaskWokenByReceive != pdFALSE )
{
taskYIELD ();
}
}
</pre>
* \defgroup xQueueReceiveFromISR xQueueReceiveFromISR
* \ingroup QueueManagement
*/
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
/*
* Utilities to query queues that are safe to use from an ISR. These utilities
* should be used only from within an ISR, or within a critical section.
*/
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
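/*
 * Illustrative sketch (not from the original header): draining a queue from
 * an ISR using the non-blocking query functions.  The queue handle is
 * hypothetical.
 <pre>
 QueueHandle_t xRxQueue;

 void vAnISR( void )
 {
 uint32_t ulValue;
 BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	// Neither call blocks, so both are safe in an interrupt context.
	while( xQueueIsQueueEmptyFromISR( xRxQueue ) == pdFALSE )
	{
		xQueueReceiveFromISR( xRxQueue, &ulValue, &xHigherPriorityTaskWoken );
		// ... process ulValue ...
	}
 }
 </pre>
 */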
/*
* xQueueAltGenericSend() is an alternative version of xQueueGenericSend().
* Likewise xQueueAltGenericReceive() is an alternative version of
* xQueueGenericReceive().
*
* The source code that implements the alternative (Alt) API is much
* simpler because it executes everything from within a critical section.
* This is the approach taken by many other RTOSes, but FreeRTOS.org has the
* preferred fully featured API too. The fully featured API has more
* complex code that takes longer to execute, but makes much less use of
* critical sections. Therefore the alternative API sacrifices interrupt
* responsiveness to gain execution speed, whereas the fully featured API
* sacrifices execution speed to ensure better interrupt responsiveness.
*/
BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition );
BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking );
#define xQueueAltSendToFront( xQueue, pvItemToQueue, xTicksToWait ) xQueueAltGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT )
#define xQueueAltSendToBack( xQueue, pvItemToQueue, xTicksToWait ) xQueueAltGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
#define xQueueAltReceive( xQueue, pvBuffer, xTicksToWait ) xQueueAltGenericReceive( ( xQueue ), ( pvBuffer ), ( xTicksToWait ), pdFALSE )
#define xQueueAltPeek( xQueue, pvBuffer, xTicksToWait ) xQueueAltGenericReceive( ( xQueue ), ( pvBuffer ), ( xTicksToWait ), pdTRUE )
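/*
 * Illustrative sketch (not from the original header): the Alt API mirrors the
 * standard calling convention, and is only compiled in when
 * configUSE_ALTERNATIVE_API is set to 1 in FreeRTOSConfig.h.  The handle
 * below is hypothetical.
 <pre>
 void vAltApiExample( QueueHandle_t xQueue )
 {
 uint32_t ulValueToSend = 10;
 uint32_t ulReceived;

	// Same arguments as xQueueSendToBack() / xQueueReceive().
	xQueueAltSendToBack( xQueue, &ulValueToSend, ( TickType_t ) 0 );
	xQueueAltReceive( xQueue, &ulReceived, ( TickType_t ) 10 );
 }
 </pre>
 */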
/*
* The functions defined above are for passing data to and from tasks. The
* functions below are the equivalents for passing data to and from
* co-routines.
*
* These functions are called from the co-routine macro implementation and
* should not be called directly from application code. Instead use the macro
* wrappers defined within croutine.h.
*/
BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken );
BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxTaskWoken );
BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait );
BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait );
/*
* For internal use only. Use xSemaphoreCreateMutex(),
* xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling
* these functions directly.
*/
QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION;
void* xQueueGetMutexHolder( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION;
/*
* For internal use only. Use xSemaphoreTakeMutexRecursive() or
* xSemaphoreGiveMutexRecursive() instead of calling these functions directly.
*/
BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
BaseType_t xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) PRIVILEGED_FUNCTION;
/*
* Reset a queue back to its original empty state. The return value is now
* obsolete and is always set to pdPASS.
*/
#define xQueueReset( xQueue ) xQueueGenericReset( xQueue, pdFALSE )
/*
* The registry is provided as a means for kernel aware debuggers to
* locate queues, semaphores and mutexes. Call vQueueAddToRegistry() to add
* a queue, semaphore or mutex handle to the registry if you want the handle
* to be available to a kernel aware debugger. If you are not using a kernel
* aware debugger then this function can be ignored.
*
* configQUEUE_REGISTRY_SIZE defines the maximum number of handles the
* registry can hold. configQUEUE_REGISTRY_SIZE must be greater than 0
* within FreeRTOSConfig.h for the registry to be available. Its value
* does not affect the number of queues, semaphores and mutexes that can be
* created - just the number that the registry can hold.
*
* @param xQueue The handle of the queue being added to the registry. This
* is the handle returned by a call to xQueueCreate(). Semaphore and mutex
* handles can also be passed in here.
*
* @param pcName The name to be associated with the handle. This is the
* name that the kernel aware debugger will display. The queue registry only
* stores a pointer to the string - so the string must be persistent (global or
* preferably in ROM/Flash), not on the stack.
*/
#if configQUEUE_REGISTRY_SIZE > 0
void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
#endif
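/*
 * Illustrative sketch (not from the original header): registering a queue so
 * that a kernel aware debugger can display it by name.  The handle and name
 * below are hypothetical.
 <pre>
 void vSetupQueueForDebugging( void )
 {
 QueueHandle_t xRxQueue;

	xRxQueue = xQueueCreate( 10, sizeof( uint32_t ) );
	if( xRxQueue != NULL )
	{
		// The registry stores only the string pointer, so a string literal
		// (which is persistent) is safe to pass here.
		vQueueAddToRegistry( xRxQueue, "RxQueue" );
	}
 }
 </pre>
 */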
/*
* The registry is provided as a means for kernel aware debuggers to
* locate queues, semaphores and mutexes. Call vQueueAddToRegistry() to add
* a queue, semaphore or mutex handle to the registry if you want the handle
* to be available to a kernel aware debugger, and vQueueUnregisterQueue() to
* remove the queue, semaphore or mutex from the register. If you are not using
* a kernel aware debugger then this function can be ignored.
*
* @param xQueue The handle of the queue being removed from the registry.
*/
#if configQUEUE_REGISTRY_SIZE > 0
void vQueueUnregisterQueue( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
#endif
/*
* Generic version of the queue creation function, which is in turn called by
* any queue, semaphore or mutex creation function or macro.
*/
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
#if( configNO_MALLOC )
QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType, void* buffer ) PRIVILEGED_FUNCTION;
#endif /* configNO_MALLOC */
/*
* Queue sets provide a mechanism to allow a task to block (pend) on a read
* operation from multiple queues or semaphores simultaneously.
*
* See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
* function.
*
* A queue set must be explicitly created using a call to xQueueCreateSet()
* before it can be used. Once created, standard FreeRTOS queues and semaphores
* can be added to the set using calls to xQueueAddToSet().
* xQueueSelectFromSet() is then used to determine which, if any, of the queues
* or semaphores contained in the set is in a state where a queue read or
* semaphore take operation would be successful.
*
* Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html
* for reasons why queue sets are very rarely needed in practice as there are
* simpler methods of blocking on multiple objects.
*
* Note 2: Blocking on a queue set that contains a mutex will not cause the
* mutex holder to inherit the priority of the blocked task.
*
* Note 3: An additional 4 bytes of RAM are required for each space in every
* queue added to a queue set. Therefore counting semaphores that have a high
* maximum count value should not be added to a queue set.
*
* Note 4: A receive (in the case of a queue) or take (in the case of a
* semaphore) operation must not be performed on a member of a queue set unless
* a call to xQueueSelectFromSet() has first returned a handle to that set member.
*
* @param uxEventQueueLength Queue sets store events that occur on
* the queues and semaphores contained in the set. uxEventQueueLength specifies
* the maximum number of events that can be queued at once. To be absolutely
* certain that events are not lost uxEventQueueLength should be set to the
* total sum of the length of the queues added to the set, where binary
* semaphores and mutexes have a length of 1, and counting semaphores have a
* length set by their maximum count value. Examples:
* + If a queue set is to hold a queue of length 5, another queue of length 12,
* and a binary semaphore, then uxEventQueueLength should be set to
* (5 + 12 + 1), or 18.
* + If a queue set is to hold three binary semaphores then uxEventQueueLength
* should be set to (1 + 1 + 1 ), or 3.
* + If a queue set is to hold a counting semaphore that has a maximum count of
* 5, and a counting semaphore that has a maximum count of 3, then
* uxEventQueueLength should be set to (5 + 3), or 8.
*
* @return If the queue set is created successfully then a handle to the created
* queue set is returned. Otherwise NULL is returned.
*/
QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION;
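/*
 * Illustrative sketch (not from the original header): sizing a queue set per
 * the rules above - a queue of length 5 plus a binary semaphore needs an
 * event queue length of 5 + 1.  Queue set support requires
 * configUSE_QUEUE_SETS to be set to 1 in FreeRTOSConfig.h; the handles below
 * are hypothetical.
 <pre>
 #define QUEUE_LENGTH	5
 #define BIN_SEM_LENGTH	1

 QueueSetHandle_t xQueueSet;
 QueueHandle_t xQueue;
 SemaphoreHandle_t xSemaphore;

 void vCreateQueueSet( void )
 {
	xQueueSet = xQueueCreateSet( QUEUE_LENGTH + BIN_SEM_LENGTH );
	xQueue = xQueueCreate( QUEUE_LENGTH, sizeof( uint32_t ) );
	xSemaphore = xSemaphoreCreateBinary();

	// Members can only be added to a set while they are empty, so add them
	// before they are used.
	xQueueAddToSet( ( QueueSetMemberHandle_t ) xQueue, xQueueSet );
	xQueueAddToSet( ( QueueSetMemberHandle_t ) xSemaphore, xQueueSet );
 }
 </pre>
 */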
/*
* Adds a queue or semaphore to a queue set that was previously created by a
* call to xQueueCreateSet().
*
* See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
* function.
*
* Note 1: A receive (in the case of a queue) or take (in the case of a
* semaphore) operation must not be performed on a member of a queue set unless
* a call to xQueueSelectFromSet() has first returned a handle to that set member.
*
* @param xQueueOrSemaphore The handle of the queue or semaphore being added to
* the queue set (cast to a QueueSetMemberHandle_t type).
*
* @param xQueueSet The handle of the queue set to which the queue or semaphore
* is being added.
*
* @return If the queue or semaphore was successfully added to the queue set
* then pdPASS is returned. If the queue could not be successfully added to the
* queue set because it is already a member of a different queue set then pdFAIL
* is returned.
*/
BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
/*
* Removes a queue or semaphore from a queue set. A queue or semaphore can only
* be removed from a set if the queue or semaphore is empty.
*
* See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
* function.
*
* @param xQueueOrSemaphore The handle of the queue or semaphore being removed
* from the queue set (cast to a QueueSetMemberHandle_t type).
*
* @param xQueueSet The handle of the queue set in which the queue or semaphore
* is included.
*
* @return If the queue or semaphore was successfully removed from the queue set
* then pdPASS is returned. If the queue was not in the queue set, or the
* queue (or semaphore) was not empty, then pdFAIL is returned.
*/
BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
/*
* xQueueSelectFromSet() selects from the members of a queue set a queue or
* semaphore that either contains data (in the case of a queue) or is available
* to take (in the case of a semaphore). xQueueSelectFromSet() effectively
* allows a task to block (pend) on a read operation on all the queues and
* semaphores in a queue set simultaneously.
*
* See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
* function.
*
* Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html
* for reasons why queue sets are very rarely needed in practice as there are
* simpler methods of blocking on multiple objects.
*
* Note 2: Blocking on a queue set that contains a mutex will not cause the
* mutex holder to inherit the priority of the blocked task.
*
* Note 3: A receive (in the case of a queue) or take (in the case of a
* semaphore) operation must not be performed on a member of a queue set unless
* a call to xQueueSelectFromSet() has first returned a handle to that set member.
*
* @param xQueueSet The queue set on which the task will (potentially) block.
*
* @param xTicksToWait The maximum time, in ticks, that the calling task will
* remain in the Blocked state (with other tasks executing) to wait for a member
* of the queue set to be ready for a successful queue read or semaphore take
* operation.
*
* @return xQueueSelectFromSet() will return the handle of a queue (cast to
* a QueueSetMemberHandle_t type) contained in the queue set that contains data,
* or the handle of a semaphore (cast to a QueueSetMemberHandle_t type) contained
* in the queue set that is available, or NULL if no such queue or semaphore
* exists before the specified block time expires.
*/
QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
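/*
 * Illustrative sketch (not from the original header): the select-then-read
 * pattern.  Per Note 3 above, only read from the member that
 * xQueueSelectFromSet() actually returned.  The handles below are the
 * hypothetical ones created in the sketch following xQueueCreateSet().
 <pre>
 void vAMonitoringTask( void *pvParameters )
 {
 QueueSetMemberHandle_t xActivatedMember;
 uint32_t ulReceived;

	for( ;; )
	{
		// Block indefinitely until one of the set members is ready.
		xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );

		if( xActivatedMember == ( QueueSetMemberHandle_t ) xQueue )
		{
			// Guaranteed not to block because this member was reported ready.
			xQueueReceive( xQueue, &ulReceived, 0 );
		}
		else if( xActivatedMember == ( QueueSetMemberHandle_t ) xSemaphore )
		{
			xSemaphoreTake( ( SemaphoreHandle_t ) xActivatedMember, 0 );
		}
	}
 }
 </pre>
 */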
/*
* A version of xQueueSelectFromSet() that can be used from an ISR.
*/
QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
/* Not public API functions. */
void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) PRIVILEGED_FUNCTION;
void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION;
UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
#ifdef __cplusplus
}
#endif
#endif /* QUEUE_H */
|
import React from "react"
const PlayerTable = ({ players }) => (
<div className="table-container u-grow">
<table className="table is-fullwidth">
<thead>
<tr>
<th>Pos</th>
<th>Name</th>
<th>Sp</th>
<th>S</th>
<th>N</th>
<th>Score</th>
</tr>
</thead>
<tbody>
{players.map(
({
player: { id, name },
score,
won,
lost,
gamesPlayed,
position,
}) => {
return (
<tr key={id}>
<td>{position}</td>
<td>{name}</td>
<td>{gamesPlayed}</td>
<td>{won}</td>
<td>{lost}</td>
<td>{score}</td>
</tr>
)
}
)}
</tbody>
</table>
</div>
)
export default PlayerTable
|
/*!
* OpenUI5
* (c) Copyright 2009-2019 SAP SE or an SAP affiliate company.
* Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
*/
sap.ui.define(["sap/ui/support/library"],function(e){"use strict";var t=e.Categories,i=e.Severity,a=e.Audiences;var n={id:"exclusiveValueDateValueBindingRule",audiences:[a.Control],categories:[t.Bindings],enabled:true,minversion:"1.28",title:"DatePicker: Only one of the value or dateValue properties can be bound",description:"Only one of the value or dateValue properties can be bound",resolution:"Choose and bind one of the properties value or dateValue. They both serve the same purpose",resolutionurls:[{text:"SAP Fiori Design Guidelines: DatePicker",href:"https://experience.sap.com/fiori-design-web/date-picker/"}],check:function(e,t,a){a.getElementsByClassName("sap.m.DatePicker").forEach(function(t){if(t.getBinding("value")&&t.getBinding("dateValue")){var a=t.getId(),n=t.getMetadata().getElementName();e.addIssue({severity:i.High,details:"DatePicker '"+n+"' ("+a+") has both value and dataValue properties bound.",context:{id:a}})}})}};var s={id:"dateTimeBindingConstraintRule",audiences:[a.Control],categories:[t.Bindings],enabled:true,minversion:"1.28",title:"DatePicker: sap.ui.model.odata.type.DateTime value binding should use displayFormat:'Date' constraint",description:"sap.ui.model.odata.type.DateTime value binding should use displayFormat:'Date' constraint",resolution:"If you are using binding type sap.ui.model.odata.type.DateTime you also need to specify binding constraint like this:\n"+"value: {path : 'path_to_value', type : 'sap.ui.model.odata.type.DateTime', constraints : {displayFormat : 'Date'}}",resolutionurls:[{text:"SAP Fiori Design Guidelines: DatePicker",href:"https://experience.sap.com/fiori-design-web/date-picker/"}],check:function(e,t,a){a.getElementsByClassName("sap.m.DatePicker").forEach(function(t){var a=t.getBinding("value");if(a&&a.getType()instanceof sap.ui.model.odata.type.DateTime&&(!a.getType().oConstraints||!a.getType().oConstraints.isDateOnly)){var n=t.getId(),s=t.getMetadata().getElementName();e.addIssue({severity:i.High,details:"DatePicker '"+s+"' ("+n+") is bound to a model of type sap.ui.model.odata.type.DateTime and the displayFormat is not 'Date'",context:{id:n}})}})}};var o={id:"jsonValueBindingIsCorrect",audiences:[a.Control],categories:[t.Bindings],enabled:true,minversion:"1.28",title:"DatePicker: Binding type sap.ui.model.odata.type.Date is not correct for JSON binding",description:"sap.ui.model.odata.type.Date is not correct for JSON binding. 
The correct type is sap.ui.model.type.Date",resolution:"Use binding type sap.ui.model.type.Date for JSON binding",resolutionurls:[{text:"SAP Fiori Design Guidelines: DatePicker",href:"https://experience.sap.com/fiori-design-web/date-picker/"}],check:function(e,t,a){a.getElementsByClassName("sap.m.DatePicker").forEach(function(t){var a=t.getBinding("value");if(a&&t.getModel()instanceof sap.ui.model.json.JSONModel&&a.getType()instanceof sap.ui.model.odata.type.Date){var n=t.getId(),s=t.getMetadata().getElementName();e.addIssue({severity:i.Medium,details:"DatePicker '"+s+"' ("+n+") is bound to a model of type sap.ui.model.odata.type.Date but it should be sap.ui.model.type.Date",context:{id:n}})}})}};var r={id:"dateValueHasHoursMinutesSeconds",audiences:[a.Control],categories:[t.Usage],enabled:true,minversion:"1.28",title:"DatePicker: dateValue has hours, minutes or seconds",description:"The dateValue contains JS Date object with hours, minutes and seconds different than 0, 0, 0, local time - warхing.",resolution:"Do not set hours, minutes and seconds, when you set dateValue",resolutionurls:[{text:"SAP Fiori Design Guidelines: DatePicker",href:"https://experience.sap.com/fiori-design-web/date-picker/"}],check:function(e,t,a){a.getElementsByClassName("sap.m.DatePicker").forEach(function(t){var a=t.getDateValue();if(a&&(a.getHours()||a.getMinutes()||a.getSeconds())){var n=t.getId(),s=t.getMetadata().getElementName();e.addIssue({severity:i.Medium,details:"DatePicker '"+s+"' ("+n+")'s dateValue has hours, minutes or seconds set",context:{id:n}})}})}};return[n,s,o,r]},true);
|
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from api.base import views
from api.base import settings
from api.base import versioning
default_version = versioning.decimal_version_to_url_path(settings.REST_FRAMEWORK['DEFAULT_VERSION'])
# Please keep URLs alphabetized for auto-generated documentation
urlpatterns = [
url(
r'^_/',
include(
[
url(r'^', include('waffle.urls')),
url(r'^wb/', include('api.wb.urls', namespace='wb')),
url(r'^banners/', include('api.banners.urls', namespace='banners')),
url(r'^crossref/', include('api.crossref.urls', namespace='crossref')),
url(r'^chronos/', include('api.chronos.urls', namespace='chronos')),
url(r'^meetings/', include('api.meetings.urls', namespace='meetings')),
url(r'^metrics/', include('api.metrics.urls', namespace='metrics')),
],
),
),
url(
'^(?P<version>(v2))/',
include(
[
url(r'^$', views.root, name='root'),
url(r'^status/', views.status_check, name='status_check'),
url(r'^actions/', include('api.actions.urls', namespace='actions')),
url(r'^addons/', include('api.addons.urls', namespace='addons')),
url(r'^alerts/', include(('api.alerts.urls', 'alerts'), namespace='alerts')),
url(r'^applications/', include('api.applications.urls', namespace='applications')),
url(r'^brands/', include('api.brands.urls', namespace='brands')),
url(r'^citations/', include('api.citations.urls', namespace='citations')),
url(r'^collections/', include('api.collections.urls', namespace='collections')),
url(r'^comments/', include('api.comments.urls', namespace='comments')),
url(r'^docs/', RedirectView.as_view(pattern_name=views.root), name='redirect-to-root', kwargs={'version': default_version}),
url(r'^draft_nodes/', include('api.draft_nodes.urls', namespace='draft_nodes')),
url(r'^draft_registrations/', include('api.draft_registrations.urls', namespace='draft_registrations')),
url(r'^files/', include('api.files.urls', namespace='files')),
url(r'^groups/', include('api.osf_groups.urls', namespace='groups')),
url(r'^guids/', include('api.guids.urls', namespace='guids')),
url(r'^identifiers/', include('api.identifiers.urls', namespace='identifiers')),
url(r'^institutions/', include('api.institutions.urls', namespace='institutions')),
url(r'^licenses/', include('api.licenses.urls', namespace='licenses')),
url(r'^logs/', include('api.logs.urls', namespace='logs')),
url(r'^metaschemas/', include('api.metaschemas.urls', namespace='metaschemas')),
url(r'^schemas/', include('api.schemas.urls', namespace='schemas')),
url(r'^nodes/', include('api.nodes.urls', namespace='nodes')),
url(r'^preprints/', include('api.preprints.urls', namespace='preprints')),
url(r'^preprint_providers/', include('api.preprint_providers.urls', namespace='preprint_providers')),
url(r'^regions/', include('api.regions.urls', namespace='regions')),
url(r'^providers/', include('api.providers.urls', namespace='providers')),
url(r'^registrations/', include('api.registrations.urls', namespace='registrations')),
url(r'^requests/', include(('api.requests.urls', 'requests'), namespace='requests')),
url(r'^scopes/', include('api.scopes.urls', namespace='scopes')),
url(r'^search/', include('api.search.urls', namespace='search')),
url(r'^sparse/', include('api.sparse.urls', namespace='sparse')),
url(r'^subjects/', include('api.subjects.urls', namespace='subjects')),
url(r'^subscriptions/', include('api.subscriptions.urls', namespace='subscriptions')),
url(r'^taxonomies/', include('api.taxonomies.urls', namespace='taxonomies')),
url(r'^test/', include('api.test.urls', namespace='test')),
url(r'^tokens/', include('api.tokens.urls', namespace='tokens')),
url(r'^users/', include('api.users.urls', namespace='users')),
url(r'^view_only_links/', include('api.view_only_links.urls', namespace='view-only-links')),
url(r'^wikis/', include('api.wikis.urls', namespace='wikis')),
url(r'^_waffle/', include(('api.waffle.urls', 'waffle'), namespace='waffle')),
],
),
),
url(r'^$', RedirectView.as_view(pattern_name=views.root), name='redirect-to-root', kwargs={'version': default_version}),
]
# Add django-silk URLs if it's in INSTALLED_APPS
if 'silk' in settings.INSTALLED_APPS:
urlpatterns += [
url(r'^silk/', include('silk.urls', namespace='silk')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
handler404 = views.error_404
|
/*
gpiBuddy.c
GameSpy Presence SDK
Dan "Mr. Pants" Schoenblum
Copyright 1999-2007 GameSpy Industries, Inc
devsupport@gamespy.com
***********************************************************************
Please see the GameSpy Presence SDK documentation for more information
**********************************************************************/
//INCLUDES
//////////
#include <stdlib.h>
#include <string.h>
#include "gpi.h"
//FUNCTIONS
///////////
static GPResult
gpiSendAuthBuddyRequest(
GPConnection * connection,
GPIProfile * profile
)
{
GPIConnection * iconnection = (GPIConnection*)*connection;
// Send the auth.
/////////////////
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\authadd\\");
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\sesskey\\");
gpiAppendIntToBuffer(connection, &iconnection->outputBuffer, iconnection->sessKey);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\fromprofileid\\");
gpiAppendIntToBuffer(connection, &iconnection->outputBuffer, profile->profileId);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\sig\\");
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, profile->authSig);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\final\\");
return GP_NO_ERROR;
}
GPResult
gpiProcessRecvBuddyMessage(
GPConnection * connection,
const char * input
)
{
char buffer[4096];
int type;
int profileid;
time_t date;
GPICallback callback;
GPIProfile * profile;
GPIBuddyStatus * buddyStatus;
char intValue[16];
char * str;
unsigned short port;
int productID;
GPIConnection * iconnection = (GPIConnection*)*connection;
char strTemp[max(GP_STATUS_STRING_LEN, GP_LOCATION_STRING_LEN)];
// Check the type of bm.
////////////////////////
if(!gpiValueForKey(input, "\\bm\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
type = atoi(buffer);
// Get the profile this is from.
////////////////////////////////
if(!gpiValueForKey(input, "\\f\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
profileid = atoi(buffer);
// Get the time.
////////////////
if(!gpiValueForKey(input, "\\date\\", buffer, sizeof(buffer)))
date = time(NULL);
else
date = atoi(buffer);
// What type of message is this?
////////////////////////////////
switch(type)
{
case GPI_BM_MESSAGE:
// Call the callback.
/////////////////////
callback = iconnection->callbacks[GPI_RECV_BUDDY_MESSAGE];
if(callback.callback != NULL)
{
GPRecvBuddyMessageArg * arg;
arg = (GPRecvBuddyMessageArg *)gsimalloc(sizeof(GPRecvBuddyMessageArg));
if(arg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
if(!gpiValueForKey(input, "\\msg\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
#ifndef GSI_UNICODE
arg->message = (char *)gsimalloc(strlen(buffer) + 1);
if(arg->message == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
strcpy(arg->message, buffer);
arg->profile = (GPProfile)profileid;
arg->date = (unsigned int)date;
#else
arg->message = (unsigned short*)gsimalloc(strlen(buffer)*2+2);
if(arg->message == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
UTF8ToUCS2String(buffer, arg->message);
arg->profile = (GPProfile)profileid;
arg->date = (unsigned int)date;
#endif
CHECK_RESULT(gpiAddCallback(connection, callback, arg, NULL, GPI_ADD_MESSAGE));
}
break;
case GPI_BM_UTM:
// Call the callback.
/////////////////////
callback = iconnection->callbacks[GPI_RECV_BUDDY_UTM];
if(callback.callback != NULL)
{
GPRecvBuddyUTMArg * arg;
arg = (GPRecvBuddyUTMArg *)gsimalloc(sizeof(GPRecvBuddyUTMArg));
if(arg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
if(!gpiValueForKey(input, "\\msg\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
#ifndef GSI_UNICODE
arg->message = (char *)gsimalloc(strlen(buffer) + 1);
if(arg->message == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
strcpy(arg->message, buffer);
arg->profile = (GPProfile)profileid;
arg->date = (unsigned int)date;
#else
arg->message = (unsigned short*)gsimalloc(strlen(buffer)*2+2);
if(arg->message == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
UTF8ToUCS2String(buffer, arg->message);
arg->profile = (GPProfile)profileid;
arg->date = (unsigned int)date;
#endif
CHECK_RESULT(gpiAddCallback(connection, callback, arg, NULL, GPI_ADD_BUDDYUTM));
}
break;
case GPI_BM_REQUEST:
// Get the profile, adding if needed.
/////////////////////////////////////
profile = gpiProfileListAdd(connection, profileid);
if(!profile)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
// Get the reason.
//////////////////
if(!gpiValueForKey(input, "\\msg\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Find where the sig starts.
/////////////////////////////
str = strstr(buffer, "|signed|");
if(str == NULL)
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Get the sig out of the message.
//////////////////////////////////
*str = '\0';
str += 8;
if(strlen(str) != 32)
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
freeclear(profile->authSig);
profile->authSig = goastrdup(str);
profile->requestCount++;
// Call the callback.
/////////////////////
callback = iconnection->callbacks[GPI_RECV_BUDDY_REQUEST];
if(callback.callback != NULL)
{
GPRecvBuddyRequestArg * arg;
arg = (GPRecvBuddyRequestArg *)gsimalloc(sizeof(GPRecvBuddyRequestArg));
if(arg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
#ifndef GSI_UNICODE
strzcpy(arg->reason, buffer, GP_REASON_LEN);
#else
UTF8ToUCS2String(buffer, arg->reason);
#endif
arg->profile = (GPProfile)profileid;
arg->date = (unsigned int)date;
CHECK_RESULT(gpiAddCallback(connection, callback, arg, NULL, GPI_ADD_BUDDDYREQUEST));
}
break;
case GPI_BM_AUTH:
// call the callback
callback = iconnection->callbacks[GPI_RECV_BUDDY_AUTH];
if(callback.callback != NULL)
{
GPRecvBuddyAuthArg * arg;
arg = (GPRecvBuddyAuthArg *)gsimalloc(sizeof(GPRecvBuddyAuthArg));
if (arg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
arg->profile = (GPProfile)profileid;
arg->date = (unsigned int)date;
CHECK_RESULT(gpiAddCallback(connection, callback, arg, NULL, GPI_ADD_BUDDYAUTH));
}
break;
case GPI_BM_REVOKE:
// call the callback
callback = iconnection->callbacks[GPI_RECV_BUDDY_REVOKE];
if(callback.callback != NULL)
{
GPRecvBuddyRevokeArg * arg;
arg = (GPRecvBuddyRevokeArg *)gsimalloc(sizeof(GPRecvBuddyRevokeArg));
if (arg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
arg->profile = (GPProfile)profileid;
arg->date = (unsigned int)date;
CHECK_RESULT(gpiAddCallback(connection, callback, arg, NULL, GPI_ADD_BUDDYREVOKE));
}
break;
case GPI_BM_STATUS:
// Get the profile, adding if needed.
/////////////////////////////////////
profile = gpiProfileListAdd(connection, profileid);
if(!profile)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
// Make sure profile wasn't blocked prior to getting the status update
//////////////////////////////////////////////////////////////////////
if (!profile->blocked)
{
// This is a buddy.
///////////////////
if(!profile->buddyStatus)
{
profile->buddyStatus = (GPIBuddyStatus *)gsimalloc(sizeof(GPIBuddyStatus));
if(!profile->buddyStatus)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
memset(profile->buddyStatus, 0, sizeof(GPIBuddyStatus));
if (profile->buddyStatusInfo)
{
profile->buddyStatus->buddyIndex = profile->buddyStatusInfo->buddyIndex;
gpiRemoveBuddyStatusInfo(profile->buddyStatusInfo);
profile->buddyStatusInfo = NULL;
}
else
profile->buddyStatus->buddyIndex = iconnection->profileList.numBuddies++;
}
// Get the buddy status.
////////////////////////
buddyStatus = profile->buddyStatus;
// Get the msg.
///////////////
if(!gpiValueForKey(input, "\\msg\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Get the status.
//////////////////
if(!gpiValueForKey(buffer, "|s|", intValue, sizeof(intValue)))
{
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
}
else
{
buddyStatus->status = (GPEnum)atoi(intValue);
}
// Get the status string.
/////////////////////////
freeclear(buddyStatus->statusString);
if(!gpiValueForKey(buffer, "|ss|", strTemp, GP_STATUS_STRING_LEN))
strTemp[0] = '\0';
buddyStatus->statusString = goastrdup(strTemp);
if(!buddyStatus->statusString)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
// Get the location string.
///////////////////////////
freeclear(buddyStatus->locationString);
if(!gpiValueForKey(buffer, "|ls|", strTemp, GP_LOCATION_STRING_LEN))
strTemp[0] = '\0';
buddyStatus->locationString = goastrdup(strTemp);
if(!buddyStatus->locationString)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
// Get the ip.
//////////////
if(!gpiValueForKey(buffer, "|ip|", intValue, sizeof(intValue)))
buddyStatus->ip = 0;
else
buddyStatus->ip = htonl((unsigned int)atoi(intValue));
// Get the port.
////////////////
if(!gpiValueForKey(buffer, "|p|", intValue, sizeof(intValue)))
buddyStatus->port = 0;
else
{
port = (unsigned short)atoi(intValue);
buddyStatus->port = htons(port);
}
// Get the quiet mode flags.
////////////////////////////
if(!gpiValueForKey(buffer, "|qm|", intValue, sizeof(intValue)))
buddyStatus->quietModeFlags = GP_SILENCE_NONE;
else
buddyStatus->quietModeFlags = (GPEnum)atoi(intValue);
// Call the callback.
/////////////////////
callback = iconnection->callbacks[GPI_RECV_BUDDY_STATUS];
if(callback.callback != NULL)
{
GPRecvBuddyStatusArg * arg;
arg = (GPRecvBuddyStatusArg *)gsimalloc(sizeof(GPRecvBuddyStatusArg));
if(arg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
arg->profile = (GPProfile)profileid;
arg->index = buddyStatus->buddyIndex;
arg->date = (unsigned int)date;
CHECK_RESULT(gpiAddCallback(connection, callback, arg, NULL, GPI_ADD_STATUS));
}
}
break;
case GPI_BM_INVITE:
// Get the msg.
///////////////
if(!gpiValueForKey(input, "\\msg\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Find the productid.
//////////////////////
str = strstr(buffer, "|p|");
if(str == NULL)
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Skip the |p|.
////////////////
str += 3;
if(str[0] == '\0')
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Get the productid.
/////////////////////
productID = atoi(str);
// Find the location string (optional - older versions won't have)
str = strstr(buffer, "|l|");
if(str != NULL)
strzcpy(strTemp, (str+3), sizeof(strTemp));
else
strTemp[0] = '\0'; // no location, set to empty string
// Call the callback.
/////////////////////
callback = iconnection->callbacks[GPI_RECV_GAME_INVITE];
if(callback.callback != NULL)
{
GPRecvGameInviteArg * arg;
arg = (GPRecvGameInviteArg *)gsimalloc(sizeof(GPRecvGameInviteArg));
if(arg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
arg->profile = (GPProfile)profileid;
arg->productID = productID;
#ifdef GSI_UNICODE
AsciiToUCS2String(strTemp, arg->location);
#else
strcpy(arg->location, strTemp);
#endif
CHECK_RESULT(gpiAddCallback(connection, callback, arg, NULL, 0));
}
break;
case GPI_BM_PING:
// Get the msg.
///////////////
if(!gpiValueForKey(input, "\\msg\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Send back a pong.
////////////////////
gpiSendBuddyMessage(connection, profileid, GPI_BM_PONG, "1", 0, NULL);
break;
#ifndef NOFILE
case GPI_BM_PONG:
// Lets the transfers handle this.
//////////////////////////////////
gpiTransfersHandlePong(connection, profileid, NULL);
break;
#endif
}
return GP_NO_ERROR;
}
GPResult gpiProcessRecvBuddyStatusInfo(GPConnection *connection, const char *input)
{
char buffer[1024];
int profileid;
time_t date;
GPICallback callback;
GPIProfile * profile;
GPIBuddyStatusInfo * buddyStatusInfo;
GPIConnection * iconnection = (GPIConnection*)*connection;
// This is what the message should look like. Its broken up for easy viewing.
//
// "\bsi\\state\\profile\\bip\\bport\\hostip\\hprivip\"
// "\qport\\hport\\sessflags\\rstatus\\gameType\"
// "\gameVnt\\gameMn\\product\\qmodeflags\"
////////////////////////////////
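// For illustration only (not from the original source) - a message of this
// shape, with hypothetical field values, might look like:
//
// "\bsi\\state\1\profile\12345\bip\3232235777\bport\6500\hostip\0\hprivip\0"
// "\qport\0\hport\0\sessflags\0\rstatus\Waiting\gameType\ctf"
// "\gameVnt\default\gameMn\map1\product\0\qmodeflags\0"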
date = time(NULL);
// Get the buddy's profile
////////////////////////////////
if(!gpiValueForKey(input, "\\profile\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
profileid = atoi(buffer);
// Get the profile from the SDK's list, adding it if needed.
/////////////////////////////////////
profile = gpiProfileListAdd(connection, profileid);
if(!profile)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
// Make sure profile wasn't blocked prior to getting the status update
//////////////////////////////////////////////////////////////////////
if (!profile->blocked)
{
// This is a buddy.
///////////////////
if(!profile->buddyStatusInfo)
{
profile->buddyStatusInfo = (GPIBuddyStatusInfo *)gsimalloc(sizeof(GPIBuddyStatusInfo));
if(!profile->buddyStatusInfo)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
memset(profile->buddyStatusInfo, 0, sizeof(GPIBuddyStatusInfo));
if (profile->buddyStatus)
{
profile->buddyStatusInfo->buddyIndex = profile->buddyStatus->buddyIndex;
gpiRemoveBuddyStatus(profile->buddyStatus);
profile->buddyStatus = NULL;
}
else
profile->buddyStatusInfo->buddyIndex = iconnection->profileList.numBuddies++;
profile->buddyStatusInfo->extendedInfoKeys = ArrayNew(sizeof(GPIKey), GPI_INITIAL_NUM_KEYS, gpiStatusInfoKeyFree);
if (!profile->buddyStatusInfo->extendedInfoKeys)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
}
// extract the buddy status information and
// fill in appropriate information.
/////////////////////////////////////////////
buddyStatusInfo = profile->buddyStatusInfo;
if (!gpiValueForKey(input, "\\state\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->statusState = (GPEnum)atoi(buffer);
if (!gpiValueForKey(input, "\\bip\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->buddyIp = htonl((unsigned int)atoi(buffer));
if (!gpiValueForKey(input, "\\bport\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->buddyPort = (unsigned short)atoi(buffer);
if (!gpiValueForKey(input, "\\hostip\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->hostIp = htonl((unsigned int)atoi(buffer));
if (!gpiValueForKey(input, "\\hprivip\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->hostPrivateIp = htonl((unsigned int)atoi(buffer));
if (!gpiValueForKey(input, "\\qport\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->queryPort = (unsigned short)atoi(buffer);
if (!gpiValueForKey(input, "\\hport\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->hostPort = (unsigned short)atoi(buffer);
if (!gpiValueForKey(input, "\\sessflags\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->sessionFlags = (unsigned int)atoi(buffer);
freeclear(buddyStatusInfo->richStatus);
if (!gpiValueForKey(input, "\\rstatus\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->richStatus = goastrdup(buffer);
freeclear(buddyStatusInfo->gameType);
if (!gpiValueForKey(input, "\\gameType\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->gameType = goastrdup(buffer);
freeclear(buddyStatusInfo->gameVariant);
if (!gpiValueForKey(input, "\\gameVnt\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->gameVariant = goastrdup(buffer);
freeclear(buddyStatusInfo->gameMapName);
if (!gpiValueForKey(input, "\\gameMn\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->gameMapName = goastrdup(buffer);
if (!gpiValueForKey(input, "\\product\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->productId = (int)atoi(buffer);
if (!gpiValueForKey(input, "\\qmodeflags\\", buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
buddyStatusInfo->quietModeFlags = (GPEnum)atoi(buffer);
callback = iconnection->callbacks[GPI_RECV_BUDDY_STATUS];
if (callback.callback != NULL)
{
GPRecvBuddyStatusArg *anArg;
anArg = (GPRecvBuddyStatusArg *)gsimalloc(sizeof(GPRecvBuddyStatusArg));
if (anArg == NULL)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
anArg->date = (unsigned int)date;
anArg->index = buddyStatusInfo->buddyIndex;
anArg->profile = profileid;
CHECK_RESULT(gpiAddCallback(connection, callback, anArg, NULL, 0));
}
}
return GP_NO_ERROR;
}
GPResult
gpiProcessRecvBuddyList(
GPConnection * connection,
const char * input
)
{
int i=0, j=0;
int num = 0;
int index = 0;
char c;
char *str = NULL;
char buffer[512];
GPIProfile * profile;
GPProfile profileid;
GPIConnection * iconnection = (GPIConnection*)*connection;
// Check for an error.
//////////////////////
if(gpiCheckForError(connection, input, GPITrue))
return GP_SERVER_ERROR;
// Process Buddy List Retrieval msg - Format like:
/* ===============================================
\bdy\<num in list>\list\<block list - comma delimited>\final\
=============================================== */
if(!gpiValueForKeyWithIndex(input, "\\bdy\\", &index, buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
num = atoi(buffer);
// Check to make sure list is there
///////////////////////////////////
str = strstr(input, "\\list\\");
if (str == NULL)
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
// Then increment index to get ready for parsing
////////////////////////////////////////////////
str += 6;
index += 6;
for (i=0; i < num; i++)
{
if (i==0)
{
// Manually grab first profile in list - comma delimiter
////////////////////////////////////////////////////////
for(j=0 ; (j < sizeof(buffer)) && ((c = str[j]) != '\0') && (c != ',') ; j++)
{
buffer[j] = c;
}
buffer[j] = '\0';
index += j;
}
else
{
if(!gpiValueForKeyWithIndex(input, ",", &index, buffer, sizeof(buffer)))
CallbackFatalError(connection, GP_NETWORK_ERROR, GP_PARSE, "Unexpected data was received from the server.");
}
profileid = atoi(buffer);
// Get the profile, adding if needed.
/////////////////////////////////////
profile = gpiProfileListAdd(connection, profileid);
if(!profile)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
// Mark as offline buddy for now until we get the real status
/////////////////////////////////////////////////////////////
#ifdef GP_NEW_STATUS_INFO
// Use new status info as placeholder
profile->buddyStatusInfo = (GPIBuddyStatusInfo *)gsimalloc(sizeof(GPIBuddyStatusInfo));
if(!profile->buddyStatusInfo)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
memset(profile->buddyStatusInfo, 0, sizeof(GPIBuddyStatusInfo));
profile->buddyStatusInfo->extendedInfoKeys = ArrayNew(sizeof(GPIKey), GPI_INITIAL_NUM_KEYS, gpiStatusInfoKeyFree);
if (!profile->buddyStatusInfo->extendedInfoKeys)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
profile->buddyStatusInfo->buddyIndex = iconnection->profileList.numBuddies++;
profile->buddyStatusInfo->statusState = GP_OFFLINE;
#else
// Use buddy status as placeholder
profile->buddyStatus = (GPIBuddyStatus *)gsimalloc(sizeof(GPIBuddyStatus));
if(!profile->buddyStatus)
Error(connection, GP_MEMORY_ERROR, "Out of memory.");
memset(profile->buddyStatus, 0, sizeof(GPIBuddyStatus));
profile->buddyStatus->buddyIndex = iconnection->profileList.numBuddies++;
profile->buddyStatus->status = GP_OFFLINE;
#endif
}
return GP_NO_ERROR;
}
GPResult
gpiSendServerBuddyMessage(
GPConnection * connection,
int profileid,
int type,
const char * message
)
{
char buffer[3501];
GPIConnection * iconnection = (GPIConnection*)*connection;
// Copy the message into an internal buffer.
////////////////////////////////////////////
strzcpy(buffer, message, sizeof(buffer));
// Setup the message.
/////////////////////
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\bm\\");
gpiAppendIntToBuffer(connection, &iconnection->outputBuffer, type);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\sesskey\\");
gpiAppendIntToBuffer(connection, &iconnection->outputBuffer, iconnection->sessKey);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\t\\");
gpiAppendIntToBuffer(connection, &iconnection->outputBuffer, profileid);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\msg\\");
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, buffer);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\final\\");
return GP_NO_ERROR;
}
GPResult
gpiSendBuddyMessage(
GPConnection * connection,
int profileid,
int type,
const char * message,
int sendOption,
GPIPeerOp *peerOp
)
{
GPIPeer * peer;
GPIProfile * profile;
//GPIConnection *iconnection = (GPIConnection *)*connection;
peer = gpiGetPeerByProfile(connection, profileid);
if(!peer)
{
// Check if we should send this through the server.
////////////////////////////////////////////////////
if(!gpiGetProfile(connection, profileid, &profile) ||
(!profile->buddyStatusInfo || !profile->buddyStatusInfo->buddyPort))
{
if (sendOption == GP_DONT_ROUTE)
return GP_NETWORK_ERROR;
return gpiSendServerBuddyMessage(connection, profileid, type, message);
}
// Create a new peer connection for this message.
/////////////////////////////////////////////////
peer = gpiAddPeer(connection, profileid, GPITrue);
if(!peer)
return GP_MEMORY_ERROR;
// Check if we need a sig.
//////////////////////////
if(!profile->peerSig)
{
// Get the sig.
///////////////
CHECK_RESULT(gpiPeerGetSig(connection, peer));
}
else
{
// Try to connect to the peer.
//////////////////////////////
CHECK_RESULT(gpiPeerStartConnect(connection, peer));
}
}
else if (peer->state == GPI_PEER_DISCONNECTED)
{
if (gpiGetProfile(connection, profileid, &profile))
{
// clear the buddy port to prevent future messages from
// being sent via UDP layer
if (profile->buddyStatusInfo)
profile->buddyStatusInfo->buddyPort = 0;
// send the message through the server
if (sendOption == GP_DONT_ROUTE)
return GP_NETWORK_ERROR;
if (type < 100)
return gpiSendServerBuddyMessage(connection, profileid, type, message);
}
}
if (peerOp)
{
gpiPeerAddOp(peer, peerOp);
}
// Copy the message.
////////////////////
CHECK_RESULT(gpiPeerAddMessage(connection, peer, type, message));
return GP_NO_ERROR;
}
GPResult gpiBuddyHandleKeyRequest(GPConnection *connection, GPIPeer *peer)
{
char *message;
// get all the keys and put them in the message part of bm
//////////////////////////////////////////////////////////
CHECK_RESULT(gpiSaveKeysToBuffer(connection, &message));
// Done in case we haven't set any keys
if (message == NULL)
message = "";
CHECK_RESULT(gpiSendBuddyMessage(connection, peer->profile, GPI_BM_KEYS_REPLY, message, GP_DONT_ROUTE, NULL));
	if (strcmp(message, "") != 0)
freeclear(message);
return GP_NO_ERROR;
}
GPResult gpiBuddyHandleKeyReply(GPConnection *connection, GPIPeer *peer, char *buffer)
{
GPIProfile *pProfile;
// Get the profile object to store the keys internally
//////////////////////////////////////////////////////
if(!gpiGetProfile(connection, peer->profile, &pProfile))
Error(connection, GP_PARAMETER_ERROR, "Invalid profile.");
	// Handle an empty keys reply (the remote peer has no extended keys set)
if (strcmp(buffer, "") == 0)
{
GPIPeerOp *anIterator;
for (anIterator = peer->peerOpQueue.first; anIterator != NULL; anIterator = anIterator->next)
if (anIterator->type == GPI_BM_KEYS_REQUEST)
break;
if (!anIterator)
{
return GP_NO_ERROR;
}
else if (anIterator->type == GPI_BM_KEYS_REQUEST && anIterator->callback)
{
GPGetBuddyStatusInfoKeysArg *arg = (GPGetBuddyStatusInfoKeysArg *)gsimalloc(sizeof(GPGetBuddyStatusInfoKeysArg));
GPICallback callback;
callback.callback = anIterator->callback;
callback.param = anIterator->userData;
arg->keys = NULL;
arg->numKeys = 0;
arg->values = NULL;
arg->profile = peer->profile;
gpiAddCallback(connection, callback, arg, NULL, 0);
gpiPeerRemoveOp(peer, anIterator);
}
}
else
{
int decodedLen = 0,
index = 0, numKeys, i;
char keyName[512];
char keyVal[512];
char decodeKey[512];
char decodeVal[512];
gsi_char **keys;
gsi_char **values;
GPIPeerOp *anIterator;
char *checkKey = NULL;
// start by getting the number of keys
gpiReadKeyAndValue(connection, buffer, &index, keyName, keyVal);
// do not continue further if the header is missing
if (strcmp(keyName, "keys") != 0)
CallbackError(connection, GP_NETWORK_ERROR, GP_PARSE, "Error reading keys reply message");
numKeys = atoi(keyVal);
if (numKeys == 0)
{
GPIPeerOp *anIterator;
for (anIterator = peer->peerOpQueue.first; anIterator != NULL; anIterator = anIterator->next)
if (anIterator->type == GPI_BM_KEYS_REQUEST)
break;
if (!anIterator)
{
return GP_NO_ERROR;
}
else if (anIterator->type == GPI_BM_KEYS_REQUEST && anIterator->callback)
{
GPGetBuddyStatusInfoKeysArg *arg = (GPGetBuddyStatusInfoKeysArg *)gsimalloc(sizeof(GPGetBuddyStatusInfoKeysArg));
GPICallback callback;
callback.callback = anIterator->callback;
callback.param = anIterator->userData;
arg->keys = NULL;
arg->numKeys = 0;
arg->values = NULL;
arg->profile = peer->profile;
gpiAddCallback(connection, callback, arg, NULL, 0);
gpiPeerRemoveOp(peer, anIterator);
}
}
else
{
keys = (gsi_char **)gsimalloc(sizeof(gsi_char *) * numKeys);
values = (gsi_char **)gsimalloc(sizeof(gsi_char *) * numKeys);
for (i = 0; i < numKeys; i++)
{
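			// Each key/value pair arrives Base64-encoded; decode both halves
			// before storing them in the profile's extended info key list.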
gpiReadKeyAndValue(connection, buffer, &index, keyName, keyVal);
B64Decode(keyName, decodeKey, (int)strlen(keyName), &decodedLen, 2);
decodeKey[decodedLen] = '\0';
B64Decode(keyVal, decodeVal, (int)strlen(keyVal), &decodedLen, 2);
decodeVal[decodedLen] = '\0';
#ifdef GSI_UNICODE
keys[i] = UTF8ToUCS2StringAlloc(decodeKey);
values[i]= UTF8ToUCS2StringAlloc(decodeVal);
#else
keys[i] = goastrdup(decodeKey);
values[i] = goastrdup(decodeVal);
#endif
if (gpiStatusInfoCheckKey(connection, pProfile->buddyStatusInfo->extendedInfoKeys, decodeKey, &checkKey) == GP_NO_ERROR
&& checkKey == NULL)
{
gpiStatusInfoAddKey(connection, pProfile->buddyStatusInfo->extendedInfoKeys, decodeKey, decodeVal);
}
else
{
gpiStatusInfoSetKey(connection, pProfile->buddyStatusInfo->extendedInfoKeys, decodeKey, decodeVal);
}
}
for (anIterator = peer->peerOpQueue.first; anIterator != NULL; anIterator = anIterator->next)
if (anIterator->type == GPI_BM_KEYS_REQUEST)
break;
if (!anIterator)
{
return GP_NO_ERROR;
}
else if (anIterator->type == GPI_BM_KEYS_REQUEST && anIterator->callback)
{
GPICallback callback;
GPGetBuddyStatusInfoKeysArg *arg = (GPGetBuddyStatusInfoKeysArg *)gsimalloc(sizeof(GPGetBuddyStatusInfoKeysArg));
callback.callback = anIterator->callback;
callback.param = anIterator->userData;
// allocate a key array that points to each extended info key for that player
arg->numKeys = numKeys;
arg->keys = keys;
arg->values = values;
arg->profile = peer->profile;
gpiAddCallback(connection, callback, arg, NULL, GPI_ADD_BUDDYKEYS);
gpiPeerRemoveOp(peer, anIterator);
}
}
}
return GP_NO_ERROR;
}
GPResult gpiAuthBuddyRequest
(
GPConnection * connection,
GPProfile profile
)
{
GPIProfile * pProfile;
GPIConnection * iconnection = (GPIConnection*)*connection;
// Get the profile object.
//////////////////////////
if(!gpiGetProfile(connection, profile, &pProfile))
Error(connection, GP_PARAMETER_ERROR, "Invalid profile.");
// Check for a valid sig.
/////////////////////////
if(!pProfile->authSig)
Error(connection, GP_PARAMETER_ERROR, "Invalid profile.");
// Send the request.
////////////////////
CHECK_RESULT(gpiSendAuthBuddyRequest(connection, pProfile));
// freeclear the sig if no more requests.
////////////////////////////////////
pProfile->requestCount--;
if(!iconnection->infoCaching && (pProfile->requestCount <= 0))
{
freeclear(pProfile->authSig);
if(gpiCanFreeProfile(pProfile))
gpiRemoveProfile(connection, pProfile);
}
return GP_NO_ERROR;
}
GPIBool
gpiFixBuddyIndices(
GPConnection * connection,
GPIProfile * profile,
void * data
)
{
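	// The base index is passed through the void * map argument; cast it back
	// through unsigned long where pointers are wider than int (the PS2 build
	// casts directly, since its pointers are 32-bit).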
#ifndef _PS2
int baseIndex = (int)(unsigned long)data;
#else
int baseIndex = (int)data;
#endif
GSI_UNUSED(connection);
if(profile->buddyStatus && (profile->buddyStatus->buddyIndex > baseIndex))
profile->buddyStatus->buddyIndex--;
else if (profile->buddyStatusInfo && profile->buddyStatusInfo->buddyIndex > baseIndex)
profile->buddyStatusInfo->buddyIndex--;
return GPITrue;
}
GPResult
gpiDeleteBuddy(
GPConnection * connection,
GPProfile profile,
GPIBool sendServerRequest
)
{
GPIProfile * pProfile;
GPIConnection * iconnection = (GPIConnection*)*connection;
int index;
// Get the profile object.
//////////////////////////
if(!gpiGetProfile(connection, profile, &pProfile))
Error(connection, GP_PARAMETER_ERROR, "Invalid profile.");
// Check that this is a buddy.
//////////////////////////////
// Removed - 092404 BED - User could be a buddy even though we don't have the status
//if(!pProfile->buddyStatus)
// Error(connection, GP_PARAMETER_ERROR, "Profile not a buddy.");
// Send the request.
////////////////////
if (GPITrue == sendServerRequest)
{
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\delbuddy\\");
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\sesskey\\");
gpiAppendIntToBuffer(connection, &iconnection->outputBuffer, iconnection->sessKey);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\delprofileid\\");
gpiAppendIntToBuffer(connection, &iconnection->outputBuffer, pProfile->profileId);
gpiAppendStringToBuffer(connection, &iconnection->outputBuffer, "\\final\\");
}
// Need to fix up the buddy indexes.
////////////////////////////////////
if (pProfile->buddyStatus)
{
index = pProfile->buddyStatus->buddyIndex;
assert(index >= 0);
freeclear(pProfile->buddyStatus->statusString);
freeclear(pProfile->buddyStatus->locationString);
freeclear(pProfile->buddyStatus);
if(gpiCanFreeProfile(pProfile))
gpiRemoveProfile(connection, pProfile);
iconnection->profileList.numBuddies--;
assert(iconnection->profileList.numBuddies >= 0);
#ifndef _PS2
gpiProfileMap(connection, gpiFixBuddyIndices, (void *)(unsigned long)index);
#else
gpiProfileMap(connection, gpiFixBuddyIndices, (void *)index);
#endif
}
if (pProfile->buddyStatusInfo)
{
index = pProfile->buddyStatusInfo->buddyIndex;
assert(index >= 0);
freeclear(pProfile->buddyStatusInfo->richStatus);
freeclear(pProfile->buddyStatusInfo->gameType);
freeclear(pProfile->buddyStatusInfo->gameVariant);
freeclear(pProfile->buddyStatusInfo->gameMapName);
		// Free the extended info keys before releasing the struct that owns them.
		if (pProfile->buddyStatusInfo->extendedInfoKeys)
		{
			ArrayFree(pProfile->buddyStatusInfo->extendedInfoKeys);
			pProfile->buddyStatusInfo->extendedInfoKeys = NULL;
		}
		freeclear(pProfile->buddyStatusInfo);
if(gpiCanFreeProfile(pProfile))
gpiRemoveProfile(connection, pProfile);
iconnection->profileList.numBuddies--;
assert(iconnection->profileList.numBuddies >= 0);
#ifndef _PS2
gpiProfileMap(connection, gpiFixBuddyIndices, (void *)(unsigned long)index);
#else
gpiProfileMap(connection, gpiFixBuddyIndices, (void *)index);
#endif
}
return GP_NO_ERROR;
}
|
var ImportDialog = new Class({
MAX_FILE_SIZE: 100 * 1024 * 1024, // 100 MB
INITIAL_CHUNK_READ: 4 * 1024, // 4 KB
initialize: function(root, status) {
this._root = root;
this._status = status;
this._file = null;
this._currentStep = null;
this._lines = null;
this._picked = null;
this._columns = null;
this._timeColumns = null;
this._valueColumns = null;
this._fullFileReader = null;
this._backButton = this._root.getElement('#back');
this._backButton.addEvent('click', this._back.bind(this));
this._nextButton = this._root.getElement('#next');
this._nextButton.addEvent('click', this._next.bind(this));
this._cancelButton = this._root.getElement('#cancel');
this._cancelButton.addEvent('click', this._cancel.bind(this));
},
_step: function(i) {
this._currentStep = i;
this._root.getElements('.step').addClass('hidden');
switch (this._currentStep) {
case 1:
this._root.getElement('#step1').removeClass('hidden');
this._backButton.addClass('disabled');
this._nextButton.set('value', 'next').removeClass('disabled');
break;
case 2:
this._root.getElement('#step2').removeClass('hidden');
this._backButton.removeClass('disabled');
this._nextButton.set('value', 'next').addClass('disabled');
break;
case 3:
this._root.getElement('#step3').removeClass('hidden');
this._backButton.removeClass('disabled');
this._nextButton.set('value', 'import').addClass('disabled');
break;
case 4:
this._root.getElement('#step3').removeClass('hidden');
this._backButton.addClass('disabled');
this._nextButton.addClass('disabled');
break;
default:
}
},
_step0: function(file) {
this._step(0);
this._file = file;
    if (this._file.size > this.MAX_FILE_SIZE) {
      this._error('file too large!');
      return;
    }
var reader = new FileReader();
reader.onloadstart = function(evt) {
if (!evt.lengthComputable) {
this._error('could not compute file length!');
return;
}
}.bind(this);
reader.onloadend = function(evt) {
if (evt.target.readyState !== FileReader.DONE) {
this._error('failed to load file!');
return;
}
if (this._currentStep !== 0) {
return;
}
this._step1(evt.target.result);
}.bind(this);
var blob = this._file.slice(0, this.INITIAL_CHUNK_READ);
reader.readAsText(blob);
},
_pickLines: function(lineData) {
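    // Heuristic: bucket the row indices by column count, pick the most
    // common count, and treat that bucket's first row as the start of the
    // tabular data; the next 10 lines are offered for preview.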
var rows = d3.csv.parseRows(lineData);
var maxL = d3.max(rows, function(row) { return row.length; }),
Ls = [];
for (var i = 0; i <= maxL; i++) {
Ls.push([]);
}
rows.each(function(row, i) {
Ls[row.length].push(i);
});
var maxi = 0;
for (var i = 1; i <= maxL; i++) {
if (Ls[i].length > Ls[maxi].length) {
maxi = i;
}
}
return {
selected: Ls[maxi][0],
limit: Ls[maxi][0] + 10
};
},
_getSepLast: function(partialFileData) {
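    // Find the last complete line: prefer '\n' as the separator and fall
    // back to '\r' for old Mac-style line endings.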
var sep = '\n',
last = partialFileData.lastIndexOf(sep);
if (last === -1) {
sep = '\r';
last = partialFileData.lastIndexOf(sep);
}
return {
sep: sep,
last: last
};
},
_step1: function(partialFileData) {
this._step(1);
var sepLast = this._getSepLast(partialFileData),
lineData = partialFileData.substring(0, sepLast.last),
picked = this._pickLines(lineData),
lines = lineData.split(sepLast.sep),
stepRoot = this._root.getElement('#step1'),
table = stepRoot.getElement('.table');
this._lines = lines;
this._picked = picked;
var buildLine = function(i, selected) {
var line = this._lines[i],
lineNumber = i + 1;
var rowElem = new Element('div.data-row');
var cell = new Element('div.data-cell')
.setStyle('width', '99%')
.toggleClass('odd', lineNumber % 2 === 1)
.toggleClass('selected', i === selected)
.addEvent('click', function(evt) {
table.getElements('.data-cell').removeClass('selected');
cell.addClass('selected');
this._picked = {selected: i, limit: i + 10};
}.bind(this));
var text = new Element('div', {
text: line
});
cell.adopt(text);
rowElem.adopt(cell);
return rowElem;
}.bind(this);
table.empty();
    for (var i = 0; i < Math.min(this._picked.limit, this._lines.length); i++) {
table.adopt(buildLine(i, this._picked.selected));
}
this._root.addClass('active');
},
_step2: function() {
this._step(2);
var lines = this._lines.slice(this._picked.selected, this._picked.limit),
rows = lines.map(function(line) {
return d3.csv.parseRows(line)[0];
}),
stepRoot = this._root.getElement('#step2'),
table = stepRoot.getElement('.table');
this._columns = rows[0];
this._timeColumns = new Array(this._columns.length);
    table.setStyle('width', (100 + 2 * 2) * this._columns.length);
var buildRow = function(row) {
var rowElem = new Element('div.data-row');
row.each(function(col, i) {
var cell = new Element('div.data-cell')
.set('text', col)
.setStyle('width', 100)
.addClass('col_' + i)
.toggleClass('odd', i % 2 === 1);
cell.addEvent('click', function(evt) {
table.getElements('.col_' + i).toggleClass('selected');
if (this._timeColumns[i] === undefined) {
this._timeColumns[i] = true;
this._nextButton.removeClass('disabled');
} else {
this._timeColumns[i] = undefined;
var tcols = this._getColumns(this._timeColumns);
if (tcols.length === 0) {
this._nextButton.addClass('disabled');
}
}
console.log(JSON.stringify(this._timeColumns));
}.bind(this));
rowElem.adopt(cell);
}.bind(this));
return rowElem;
}.bind(this);
table.empty();
for (var i = 0; i < rows.length; i++) {
table.adopt(buildRow(rows[i]));
}
},
_step3: function() {
this._step(3);
var lines = this._lines.slice(this._picked.selected, this._picked.limit),
rows = lines.map(function(line) {
return d3.csv.parseRows(line)[0];
}),
stepRoot = this._root.getElement('#step3'),
table = stepRoot.getElement('.table');
this._valueColumns = new Array(this._columns.length);
    table.setStyle('width', (100 + 2 * 2) * this._columns.length);
var buildRow = function(row) {
var rowElem = new Element('div.data-row');
row.each(function(col, i) {
var cell = new Element('div.data-cell')
.setStyle('width', 100)
.addClass('col_' + i)
.toggleClass('odd', i % 2 === 1);
if (this._timeColumns[i] === true) {
cell.addClass('unselectable');
} else {
cell.addEvent('click', function(evt) {
table.getElements('.col_' + i).toggleClass('selected');
if (this._valueColumns[i] === undefined) {
this._valueColumns[i] = true;
this._nextButton.removeClass('disabled');
} else {
this._valueColumns[i] = undefined;
var xcols = this._getColumns(this._valueColumns);
if (xcols.length === 0) {
this._nextButton.addClass('disabled');
}
}
            console.log(JSON.stringify(this._valueColumns));
}.bind(this));
}
var text = new Element('div', {
text: col
});
cell.adopt(text);
rowElem.adopt(cell);
}.bind(this));
return rowElem;
}.bind(this);
table.empty();
for (var i = 0; i < rows.length; i++) {
table.adopt(buildRow(rows[i]));
}
},
_getColumns: function(selection) {
return this._columns.filter(function(x, i) {
return selection[i];
});
},
_importData: function(data) {
var tcols = this._getColumns(this._timeColumns),
xcols = this._getColumns(this._valueColumns),
rows = RowLoader.load(data),
channels = ChannelExtractor.extract(tcols, xcols, rows);
Object.each(channels, function(channelData, suffix) {
var fileName = this._file.name,
prefix = fileName.substring(0, fileName.lastIndexOf('.')),
lowerSuffix = suffix.toLowerCase().replace(' ', '-'),
name = prefix + '-' + lowerSuffix;
Fist.importData(name, channelData, fileName);
}.bind(this));
},
_step4: function() {
this._step(4);
var progress = this._root.getElement('#progress');
this._fullFileReader = new FileReader();
this._fullFileReader.onloadstart = function(evt) {
progress.set('value', 0).set('max', evt.total);
};
this._fullFileReader.onprogress = function(evt) {
progress.set('value', evt.loaded);
};
this._fullFileReader.onloadend = function(evt) {
      if (evt.target.readyState !== FileReader.DONE) {
        this._error('failed to load file!');
        return;
      }
try {
this._importData(evt.target.result);
} catch (e) {
if (!(e instanceof DataImportError)) {
throw e;
}
this._error(e.toString());
return;
}
this._finish();
}.bind(this);
this._fullFileReader.readAsText(this._file);
},
_back: function() {
switch (this._currentStep) {
case 2:
case 3:
this._step(this._currentStep - 1);
break;
default:
var msg = 'invalid step for _back(): ' + this._currentStep;
this._error(msg);
}
},
_next: function(args) {
// TODO: validation!
switch (this._currentStep) {
case 1:
this._step2();
break;
case 2:
var tcols = this._getColumns(this._timeColumns);
if (tcols.length === 0) {
return;
}
this._step3();
break;
case 3:
        var xcols = this._getColumns(this._valueColumns);
if (xcols.length === 0) {
return;
}
this._step4();
break;
default:
var msg = 'invalid step for _next(): ' + this._currentStep;
this._error(msg);
}
},
_reset: function() {
this._file = null;
this._currentStep = null;
this._lines = null;
if (this._fullFileReader !== null) {
this._fullFileReader.abort();
}
this._picked = null;
this._columns = null;
this._timeColumns = null;
this._valueColumns = null;
this._fullFileReader = null;
},
_error: function(msg) {
this._reset();
this._status.notOK('import failed! ' + msg);
this._root.removeClass('active');
},
_cancel: function() {
this._reset();
this._status.notOK('import cancelled.');
this._root.removeClass('active');
},
_finish: function() {
this._reset();
this._status.OK('import successful.');
this._root.removeClass('active');
},
show: function(file) {
this._step0(file);
}
});
|
const axios = require("axios")
class Bridge {
constructor(app) {
this.url = 'https://api.dialog.lk'
this.app = app
}
call(req) {
// PROPS
let payload = {}
// METHOD
payload.method = ( req.method ) ? req.method : 'GET'
// HEADERS
payload.headers = { 'Content-Type': 'application/json' }
// DATA
payload.data = {
applicationId: this.app.id,
password: this.app.password
}
payload.data = Object.assign(payload.data, req.payload)
        // axios already returns a promise, so no explicit wrapper is needed;
        // use the computed payload.method so the GET fallback takes effect
        return axios({
            method: payload.method,
            url: this.url + req.api,
            headers: payload.headers,
            data: payload.data
        })
}
}
exports.Bridge = Bridge
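// Example usage (a sketch: the credentials and the endpoint path below are
// placeholders, not values defined by this module):
//
//   const { Bridge } = require('./bridge')
//   const bridge = new Bridge({ id: 'APP_000000', password: 'secret' })
//   bridge.call({ method: 'POST', api: '/sms/send', payload: { message: 'hi' } })
//     .then(res => console.log(res.data))
//     .catch(err => console.error(err.message))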
|
"""
Forms and validation code for user registration.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.conf import settings
from signbank.settings.server_specific import *
from django.utils.safestring import mark_safe
from django.utils.functional import lazy
from django.utils import six
from django.db.utils import OperationalError
mark_safe_lazy = lazy(mark_safe, six.text_type)
from signbank.registration.models import RegistrationProfile, UserProfile
from signbank.dictionary.models import Dataset
from django_select2 import *
from easy_select2.widgets import Select2, Select2Multiple
import re
alnum_re = re.compile(r'^\w+$')
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_reqd = { 'class': 'required form-control' }
attrs_default = {'class': 'form-control'}
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the request username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should either preserve the base ``save()`` or implement
a ``save()`` which accepts the ``profile_callback`` keyword
argument and passes it through to
``RegistrationProfile.objects.create_inactive_user()``.
"""
error_css_class = 'error'
username = forms.CharField(max_length=30, required=True,
widget=forms.TextInput(attrs=attrs_reqd),
label=_(u'Username'))
first_name = forms.CharField(max_length=30, required=True,
widget=forms.TextInput(attrs=attrs_reqd),
label=_(u'First Name'))
last_name = forms.CharField(max_length=30, required=True,
widget=forms.TextInput(attrs=attrs_reqd),
label=_(u'Last Name'))
email = forms.EmailField(widget=forms.TextInput(attrs=attrs_reqd), required=True,
label=_(u'Your Email Address'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_reqd), required=True,
label=_(u'Password'))
password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_reqd),
label=_(u'Password (again)'))
if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
try:
dataset_choices = [ (ds.name, ds.name) for ds in Dataset.objects.filter(is_public='1') ]
if not dataset_choices:
dataset_choices = [(ds.name, ds.name) for ds in Dataset.objects.filter(acronym=settings.DEFAULT_DATASET_ACRONYM)]
#This process can fail during migrations of the Dataset model
except OperationalError:
dataset_choices = []
dataset = forms.TypedMultipleChoiceField(label=_(u'Requested Datasets'),
choices=dataset_choices,
required=False, widget=Select2)
    # The motivation field is not stored in the database: it is read from the
    # form and used in the access-request email sent to the dataset manager.
    # To avoid default Django behaviour it is renamed in the form to
    # motivation_for_use, so required has to be False, otherwise Django can't
    # match the new field name with this one. Requested datasets are also
    # passed this way rather than stored in the database.
    motivation = forms.CharField(widget=forms.Textarea(attrs={'cols': 80, 'rows': 5,
                                                              'placeholder': 'Motivation'}),
                                 label=_(u'Motivation'), required=False,
                                 help_text=_("Please explain why you would like to get access to this dataset. What are the purposes for which you wish to use it?"))
tos_choices = [(True, 'Agree'), (False, 'Disagree')]
href_hyperlink = settings.URL + settings.PREFIX_URL + '/about/conditions/'
tos_hyperlink = _(u'I have read and agree to the <a href="' + href_hyperlink + '" target="_blank">Terms of Service</a>')
tos = forms.BooleanField(label=mark_safe_lazy(tos_hyperlink),
widget=forms.RadioSelect(choices=tos_choices),
error_messages={'required': 'Error: You must agree to the Terms of Service in order to register'})
    def clean_username(self):
        """
        Validates that the username is alphanumeric and is not already
        in use.
        """
        if not alnum_re.search(self.cleaned_data['username']):
            raise forms.ValidationError(_(u'Usernames can only contain letters, numbers and underscores.'))
        try:
            User.objects.get(username__exact=self.cleaned_data['username'])
        except User.DoesNotExist:
            return self.cleaned_data['username']
        raise forms.ValidationError(_(u'This username is already taken. Please choose another.'))
def clean_password2(self):
"""
Validates that the two password inputs match.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] == self.cleaned_data['password2']:
return self.cleaned_data['password2']
raise forms.ValidationError(_(u'You must type the same password each time'))
def clean_motivation(self):
if 'motivation_for_use' in self.cleaned_data:
return self.cleaned_data['motivation_for_use']
elif 'motivation' in self.cleaned_data:
return self.cleaned_data['motivation']
else:
raise forms.ValidationError(_(u'Please provide motivation for your request'))
def clean_tos(self):
"""
Validates that the user accepted the Terms of Service.
"""
        if self.cleaned_data.get('tos', False):
            return self.cleaned_data['tos']
        raise forms.ValidationError(_(u'You must agree to the Terms of Service in order to register'))
def save(self, profile_callback=None):
"""
Creates the new ``User`` and ``RegistrationProfile``, and
returns the ``User``.
This is essentially a light wrapper around
``RegistrationProfile.objects.create_inactive_user()``,
feeding it the form data and a profile callback (see the
documentation on ``create_inactive_user()`` for details) if
supplied.
"""
new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],
password=self.cleaned_data['password1'],
email=self.cleaned_data['email'],
firstname=self.cleaned_data['first_name'],
lastname=self.cleaned_data['last_name'],
agree=self.cleaned_data['tos'],
profile_callback=profile_callback)
return new_user
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_reqd),
label=_(u'I have read and agree to the Terms of Service'))
def clean_tos(self):
"""
Validates that the user accepted the Terms of Service.
"""
if self.cleaned_data.get('tos', False):
return self.cleaned_data['tos']
raise forms.ValidationError(_(u'You must agree to the terms to register'))
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validates that the supplied email address is unique for the
site.
"""
try:
            User.objects.get(email__exact=self.cleaned_data['email'])
except User.DoesNotExist:
return self.cleaned_data['email']
raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))
yesnoChoices = ((1, 'yes'), (0, 'no'))
import string
def t(message):
"""Replace $country and $language in message with date from settings"""
tpl = string.Template(message)
return tpl.substitute(country=settings.COUNTRY_NAME, language=settings.LANGUAGE_NAME)
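# Example (values are illustrative): with settings.COUNTRY_NAME = 'Australia'
# and settings.LANGUAGE_NAME = 'Auslan',
#   t("Signs of $country in $language") -> 'Signs of Australia in Auslan'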
from django.views.decorators.cache import never_cache
from django.contrib.auth import authenticate
class EmailAuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
email = forms.CharField(label=_("Email"), max_length=100)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(EmailAuthenticationForm, self).__init__(*args, **kwargs)
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
if email and password:
self.user_cache = authenticate(username=email, password=password)
if self.user_cache is None:
raise forms.ValidationError(_("Please enter a correct email and password. Note that password is case-sensitive."))
elif not self.user_cache.is_active:
raise forms.ValidationError(_("This account is inactive."))
# TODO: determine whether this should move to its own method.
if self.request:
if not self.request.session.test_cookie_worked():
raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))
return self.cleaned_data
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
|
"""
<name>Undulator</name>
<description>An undulator</description>
<icon>icons/gaussian.svg</icon>
<priority>2</priority>
"""
import sys
from PyQt4.Qt import *
from orangewidget.settings import Setting
from orangewidget import gui
from oasys.widgets import widget
from orangecontrib.wanys.util.OpticalElement import OpticalElement
from orangecontrib.wanys.util.OpticalBeam import OpticalBeam
from orangecontrib.wanys.widgets.drivers.DriverSettingsWidget import DriverSettingsWidget
from orangecontrib.wanys.BeamlineComponents.Source.Undulator import Undulator
class UndulatorWidget(widget.OWWidget):
name = "Undulator"
description = "Undulator"
icon = "icons/gaussian.svg"
want_main_area = False
inputs = [("Optical beam", OpticalBeam, "onOpticalBeam", widget.Multiple)]
outputs = [("Optical beam", OpticalBeam)]
value_le_K_vertical = Setting(1.87)
value_le_K_horizontal = Setting(0)
value_le_period_length = Setting(0.035)
value_le_period_number = Setting(14)
value_le_driver_settings = Setting("")
def __init__(self, parent=None, signalManager=None):
widget.OWWidget.__init__(self, parent, signalManager)
self.__optical_undulator = OpticalElement("undulator")
self.le_K_vertical = gui.lineEdit(self.controlArea,
self,
"value_le_K_vertical",
label="Vertical K",
validator=QDoubleValidator(bottom=0.0))
self.le_K_horizontal = gui.lineEdit(self.controlArea,
self,
"value_le_K_horizontal",
label="Horizontal K",
validator=QDoubleValidator(bottom=0.0))
self.le_period_length = gui.lineEdit(self.controlArea,
self,
"value_le_period_length",
label="period length [m]",
validator=QDoubleValidator(bottom=0.0))
self.le_period_number = gui.lineEdit(self.controlArea,
self,
"value_le_period_number",
label="number periods",
validator=QDoubleValidator(bottom=0.0))
self.__driver_settings_widget = DriverSettingsWidget(self.__optical_undulator,
self,
"value_le_driver_settings",
Undulator(1.8,1.8,0.35,100))
self.__optical_undulator.setOnSynchronize(self.synchronizeToOpticalElement)
def synchronizeToOpticalElement(self):
source = self.__optical_undulator
K_vertical = float(self.value_le_K_vertical)
K_horizontal = float(self.value_le_K_horizontal)
period_length = float(self.value_le_period_length)
        period_number = float(self.value_le_period_number)
beamline_component = Undulator(K_vertical=K_vertical,
K_horizontal=K_horizontal,
period_length=period_length,
periods_number=period_number)
self.__optical_undulator.setBeamlineComponent(beamline_component=beamline_component)
def onOpticalBeam(self, optical_beam, sender):
optical_beam.sender().addOutput(self.__optical_undulator)
        outgoing_beam = OpticalBeam(self.__optical_undulator)
        self.send("Optical beam", outgoing_beam)
if __name__=="__main__":
appl = QApplication(sys.argv)
ow = UndulatorWidget()
ow.show()
appl.exec_()
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import time
import random
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as vision
import mindspore.nn as nn
import mindspore.ops.functional as F
from mindspore import Tensor
from mindspore import context
from mindspore import ParameterTuple
from mindspore.nn import Cell
from mindspore.ops import operations as P
from mindspore.ops import composite as CP
from mindspore.nn.optim.momentum import Momentum
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.context import ParallelMode
from mindspore.communication.management import init
random.seed(1)
np.random.seed(1)
ds.config.set_seed(1)
grad_by_list = CP.GradOperation(get_by_list=True)
def weight_variable_0(shape):
zeros = np.zeros(shape).astype(np.float32)
return Tensor(zeros)
def weight_variable_1(shape):
ones = np.ones(shape).astype(np.float32)
return Tensor(ones)
def conv3x3(in_channels, out_channels, stride=1, padding=0):
    """3x3 convolution"""
return nn.Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=padding, weight_init='XavierUniform',
has_bias=False, pad_mode="same")
def conv1x1(in_channels, out_channels, stride=1, padding=0):
"""1x1 convolution"""
return nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride, padding=padding, weight_init='XavierUniform',
has_bias=False, pad_mode="same")
def conv7x7(in_channels, out_channels, stride=1, padding=0):
    """7x7 convolution"""
return nn.Conv2d(in_channels, out_channels,
kernel_size=7, stride=stride, padding=padding, weight_init='XavierUniform',
has_bias=False, pad_mode="same")
def bn_with_initialize(out_channels):
    shape = (out_channels,)
mean = weight_variable_0(shape)
var = weight_variable_1(shape)
beta = weight_variable_0(shape)
bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init='Uniform',
beta_init=beta, moving_mean_init=mean, moving_var_init=var)
return bn
def bn_with_initialize_last(out_channels):
    shape = (out_channels,)
mean = weight_variable_0(shape)
var = weight_variable_1(shape)
beta = weight_variable_0(shape)
bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init='Uniform',
beta_init=beta, moving_mean_init=mean, moving_var_init=var)
return bn
def fc_with_initialize(input_channels, out_channels):
return nn.Dense(input_channels, out_channels, weight_init='XavierUniform', bias_init='Uniform')
class ResidualBlock(nn.Cell):
expansion = 4
def __init__(self,
in_channels,
out_channels,
stride=1):
super(ResidualBlock, self).__init__()
out_chls = out_channels // self.expansion
self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
self.bn1 = bn_with_initialize(out_chls)
self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)
self.bn2 = bn_with_initialize(out_chls)
self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
self.bn3 = bn_with_initialize_last(out_channels)
self.relu = P.ReLU()
self.add = P.Add()
def construct(self, x):
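        # Bottleneck residual path: 1x1 reduce -> 3x3 -> 1x1 expand,
        # then add the identity shortcut and apply a final ReLU.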
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.add(out, identity)
out = self.relu(out)
return out
class ResidualBlockWithDown(nn.Cell):
expansion = 4
def __init__(self,
in_channels,
out_channels,
stride=1,
down_sample=False):
super(ResidualBlockWithDown, self).__init__()
out_chls = out_channels // self.expansion
self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
self.bn1 = bn_with_initialize(out_chls)
self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)
self.bn2 = bn_with_initialize(out_chls)
self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
self.bn3 = bn_with_initialize_last(out_channels)
self.relu = P.ReLU()
self.downSample = down_sample
self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0)
self.bn_down_sample = bn_with_initialize(out_channels)
self.add = P.Add()
def construct(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
identity = self.conv_down_sample(identity)
identity = self.bn_down_sample(identity)
out = self.add(out, identity)
out = self.relu(out)
return out
class MakeLayer0(nn.Cell):
def __init__(self, block, in_channels, out_channels, stride):
super(MakeLayer0, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=1, down_sample=True)
self.b = block(out_channels, out_channels, stride=stride)
self.c = block(out_channels, out_channels, stride=1)
def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)
return x
class MakeLayer1(nn.Cell):
def __init__(self, block, in_channels, out_channels, stride):
super(MakeLayer1, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
self.b = block(out_channels, out_channels, stride=1)
self.c = block(out_channels, out_channels, stride=1)
self.d = block(out_channels, out_channels, stride=1)
def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)
x = self.d(x)
return x
class MakeLayer2(nn.Cell):
def __init__(self, block, in_channels, out_channels, stride):
super(MakeLayer2, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
self.b = block(out_channels, out_channels, stride=1)
self.c = block(out_channels, out_channels, stride=1)
self.d = block(out_channels, out_channels, stride=1)
self.e = block(out_channels, out_channels, stride=1)
self.f = block(out_channels, out_channels, stride=1)
def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)
x = self.d(x)
x = self.e(x)
x = self.f(x)
return x
class MakeLayer3(nn.Cell):
def __init__(self, block, in_channels, out_channels, stride):
super(MakeLayer3, self).__init__()
self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
self.b = block(out_channels, out_channels, stride=1)
self.c = block(out_channels, out_channels, stride=1)
def construct(self, x):
x = self.a(x)
x = self.b(x)
x = self.c(x)
return x
class ResNet(nn.Cell):
def __init__(self, block, num_classes=100, batch_size=32):
super(ResNet, self).__init__()
self.batch_size = batch_size
self.num_classes = num_classes
self.conv1 = conv7x7(3, 64, stride=2, padding=0)
self.bn1 = bn_with_initialize(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(kernel_size=3, strides=2, pad_mode="SAME")
self.layer1 = MakeLayer0(block, in_channels=64, out_channels=256, stride=1)
self.layer2 = MakeLayer1(block, in_channels=256, out_channels=512, stride=2)
self.layer3 = MakeLayer2(block, in_channels=512, out_channels=1024, stride=2)
self.layer4 = MakeLayer3(block, in_channels=1024, out_channels=2048, stride=2)
self.pool = P.ReduceMean(keep_dims=True)
self.squeeze = P.Squeeze(axis=(2, 3))
self.fc = fc_with_initialize(512 * block.expansion, num_classes)
def construct(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)[0]
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.pool(x, (2, 3))
x = self.squeeze(x)
x = self.fc(x)
return x
def resnet50(batch_size, num_classes):
return ResNet(ResidualBlock, num_classes, batch_size)
def create_dataset(repeat_num=1, training=True, batch_size=32):
data_home = "/home/workspace/mindspore_dataset"
data_dir = data_home + "/cifar-10-batches-bin"
if not training:
data_dir = data_home + "/cifar-10-verify-bin"
data_set = ds.Cifar10Dataset(data_dir)
resize_height = 224
resize_width = 224
rescale = 1.0 / 255.0
shift = 0.0
# define map operations
random_crop_op = vision.RandomCrop((32, 32), (4, 4, 4, 4)) # padding_mode default CONSTANT
random_horizontal_op = vision.RandomHorizontalFlip()
# interpolation default BILINEAR
resize_op = vision.Resize((resize_height, resize_width))
rescale_op = vision.Rescale(rescale, shift)
normalize_op = vision.Normalize((0.4465, 0.4822, 0.4914), (0.2010, 0.1994, 0.2023))
changeswap_op = vision.HWC2CHW()
type_cast_op = C.TypeCast(mstype.int32)
c_trans = []
if training:
c_trans = [random_crop_op, random_horizontal_op]
c_trans += [resize_op, rescale_op, normalize_op,
changeswap_op]
# apply map operations on images
data_set = data_set.map(operations=type_cast_op, input_columns="label")
data_set = data_set.map(operations=c_trans, input_columns="image")
# apply shuffle operations
data_set = data_set.shuffle(buffer_size=1000)
# apply batch operations
data_set = data_set.batch(batch_size=batch_size, drop_remainder=True)
# apply repeat operations
data_set = data_set.repeat(repeat_num)
return data_set
class CrossEntropyLoss(nn.Cell):
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
self.mean = P.ReduceMean()
self.one_hot = P.OneHot()
self.one = Tensor(1.0, mstype.float32)
self.zero = Tensor(0.0, mstype.float32)
def construct(self, logits, label):
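        # Expand the integer labels to one-hot vectors sized to the logits'
        # class dimension, then average the per-sample losses.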
label = self.one_hot(label, F.shape(logits)[1], self.one, self.zero)
loss = self.cross_entropy(logits, label)[0]
loss = self.mean(loss, (-1,))
return loss
class GradWrap(Cell):
""" GradWrap definition """
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
self.weights = ParameterTuple(network.trainable_params())
def construct(self, x, label):
weights = self.weights
return grad_by_list(self.network, weights)(x, label)
def test_pynative_resnet50():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=False, device_num=8)
init()
batch_size = 32
num_classes = 10
net = resnet50(batch_size, num_classes)
criterion = CrossEntropyLoss()
optimizer = Momentum(learning_rate=0.01, momentum=0.9,
params=filter(lambda x: x.requires_grad, net.get_parameters()))
net_with_criterion = WithLossCell(net, criterion)
net_with_criterion.set_grad()
train_network = GradWrap(net_with_criterion)
train_network.set_train()
step = 0
max_step = 21
exceed_num = 0
data_set = create_dataset(repeat_num=1, training=True, batch_size=batch_size)
for element in data_set.create_dict_iterator(num_epochs=1):
step = step + 1
if step > max_step:
break
start_time = time.time()
input_data = element["image"]
input_label = element["label"]
loss_output = net_with_criterion(input_data, input_label)
grads = train_network(input_data, input_label)
optimizer(grads)
end_time = time.time()
cost_time = end_time - start_time
print("======step: ", step, " loss: ", loss_output.asnumpy(), " cost time: ", cost_time)
if step > 1 and cost_time > 0.18:
exceed_num = exceed_num + 1
assert exceed_num < 20
|
import { ApolloServer, gql } from 'apollo-server';
import mongoose from 'mongoose';
import bcrypt from 'bcrypt';
import jwt from 'jsonwebtoken';
import User from './models/user';
import Employee from './models/employee';
// generate token
const SECRET = 'mysecret';
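// NOTE: a hardcoded secret is fine for a demo, but in production it should
// come from configuration (e.g. an environment variable) rather than source.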
function generateToken(id, email) {
const token = jwt.sign({ id, email }, SECRET);
return token;
}
function verifyToken(token) {
try {
const { id } = jwt.verify(token, SECRET);
return id;
} catch (err) {
return null;
}
}
const typeDefs = gql`
input UserInput {
email: String!
password: String!
userName: String
position: String
experience: String
}
type UserLogged {
token: String
email: String!
userName: String
position: String
experience: String
}
type Employee {
id : ID!
firstName: String!
lastName: String!
projet: String!
position: String
}
input EmployeeInput{
id : ID
firstName: String!
lastName: String!
projet: String!
position: String
}
type Query {
employees: [Employee]
employee(id: ID!) : Employee
me: UserLogged
}
type Mutation {
register(input: UserInput): UserLogged
login(input: UserInput): UserLogged
addEmploye(input: EmployeeInput) : Employee
updateEmploye(input: EmployeeInput): Employee
    removeEmploye(id: ID!): Boolean
}
`;
// Resolvers define the technique for fetching the types in the
// schema. We'll retrieve books from the "books" array above.
const resolvers = {
Query: {
    employees: (_, $, { models }) => models.Employee.find(),
me: (_, $, { models, userId }) => models.User.findOne({ _id: userId }),
employee: (_, { id }, { models }) => models.Employee.findOne({ _id: id }),
},
Mutation: {// User save as a model.user
register: async (_, { input }, { models }) => {
const hashPassword = await bcrypt.hash(input.password, 3);
const user = new models.User({
email: input.email,
password: hashPassword,
userName: input.userName,
position: input.position,
experience: input.experience,
});
await user.save();
const token = generateToken(user.id, user.email);
return { token, email: user.email, userName: user.userName, position: user.position, experience: user.experience };
},
login: async (_, { input }, { models }) => {
const currentUser = await models.User.findOne({ email: input.email });
if (!currentUser) {
throw new Error('User not found');
}
const correctPassword = await bcrypt.compare(input.password, currentUser.password);
if (!correctPassword) {
throw new Error('Wrong Password');
}
const token = generateToken(currentUser.id, currentUser.email);
return { token, email: currentUser.email };
},
addEmploye: async (_, { input }, { models }) => {
const newUser = new models.Employee({
firstName: input.firstName,
lastName: input.lastName,
projet: input.projet,
position: input.position,
});
const UserAdded = await newUser.save();
return UserAdded;
},
    updateEmploye: async (_, { input: { id, firstName, lastName, projet, position } }) => {
      // Pass { new: true } so Mongoose returns the updated document
      // rather than the pre-update snapshot.
      const utilisateur = await Employee.findOneAndUpdate(
        { _id: id },
        { $set: { firstName, lastName, projet, position } },
        { new: true },
      );
      return utilisateur;
    },
removeEmploye: async (_, { id }) => {
await Employee.findByIdAndRemove(id);
return true;
},
},
};
// In the most basic sense, the ApolloServer can be started
// by passing type definitions (typeDefs) and the resolvers
// responsible for fetching the data for those types.
const server = new ApolloServer({
typeDefs,
resolvers,
context: ({ req }) => {
const userId = verifyToken(req.headers.authorization);
return {
userId,
models: {
User,
Employee,
},
};
},
});
// This `listen` method launches a web-server. Existing apps
// can utilize middleware options, which we'll discuss later.
mongoose.connect('mongodb://localhost:27017/training', { useNewUrlParser: true })
.then(() => {
console.log('connected to mongodb');
server.listen().then(({ url }) => {
console.log(`🚀 Server ready at ${url}`);
});
});
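// Example query against this schema (illustrative):
//
//   query {
//     employees { id firstName lastName projet position }
//   }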
|
/**
* Copyright (c) 2017-present, Facebook, Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
const HtmlWebpackPlugin = require('html-webpack-plugin');
const fs = require('fs-extra');
const path = require('path');
const pluginName = 'ChunkManifestPlugin';
class ChunkManifestPlugin {
constructor(options) {
this.options = {
filename: 'manifest.json',
manifestVariable: 'webpackManifest',
inlineManifest: false,
...options,
};
}
apply(compiler) {
let chunkManifest;
const {path: outputPath, publicPath} = compiler.options.output;
// Build the chunk mapping
compiler.hooks.afterCompile.tapAsync(pluginName, (compilation, done) => {
const assets = {};
const assetsMap = {};
// eslint-disable-next-line
for (const chunkGroup of compilation.chunkGroups) {
if (chunkGroup.name) {
const files = [];
// eslint-disable-next-line
for (const chunk of chunkGroup.chunks) {
files.push(...chunk.files);
}
assets[chunkGroup.name] = files.filter(f => f.slice(-4) !== '.map');
assetsMap[chunkGroup.name] = files
.filter(
f =>
f.slice(-4) !== '.map' &&
f.slice(0, chunkGroup.name.length) === chunkGroup.name,
)
.map(filename => `${publicPath}${filename}`);
}
}
chunkManifest = assetsMap;
if (!this.options.inlineManifest) {
const finalPath = path.resolve(outputPath, this.options.filename);
fs.ensureDir(path.dirname(finalPath), () => {
fs.writeFile(finalPath, JSON.stringify(chunkManifest, null, 2), done);
});
} else {
done();
}
});
compiler.hooks.compilation.tap(pluginName, compilation => {
// inline to html-webpack-plugin <head> tag
if (this.options.inlineManifest) {
const hooks = HtmlWebpackPlugin.getHooks(compilation);
const {manifestVariable} = this.options;
hooks.alterAssetTagGroups.tap(pluginName, assets => {
if (chunkManifest) {
const newTag = {
tagName: 'script',
closeTag: true,
attributes: {
type: 'text/javascript',
},
innerHTML: `/*<![CDATA[*/window.${manifestVariable}=${JSON.stringify(
chunkManifest,
)};/*]]>*/`,
};
assets.headTags.unshift(newTag);
}
});
}
});
}
}
module.exports = ChunkManifestPlugin;
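// Example webpack config (a sketch; the option values are illustrative):
//
//   const ChunkManifestPlugin = require('./chunk-manifest-plugin');
//   module.exports = {
//     plugins: [
//       new ChunkManifestPlugin({
//         filename: 'chunk-manifest.json',
//         manifestVariable: 'webpackManifest',
//         inlineManifest: true, // inject the manifest via html-webpack-plugin
//       }),
//     ],
//   };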
|
#!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007
# Radosław Szkodziński, 2010
"""
At this point, vala is still unstable, so do not expect
this tool to be too stable either (apis, etc)
"""
import os.path, shutil, re
from waflib import Context, Task, Utils, Logs, Options, Errors
from waflib.TaskGen import extension, taskgen_method
from waflib.Configure import conf
class valac(Task.Task):
"""
Task to compile vala files.
"""
#run_str = "${VALAC} ${VALAFLAGS}" # ideally
#vars = ['VALAC_VERSION']
vars = ["VALAC", "VALAC_VERSION", "VALAFLAGS"]
ext_out = ['.h']
def run(self):
cmd = [self.env['VALAC']] + self.env['VALAFLAGS']
cmd.extend([a.abspath() for a in self.inputs])
ret = self.exec_command(cmd, cwd=self.outputs[0].parent.abspath())
if ret:
return ret
for x in self.outputs:
if id(x.parent) != id(self.outputs[0].parent):
shutil.move(self.outputs[0].parent.abspath() + os.sep + x.name, x.abspath())
if self.generator.dump_deps_node:
self.generator.dump_deps_node.write('\n'.join(self.generator.packages))
return ret
valac = Task.update_outputs(valac) # no decorators for python2 classes
@taskgen_method
def init_vala_task(self):
self.profile = getattr(self, 'profile', 'gobject')
if self.profile == 'gobject':
self.uselib = Utils.to_list(getattr(self, 'uselib', []))
if not 'GOBJECT' in self.uselib:
self.uselib.append('GOBJECT')
def addflags(flags):
self.env.append_value('VALAFLAGS', flags)
if self.profile:
addflags('--profile=%s' % self.profile)
if hasattr(self, 'threading'):
if self.profile == 'gobject':
if not 'GTHREAD' in self.uselib:
self.uselib.append('GTHREAD')
else:
#Vala doesn't have threading support for dova nor posix
Logs.warn("Profile %s means no threading support" % self.profile)
self.threading = False
if self.threading:
addflags('--threading')
valatask = self.valatask
self.is_lib = 'cprogram' not in self.features
if self.is_lib:
addflags('--library=%s' % self.target)
h_node = self.path.find_or_declare('%s.h' % self.target)
valatask.outputs.append(h_node)
addflags('--header=%s' % h_node.name)
valatask.outputs.append(self.path.find_or_declare('%s.vapi' % self.target))
if getattr(self, 'gir', None):
gir_node = self.path.find_or_declare('%s.gir' % self.gir)
addflags('--gir=%s' % gir_node.name)
valatask.outputs.append(gir_node)
self.vala_target_glib = getattr(self, 'vala_target_glib', getattr(Options.options, 'vala_target_glib', None))
if self.vala_target_glib:
addflags('--target-glib=%s' % self.vala_target_glib)
addflags(['--define=%s' % x for x in getattr(self, 'vala_defines', [])])
packages_private = Utils.to_list(getattr(self, 'packages_private', []))
addflags(['--pkg=%s' % x for x in packages_private])
def _get_api_version():
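		# Derive the install suffix from the project's API_VERSION:
		# "0.x.y" keeps the minor version ("0.x"); anything else maps
		# to "<major>.0".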
api_version = '1.0'
if hasattr(Context.g_module, 'API_VERSION'):
version = Context.g_module.API_VERSION.split(".")
if version[0] == "0":
api_version = "0." + version[1]
else:
api_version = version[0] + ".0"
return api_version
self.includes = Utils.to_list(getattr(self, 'includes', []))
self.uselib = self.to_list(getattr(self, 'uselib', []))
valatask.install_path = getattr(self, 'install_path', '')
valatask.vapi_path = getattr(self, 'vapi_path', '${DATAROOTDIR}/vala/vapi')
valatask.pkg_name = getattr(self, 'pkg_name', self.env['PACKAGE'])
valatask.header_path = getattr(self, 'header_path', '${INCLUDEDIR}/%s-%s' % (valatask.pkg_name, _get_api_version()))
valatask.install_binding = getattr(self, 'install_binding', True)
self.packages = packages = Utils.to_list(getattr(self, 'packages', []))
self.vapi_dirs = vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', []))
includes = []
if hasattr(self, 'use'):
local_packages = Utils.to_list(self.use)[:] # make sure to have a copy
seen = []
while len(local_packages) > 0:
package = local_packages.pop()
if package in seen:
continue
seen.append(package)
# check if the package exists
try:
package_obj = self.bld.get_tgen_by_name(package)
except Errors.WafError:
continue
package_name = package_obj.target
package_node = package_obj.path
package_dir = package_node.path_from(self.path)
for task in package_obj.tasks:
for output in task.outputs:
if output.name == package_name + ".vapi":
valatask.set_run_after(task)
if package_name not in packages:
packages.append(package_name)
if package_dir not in vapi_dirs:
vapi_dirs.append(package_dir)
if package_dir not in includes:
includes.append(package_dir)
if hasattr(package_obj, 'use'):
lst = self.to_list(package_obj.use)
lst.reverse()
local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages
addflags(['--pkg=%s' % p for p in packages])
for vapi_dir in vapi_dirs:
v_node = self.path.find_dir(vapi_dir)
if not v_node:
Logs.warn('Unable to locate Vala API directory: %r' % vapi_dir)
else:
addflags('--vapidir=%s' % v_node.abspath())
addflags('--vapidir=%s' % v_node.get_bld().abspath())
self.dump_deps_node = None
if self.is_lib and self.packages:
self.dump_deps_node = self.path.find_or_declare('%s.deps' % self.target)
valatask.outputs.append(self.dump_deps_node)
self.includes.append(self.bld.srcnode.abspath())
self.includes.append(self.bld.bldnode.abspath())
for include in includes:
try:
self.includes.append(self.path.find_dir(include).abspath())
self.includes.append(self.path.find_dir(include).get_bld().abspath())
except AttributeError:
Logs.warn("Unable to locate include directory: '%s'" % include)
if self.is_lib and valatask.install_binding:
headers_list = [o for o in valatask.outputs if o.suffix() == ".h"]
try:
self.install_vheader.source = headers_list
except AttributeError:
self.install_vheader = self.bld.install_files(valatask.header_path, headers_list, self.env)
vapi_list = [o for o in valatask.outputs if (o.suffix() in (".vapi", ".deps"))]
try:
self.install_vapi.source = vapi_list
except AttributeError:
self.install_vapi = self.bld.install_files(valatask.vapi_path, vapi_list, self.env)
gir_list = [o for o in valatask.outputs if o.suffix() == '.gir']
try:
self.install_gir.source = gir_list
except AttributeError:
self.install_gir = self.bld.install_files(getattr(self, 'gir_path', '${DATAROOTDIR}/gir-1.0'), gir_list, self.env)
@extension('.vala', '.gs')
def vala_file(self, node):
"""
Compile a vala file and bind the task to *self.valatask*. If an existing vala task is already set, add the node
to its inputs. The typical example is::
def build(bld):
bld.program(
packages = 'gtk+-2.0',
target = 'vala-gtk-example',
uselib = 'GTK GLIB',
source = 'vala-gtk-example.vala foo.vala',
vala_defines = ['DEBUG'] # adds --define=<xyz> values to the command-line
# the following arguments are for libraries
#gir = 'hello-1.0',
#gir_path = '/tmp',
#vapi_path = '/tmp',
#pkg_name = 'hello'
# disable installing of gir, vapi and header
#install_binding = False
# profile = 'xyz' # adds --profile=<xyz> to enable profiling
# threading = True, # adds --threading; only meaningful for the 'gobject' profile (others emit a warning)
# vala_target_glib = 'xyz' # adds --target-glib=<xyz>, can be given through the command-line option --vala-target-glib=<xyz>
)
:param node: vala file
:type node: :py:class:`waflib.Node.Node`
"""
try:
valatask = self.valatask
except AttributeError:
valatask = self.valatask = self.create_task('valac')
self.init_vala_task()
valatask.inputs.append(node)
c_node = node.change_ext('.c')
valatask.outputs.append(c_node)
self.source.append(c_node)
@conf
def find_valac(self, valac_name, min_version):
"""
Find the valac program, and execute it to store the version
number in *conf.env.VALAC_VERSION*
:param valac_name: program name
:type valac_name: string or list of string
:param min_version: minimum version acceptable
:type min_version: tuple of int
"""
valac = self.find_program(valac_name, var='VALAC')
try:
output = self.cmd_and_log(valac + ' --version')
except Exception:
valac_version = None
else:
		ver = re.search(r'\d+\.\d+\.\d+', output).group(0).split('.')
valac_version = tuple([int(x) for x in ver])
self.msg('Checking for %s version >= %r' % (valac_name, min_version),
valac_version, valac_version and valac_version >= min_version)
	if valac and (valac_version is None or valac_version < min_version):
self.fatal("%s version %r is too old, need >= %r" % (valac_name, valac_version, min_version))
self.env['VALAC_VERSION'] = valac_version
return valac
@conf
def check_vala(self, min_version=(0,8,0), branch=None):
"""
	Check if a vala compiler from the given branch exists and is at least
	the given version.
:param min_version: minimum version acceptable (0.8.0)
:type min_version: tuple
:param branch: first part of the version number, in case a snapshot is used (0, 8)
:type branch: tuple of int
"""
if not branch:
branch = min_version[:2]
try:
find_valac(self, 'valac-%d.%d' % (branch[0], branch[1]), min_version)
except self.errors.ConfigurationError:
find_valac(self, 'valac', min_version)
@conf
def check_vala_deps(self):
"""
Load the gobject and gthread packages if they are missing.
"""
if not self.env['HAVE_GOBJECT']:
pkg_args = {'package': 'gobject-2.0',
'uselib_store': 'GOBJECT',
'args': '--cflags --libs'}
if getattr(Options.options, 'vala_target_glib', None):
pkg_args['atleast_version'] = Options.options.vala_target_glib
self.check_cfg(**pkg_args)
if not self.env['HAVE_GTHREAD']:
pkg_args = {'package': 'gthread-2.0',
'uselib_store': 'GTHREAD',
'args': '--cflags --libs'}
if getattr(Options.options, 'vala_target_glib', None):
pkg_args['atleast_version'] = Options.options.vala_target_glib
self.check_cfg(**pkg_args)
def configure(self):
"""
Use the following to enforce minimum vala version::
def configure(conf):
conf.load('vala', funs='')
conf.check_vala(min_version=(0,10,0))
"""
self.load('gnu_dirs')
self.check_vala_deps()
self.check_vala()
self.env.VALAFLAGS = ['-C', '--quiet']
def options(opt):
"""
Load the :py:mod:`waflib.Tools.gnu_dirs` tool and add the ``--vala-target-glib`` command-line option
"""
opt.load('gnu_dirs')
valaopts = opt.add_option_group('Vala Compiler Options')
valaopts.add_option ('--vala-target-glib', default=None,
dest='vala_target_glib', metavar='MAJOR.MINOR',
help='Target version of glib for Vala GObject code generation')
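# Example project wscript (a separate file, sketched here from the docstring
# examples above; the target and source names are illustrative assumptions):
#
#     def options(opt):
#         opt.load('vala')
#
#     def configure(conf):
#         conf.load('vala')
#
#     def build(bld):
#         bld.program(packages='gtk+-2.0', target='vala-gtk-example',
#                     uselib='GTK GLIB', source='vala-gtk-example.vala')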
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pathlib
import warnings
from nvtabular.graph.schema import Schema
from nvtabular.graph.selector import ColumnSelector
from nvtabular.inference.graph.ops.operator import InferenceOperator
from nvtabular.inference.triton.ensemble import (
_generate_nvtabular_config,
_remove_columns,
_triton_datatype_to_dtype,
)
class WorkflowOp(InferenceOperator):
def __init__(
self,
workflow,
name=None,
sparse_max=None,
max_batch_size=None,
label_columns=None,
cats=None,
conts=None,
):
self.workflow = workflow
self.sparse_max = sparse_max or {}
self.name = name or self.__class__.__name__.lower()
self.max_batch_size = max_batch_size
self.label_columns = label_columns or []
self.cats = cats or []
self.conts = conts or []
def compute_output_schema(self, input_schema: Schema, col_selector: ColumnSelector) -> Schema:
expected_input = input_schema.apply(col_selector)
        if expected_input != self.workflow.graph.input_schema:
raise ValueError(
"Request schema provided to WorkflowOp doesn't match workflow's input schema.\n"
f"Request schema columns: {input_schema.column_names}\n"
f"Workflow input schema columns: {self.workflow.graph.input_schema.column_names}."
)
return self.workflow.output_schema
def export(self, path, consumer_config=None, version=1):
"""Create a directory inside supplied path based on our export name"""
new_dir_path = pathlib.Path(path) / self.export_name
new_dir_path.mkdir()
workflow = _remove_columns(self.workflow, self.label_columns)
# override the output dtype of the nvtabular model if necessary (fixes mismatches
# in dtypes between tf inputs and nvt outputs)
if consumer_config:
for column in consumer_config.input:
tf_dtype = _triton_datatype_to_dtype(column.data_type)
nvt_dtype = workflow.output_dtypes.get(column.name)
if nvt_dtype and nvt_dtype != tf_dtype:
warnings.warn(
f"TF model expects {tf_dtype} for column {column.name}, but workflow "
f" is producing type {nvt_dtype}. Overriding dtype in NVTabular workflow."
)
workflow.output_dtypes[column.name] = tf_dtype
# TODO: Extract this logic to base inference operator?
export_path = new_dir_path / str(version) / self.export_name
workflow.save(str(export_path))
return _generate_nvtabular_config(
workflow,
self.export_name,
new_dir_path,
backend="nvtabular",
sparse_max=self.sparse_max,
max_batch_size=self.max_batch_size,
cats=self.cats,
conts=self.conts,
)
@property
def export_name(self):
return self.name
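# A hedged usage sketch (not from the NVTabular sources): wrapping a fitted
# workflow for Triton export. `nvt_workflow`, the "preprocess" name, the label
# column, and the /models path are illustrative assumptions.
def export_preprocessing(nvt_workflow, model_repo="/models"):
    op = WorkflowOp(nvt_workflow, name="preprocess", label_columns=["label"])
    # Writes <model_repo>/preprocess/1/preprocess and returns the Triton config.
    return op.export(model_repo)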
|
from wifi import Wifi
if __name__ == '__main__':
# Show saved Wi-Fi profiles on the system
prof = Wifi.Profile()
ssid = prof.get_ssid()
print(ssid)
print(prof.get_password(ssid[0]))
|
const moment = require('moment')
exports.seed = function(knex, Promise) {
// Deletes ALL existing entries
return knex('servers').del()
.then(function () {
// Inserts seed entries
return knex('servers').insert([
{
id: 1,
name: 'The seeded server name',
user_id: 888,
        // This can't be destroyed yet because it is a user server
        destroy_at: moment().add(1, 'minute').format('YYYY-MM-DD HH:mm:ss'),
destroyed_at: null,
},
{
id: 2,
name: 'Provided by user',
user_id: 999
},
{
id: 5,
name: 'Should be destroyed very soon',
destroyed_at: null,
destroy_at: moment().subtract(1, 'minutes').format('YYYY-MM-DD HH:mm:ss'),
}
]);
});
};
|
# Shared imports for the solutions below.
from typing import List
import bisect

# 1st solution, brute force, TLE
# O(n^2) time | O(1) space
class Solution:
    def reversePairs(self, nums: List[int]) -> int:
        count = 0
        for i in range(len(nums)):
            for j in range(i + 1, len(nums)):
                if nums[i] > 2 * nums[j]:
                    count += 1
        return count
# 2nd solution
# O(n*log(n)) time | O(n) space
class Solution:
def reversePairs(self, nums: List[int]) -> int:
if not nums:
return 0
return self.mergesort(nums)[1]
def mergesort(self, nums):
if len(nums) <= 1:
return nums, 0
m = len(nums) // 2
left, countl = self.mergesort(nums[:m])
right, countr = self.mergesort(nums[m:])
count = countl + countr
L, R = len(left) - 1 , len(right) - 1
for i in reversed(range(len(nums))):
a = left[L] if L >= 0 else float("-inf")
b = right[R] if R >= 0 else float("-inf")
if a <= b:
nums[i] = b
count += len(left) - bisect.bisect(left, 2*b)
R -= 1
else:
nums[i] = a
L -= 1
return nums, count
# 3rd solution
# O(n*log(n)) time | O(n) space
class Solution:
def reversePairs(self, nums: List[int]) -> int:
if not nums:
return 0
return self.mergesort(nums)[1]
def mergesort(self, nums):
if len(nums) <= 1:
return nums, 0
m = len(nums) // 2
left, countl = self.mergesort(nums[:m])
right, countr = self.mergesort(nums[m:])
count = countl + countr
for r in right:
temp = len(left) - bisect.bisect(left, 2*r)
if temp == 0:
break
count += temp
return sorted(left + right), count
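# Quick sanity check against the two standard LeetCode examples; this
# exercises the last Solution defined above (the bisect-based merge sort).
if __name__ == "__main__":
    assert Solution().reversePairs([1, 3, 2, 3, 1]) == 2
    assert Solution().reversePairs([2, 4, 3, 5, 1]) == 3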
|
import os
import tempfile
from exasol_advanced_analytics_framework.deployment.bundle_lua_scripts import \
BundleLuaScripts
def test_bundle_lua_scripts():
lua_bundled_content = BundleLuaScripts.get_content()
assert lua_bundled_content
with tempfile.TemporaryDirectory() as tmp_dir:
lua_bundled_file_path = os.path.join(tmp_dir, "bundle_final.lua")
bundler = BundleLuaScripts(tmp_dir, lua_bundled_file_path)
bundler.copy_lua_source_files()
bundler.bundle_lua_scripts()
        with open(lua_bundled_file_path, "r") as file:
            lua_bundled_data = file.read()
assert lua_bundled_data == lua_bundled_content
|
/*****************************************************************************\
* ANALYSIS PERFORMANCE TOOLS *
* Extrae *
* Instrumentation package for parallel applications *
*****************************************************************************
* ___ This library is free software; you can redistribute it and/or *
* / __ modify it under the terms of the GNU LGPL as published *
* / / _____ by the Free Software Foundation; either version 2.1 *
* / / / \ of the License, or (at your option) any later version. *
* ( ( ( B S C ) *
* \ \ \_____/ This library is distributed in hope that it will be *
* \ \__ useful but WITHOUT ANY WARRANTY; without even the *
* \___ implied warranty of MERCHANTABILITY or FITNESS FOR A *
* PARTICULAR PURPOSE. See the GNU LGPL for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this library; if not, write to the Free Software Foundation, *
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
* The GNU Lesser General Public License is contained in the file COPYING. *
* --------- *
* Barcelona Supercomputing Center - Centro Nacional de Supercomputacion *
\*****************************************************************************/
#ifndef CHECKOPTIONS_H
#define CHECKOPTIONS_H
void CheckHWCcontrol (int taskid, long long options);
void CheckClockType (int taskid, long long options, int traceformat, int force);
#endif
|
import React from 'react'
import {BrowserRouter, Route} from 'react-router-dom'
import {ThemeProvider, createMuiTheme, responsiveFontSizes, makeStyles} from '@material-ui/core/styles'
import Routes from '../../router/Router'
import styles from './styles/Layout'
import Navigation from './Navigation'
import Footer from './Footer'
const useStyles = makeStyles(styles)
const makeTheme = createMuiTheme({
palette: {
primary: {
main: '#250A3C'
},
secondary: {
main: '#CFEBEC'
},
error: {
main: '#E86D48'
}
}
})
const Layout = () => {
const classes = useStyles()
const theme = responsiveFontSizes(makeTheme)
return (
<div className={classes.root}>
<ThemeProvider theme={theme}>
<BrowserRouter>
<div className={classes.secondC}>
<Navigation/>
<div className={classes.page}>
<Route path="/" component={ScrollToTop}/>
<Routes/>
</div>
<Footer/>
</div>
</BrowserRouter>
</ThemeProvider>
</div>
)
}
class ScrollToTop extends React.Component {
componentDidUpdate(prevProps) {
if (this.props.location !== prevProps.location) {
window.scrollTo(0, 0)
}
}
render() {
return null
}
}
export default Layout
|
'use strict';
import { cardViewModel } from './cardViewModel.js';
import interact from 'interactjs';
// Represents one of the four open playable cards
class playerOpenCardViewModel extends cardViewModel {
constructor(connection, elementId, isPlayer) {
super(connection, elementId);
this.isPlayer = isPlayer;
if (isPlayer)
this.enableDragStart();
}
setModel(model) {
super.setModel(model);
if (this.isPlayer) {
// Reset css transition animation after card has been dropped on stack
this.element.classList.remove('playingCard');
this.element.classList.add('playableCard');
}
}
// Dragging an open card to one of the stacks will send a signal to the server that the player
// wants to make a move.
enableDragStart() {
function dragStartListener(event) {
var target = event.target;
// Removes css transition on hover while dragging
target.classList.remove('playableCard');
target.classList.add('playingCard');
target.style.opacity = '0.5';
}
function dragMoveListener(event) {
var target = event.target,
// keep the dragged position in the data-x/data-y attributes
x = (parseFloat(target.getAttribute('data-x')) || 0) + event.dx,
y = (parseFloat(target.getAttribute('data-y')) || 0) + event.dy;
// move card by applying transform
target.style.webkitTransform =
target.style.transform =
'translate(' + x + 'px, ' + y + 'px)';
// update the position attributes
target.setAttribute('data-x', x);
target.setAttribute('data-y', y);
}
function dragEndListener(event) {
var target = event.target;
target.style.opacity = '1';
// move back the card when dropped and reset previous recorded delta in x/y
target.style.removeProperty('webkitTransform');
target.style.removeProperty('transform');
target.setAttribute('data-x', 0);
target.setAttribute('data-y', 0);
}
interact(this.element).draggable({
onstart: dragStartListener,
onmove: dragMoveListener,
onend: dragEndListener
});
}
}
export { playerOpenCardViewModel };
|
import factory
from surveys.models import Survey
from users.tests.factories import UserFactory
class SurveyFactory(factory.django.DjangoModelFactory):
class Meta:
model = Survey
title = factory.Faker('catch_phrase')
description = factory.Faker('text')
author = factory.SubFactory(UserFactory)
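# A hedged usage sketch (an assumed test module, not from the repo): .build()
# creates unsaved instances, so no database access is required.
def test_survey_factory_builds_valid_instances():
    survey = SurveyFactory.build()
    assert survey.title
    assert survey.author is not None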
|
from unittest import mock
import pytest
from h.events import AnnotationEvent
from h.services.annotation_delete import annotation_delete_service_factory
class TestAnnotationDeleteService:
def test_it_marks_the_annotation_as_deleted(
self, svc, pyramid_request, factories, annotation
):
ann = annotation()
svc.delete(ann)
assert ann.deleted
def test_it_updates_the_updated_field(
self, svc, pyramid_request, factories, annotation, datetime
):
ann = annotation()
svc.delete(ann)
assert ann.updated == datetime.utcnow.return_value
def test_it_publishes_a_delete_event(
self, svc, pyramid_request, factories, annotation
):
ann = annotation()
svc.delete(ann)
expected_event = AnnotationEvent(pyramid_request, ann.id, "delete")
actual_event = pyramid_request.notify_after_commit.call_args[0][0]
assert (
expected_event.request,
expected_event.annotation_id,
expected_event.action,
) == (actual_event.request, actual_event.annotation_id, actual_event.action)
def test_it_deletes_all_annotations(
self, svc, pyramid_request, factories, annotation
):
svc.delete = mock.create_autospec(svc.delete, spec_set=True)
anns = [annotation(), annotation()]
svc.delete_annotations(anns)
        assert svc.delete.mock_calls == [mock.call(anns[0]), mock.call(anns[1])]
@pytest.fixture
def annotation(factories):
return lambda factories=factories: factories.Annotation()
@pytest.fixture
def svc(db_session, pyramid_request):
pyramid_request.db = db_session
return annotation_delete_service_factory({}, pyramid_request)
@pytest.fixture
def pyramid_request(pyramid_request):
pyramid_request.notify_after_commit = mock.Mock()
return pyramid_request
@pytest.fixture
def datetime(patch):
return patch("h.services.annotation_delete.datetime")
|
"""
homeassistant.components.notify.pushover
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pushover platform for notify component.
Configuration:
To use the Pushover notifier you will need to add something like the following
to your config/configuration.yaml
notify:
platform: pushover
api_key: ABCDEFGHJKLMNOPQRSTUVXYZ
user_key: ABCDEFGHJKLMNOPQRSTUVXYZ
Variables:
api_key
*Required
To get an API key, go to https://pushover.net and register a new
application.
This is a quote from the pushover website regarding free/open source apps:
"If you are creating a client-side library, application, or open source project
that will be redistributed and installed by end-users, you may want to require
each of your users to register their own application rather than including your
own API token with the software."
When setting up the application I recommend using the icon located here:
https://home-assistant.io/images/favicon-192x192.png
user_key
*Required
To retrieve this value log into your account at https://pushover.net
"""
import logging
from homeassistant.helpers import validate_config
from homeassistant.components.notify import (
DOMAIN, ATTR_TITLE, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-variable
def get_service(hass, config):
""" Get the pushover notification service. """
if not validate_config(config,
{DOMAIN: ['user_key', CONF_API_KEY]},
_LOGGER):
return None
try:
# pylint: disable=no-name-in-module, unused-variable
from pushover import InitError
except ImportError:
_LOGGER.exception(
"Unable to import pushover. "
"Did you maybe not install the 'python-pushover.py' package?")
return None
try:
api_token = config[DOMAIN].get(CONF_API_KEY)
return PushoverNotificationService(
config[DOMAIN]['user_key'],
api_token)
except InitError:
_LOGGER.error(
"Wrong API key supplied. "
"Get it at https://pushover.net")
# pylint: disable=too-few-public-methods
class PushoverNotificationService(BaseNotificationService):
""" Implements notification service for Pushover. """
def __init__(self, user_key, api_token):
# pylint: disable=no-name-in-module, unused-variable
from pushover import Client
self._user_key = user_key
self._api_token = api_token
self.pushover = Client(
self._user_key, api_token=self._api_token)
def send_message(self, message="", **kwargs):
""" Send a message to a user. """
# pylint: disable=no-name-in-module
from pushover import RequestError
title = kwargs.get(ATTR_TITLE)
try:
self.pushover.send_message(message, title=title)
except RequestError:
_LOGGER.exception("Could not send pushover notification")
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Integration test for list_env.py
GOOGLE_APPLICATION_CREDENTIALS must be set to a Service Account for a project
that has enabled the Monitoring API.
Currently the project ID is read from the GOOGLE_CLOUD_PROJECT environment
variable, but it could be changed to a different project.
"""
import os
import re
import googleapiclient.discovery
import pytest
import list_resources
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
METRIC = 'compute.googleapis.com/instance/cpu/usage_time'
@pytest.fixture(scope='module')
def client():
return googleapiclient.discovery.build('monitoring', 'v3')
@pytest.mark.flaky
def test_list_monitored_resources(client, capsys):
PROJECT_RESOURCE = "projects/{}".format(PROJECT)
list_resources.list_monitored_resource_descriptors(
client, PROJECT_RESOURCE)
stdout, _ = capsys.readouterr()
regex = re.compile(
'An application running', re.I)
assert regex.search(stdout) is not None
@pytest.mark.flaky
def test_list_metrics(client, capsys):
PROJECT_RESOURCE = "projects/{}".format(PROJECT)
list_resources.list_metric_descriptors(
client, PROJECT_RESOURCE, METRIC)
stdout, _ = capsys.readouterr()
regex = re.compile(
u'Delta', re.I)
assert regex.search(stdout) is not None
@pytest.mark.flaky
def test_list_timeseries(client, capsys):
PROJECT_RESOURCE = "projects/{}".format(PROJECT)
list_resources.list_timeseries(
client, PROJECT_RESOURCE, METRIC)
stdout, _ = capsys.readouterr()
regex = re.compile(u'list_timeseries response:\n', re.I)
assert regex.search(stdout) is not None
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2DaemonSetStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'collision_count': 'int',
'conditions': 'list[V1beta2DaemonSetCondition]',
'current_number_scheduled': 'int',
'desired_number_scheduled': 'int',
'number_available': 'int',
'number_misscheduled': 'int',
'number_ready': 'int',
'number_unavailable': 'int',
'observed_generation': 'int',
'updated_number_scheduled': 'int'
}
attribute_map = {
'collision_count': 'collisionCount',
'conditions': 'conditions',
'current_number_scheduled': 'currentNumberScheduled',
'desired_number_scheduled': 'desiredNumberScheduled',
'number_available': 'numberAvailable',
'number_misscheduled': 'numberMisscheduled',
'number_ready': 'numberReady',
'number_unavailable': 'numberUnavailable',
'observed_generation': 'observedGeneration',
'updated_number_scheduled': 'updatedNumberScheduled'
}
def __init__(self, collision_count=None, conditions=None, current_number_scheduled=None, desired_number_scheduled=None, number_available=None, number_misscheduled=None, number_ready=None, number_unavailable=None, observed_generation=None, updated_number_scheduled=None):
"""
V1beta2DaemonSetStatus - a model defined in Swagger
"""
self._collision_count = None
self._conditions = None
self._current_number_scheduled = None
self._desired_number_scheduled = None
self._number_available = None
self._number_misscheduled = None
self._number_ready = None
self._number_unavailable = None
self._observed_generation = None
self._updated_number_scheduled = None
self.discriminator = None
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
self.current_number_scheduled = current_number_scheduled
self.desired_number_scheduled = desired_number_scheduled
if number_available is not None:
self.number_available = number_available
self.number_misscheduled = number_misscheduled
self.number_ready = number_ready
if number_unavailable is not None:
self.number_unavailable = number_unavailable
if observed_generation is not None:
self.observed_generation = observed_generation
if updated_number_scheduled is not None:
self.updated_number_scheduled = updated_number_scheduled
@property
def collision_count(self):
"""
Gets the collision_count of this V1beta2DaemonSetStatus.
Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.
:return: The collision_count of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""
Sets the collision_count of this V1beta2DaemonSetStatus.
Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.
:param collision_count: The collision_count of this V1beta2DaemonSetStatus.
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""
Gets the conditions of this V1beta2DaemonSetStatus.
Represents the latest available observations of a DaemonSet's current state.
:return: The conditions of this V1beta2DaemonSetStatus.
:rtype: list[V1beta2DaemonSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""
Sets the conditions of this V1beta2DaemonSetStatus.
Represents the latest available observations of a DaemonSet's current state.
:param conditions: The conditions of this V1beta2DaemonSetStatus.
:type: list[V1beta2DaemonSetCondition]
"""
self._conditions = conditions
@property
def current_number_scheduled(self):
"""
Gets the current_number_scheduled of this V1beta2DaemonSetStatus.
The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
:return: The current_number_scheduled of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._current_number_scheduled
@current_number_scheduled.setter
def current_number_scheduled(self, current_number_scheduled):
"""
Sets the current_number_scheduled of this V1beta2DaemonSetStatus.
The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
:param current_number_scheduled: The current_number_scheduled of this V1beta2DaemonSetStatus.
:type: int
"""
if current_number_scheduled is None:
raise ValueError("Invalid value for `current_number_scheduled`, must not be `None`")
self._current_number_scheduled = current_number_scheduled
@property
def desired_number_scheduled(self):
"""
Gets the desired_number_scheduled of this V1beta2DaemonSetStatus.
The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
:return: The desired_number_scheduled of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._desired_number_scheduled
@desired_number_scheduled.setter
def desired_number_scheduled(self, desired_number_scheduled):
"""
Sets the desired_number_scheduled of this V1beta2DaemonSetStatus.
The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
:param desired_number_scheduled: The desired_number_scheduled of this V1beta2DaemonSetStatus.
:type: int
"""
if desired_number_scheduled is None:
raise ValueError("Invalid value for `desired_number_scheduled`, must not be `None`")
self._desired_number_scheduled = desired_number_scheduled
@property
def number_available(self):
"""
Gets the number_available of this V1beta2DaemonSetStatus.
The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)
:return: The number_available of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._number_available
@number_available.setter
def number_available(self, number_available):
"""
Sets the number_available of this V1beta2DaemonSetStatus.
The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)
:param number_available: The number_available of this V1beta2DaemonSetStatus.
:type: int
"""
self._number_available = number_available
@property
def number_misscheduled(self):
"""
Gets the number_misscheduled of this V1beta2DaemonSetStatus.
The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
:return: The number_misscheduled of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._number_misscheduled
@number_misscheduled.setter
def number_misscheduled(self, number_misscheduled):
"""
Sets the number_misscheduled of this V1beta2DaemonSetStatus.
The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
:param number_misscheduled: The number_misscheduled of this V1beta2DaemonSetStatus.
:type: int
"""
if number_misscheduled is None:
raise ValueError("Invalid value for `number_misscheduled`, must not be `None`")
self._number_misscheduled = number_misscheduled
@property
def number_ready(self):
"""
Gets the number_ready of this V1beta2DaemonSetStatus.
The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.
:return: The number_ready of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._number_ready
@number_ready.setter
def number_ready(self, number_ready):
"""
Sets the number_ready of this V1beta2DaemonSetStatus.
The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.
:param number_ready: The number_ready of this V1beta2DaemonSetStatus.
:type: int
"""
if number_ready is None:
raise ValueError("Invalid value for `number_ready`, must not be `None`")
self._number_ready = number_ready
@property
def number_unavailable(self):
"""
Gets the number_unavailable of this V1beta2DaemonSetStatus.
The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)
:return: The number_unavailable of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._number_unavailable
@number_unavailable.setter
def number_unavailable(self, number_unavailable):
"""
Sets the number_unavailable of this V1beta2DaemonSetStatus.
The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)
:param number_unavailable: The number_unavailable of this V1beta2DaemonSetStatus.
:type: int
"""
self._number_unavailable = number_unavailable
@property
def observed_generation(self):
"""
Gets the observed_generation of this V1beta2DaemonSetStatus.
The most recent generation observed by the daemon set controller.
:return: The observed_generation of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""
Sets the observed_generation of this V1beta2DaemonSetStatus.
The most recent generation observed by the daemon set controller.
:param observed_generation: The observed_generation of this V1beta2DaemonSetStatus.
:type: int
"""
self._observed_generation = observed_generation
@property
def updated_number_scheduled(self):
"""
Gets the updated_number_scheduled of this V1beta2DaemonSetStatus.
The total number of nodes that are running updated daemon pod
:return: The updated_number_scheduled of this V1beta2DaemonSetStatus.
:rtype: int
"""
return self._updated_number_scheduled
@updated_number_scheduled.setter
def updated_number_scheduled(self, updated_number_scheduled):
"""
Sets the updated_number_scheduled of this V1beta2DaemonSetStatus.
The total number of nodes that are running updated daemon pod
:param updated_number_scheduled: The updated_number_scheduled of this V1beta2DaemonSetStatus.
:type: int
"""
self._updated_number_scheduled = updated_number_scheduled
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2DaemonSetStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
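# A hedged sketch (not part of the generated client): constructing the model
# with its required counters and serializing it; the values are illustrative.
if __name__ == "__main__":
    status = V1beta2DaemonSetStatus(
        current_number_scheduled=3,
        desired_number_scheduled=3,
        number_misscheduled=0,
        number_ready=3,
    )
    print(status.to_str())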
|
/*!
=========================================================
* BLK Design System React - v1.2.0
=========================================================
* Product Page: https://www.creative-tim.com/product/blk-design-system-react
* Copyright 2020 Creative Tim (https://www.creative-tim.com)
* Licensed under MIT (https://github.com/creativetimofficial/blk-design-system-react/blob/main/LICENSE.md)
* Coded by Creative Tim
=========================================================
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*/
import React, { useState} from "react";
// core components
import IndexNavbar from "components/Navbars/IndexNavbar.js";
import PageHeader from "components/PageHeader/PageHeader.js";
// import Footer from "components/Footer/Footer.js";
// sections for this page/view
// import Basics from "views/IndexSections/Basics.js";
// import Navbars from "views/IndexSections/Navbars.js";
// import Tabs from "views/IndexSections/Tabs.js";
// import Pagination from "views/IndexSections/Pagination.js";
// import Notifications from "views/IndexSections/Notifications.js";
// import Typography from "views/IndexSections/Typography.js";
// import JavaScript from "views/IndexSections/JavaScript.js";
// import NucleoIcons from "views/IndexSections/NucleoIcons.js";
// import Signup from "views/IndexSections/Signup.js";
// import Examples from "views/IndexSections/Examples.js";
// import Download from "views/IndexSections/Download.js";
import LandingPage from "views/examples/LandingPage";
import { Web3Context } from "Context/Web3Context";
export default function Index() {
const [web3Context, setWeb3Context] = useState({})
React.useEffect(() => {
document.body.classList.toggle("index-page");
// Specify how to clean up after this effect:
return function cleanup() {
document.body.classList.toggle("index-page");
};
},[]);
return (
<>
<IndexNavbar />
<div className="wrapper">
<PageHeader />
<div className="main">
{/* <Basics /> */}
{/* <Navbars /> */}
{/* <Tabs />
<Pagination />
<Notifications />
<Typography />
<JavaScript />
<NucleoIcons />
<Signup />
<Examples />
<Download /> */}
<LandingPage></LandingPage>
</div>
{/* <Footer /> */}
</div>
</>
);
}
|
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from fixture.session import SessionHelper
from fixture.mk import MkHelper
from fixture.cas import CasHelper
class Application:
def __init__(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
self.session = SessionHelper(self)
self.mk = MkHelper(self)
self.cas = CasHelper(self)
def open_home_page(self):
wd = self.wd
wd.get("https://new.kyivstar.ua/ecare/")
wd.maximize_window()
def destroy(self):
self.wd.quit()
def is_valid(self):
        try:
            # Accessing current_url raises once the browser session is gone.
            self.wd.current_url
            return True
        except Exception:
            return False
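# A hedged sketch (an assumed conftest.py, not from the repo): a session-scoped
# fixture that reuses one Application instance across the whole test run.
import pytest

@pytest.fixture(scope="session")
def app(request):
    fixture = Application()
    fixture.open_home_page()
    request.addfinalizer(fixture.destroy)
    return fixture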
|
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load_module, reload_module, get_stdout, check_source
from functools import reduce
import os
exercise = 'src.same_characters'
@points('4.same_characters')
class SameCharactersTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', side_effect=["2"] * 10):
cls.module = load_module(exercise, 'en')
def test_0_main_program_ok(self):
ok, line = check_source(self.module)
message = """The code for testing the functions should be placed inside
if __name__ == "__main__":
block. The following row should be moved:
"""
self.assertTrue(ok, message+line)
def test_2_function_exists(self):
try:
from src.same_characters import same_chars
except:
            self.assertTrue(False, 'Your code should contain a function named same_chars')
def test_3_function_can_be_called1(self):
try:
from src.same_characters import same_chars
same_chars("coder", 1, 10)
except:
self.assertTrue(False, f'Make sure, that function same_chars can be called as follows:\nsame_chars("coder", 1, 10)')
try:
from src.same_characters import same_chars
same_chars("coder", 10, 1)
except:
self.assertTrue(False, f'Make sure, that function same_chars can be called as follows:\nsame_chars("coder", 10, 1)')
if __name__ == '__main__':
unittest.main()
|
const less = require('less')

// less.render invokes its callback asynchronously, so a webpack-style
// loader must use this.async() rather than returning the CSS directly.
function loader (source) {
  const callback = this.async()
  less.render(source, function (err, result) {
    if (err) {
      return callback(err)
    }
    callback(null, result.css)
  })
}

module.exports = loader
|
import UserSidebar from './Component';
import { connect } from 'react-redux';
import { compose } from 'redux';
const mapStateToProps = state => ({
web3: state.web3,
tartarusAddress: state.tartarus.tartarusAddress,
userSettings: state.user.userSettings,
username: state.user.username
});
const enhance = compose(connect(mapStateToProps));
const UserSidebarContainer = enhance(UserSidebar);
export default UserSidebarContainer;
|
const { DataTypes, Model } = require('sequelize');
const sequelize = require('../common/database');
class User extends Model {}
User.init(
{
id: {
type: DataTypes.UUID,
defaultValue: DataTypes.UUIDV4,
primaryKey: true,
},
username: {
type: DataTypes.STRING,
unique: true,
},
password: {
type: DataTypes.STRING,
allowNull: false,
},
accessToken: {
type: DataTypes.STRING,
defaultValue: '',
},
email: {
type: DataTypes.STRING,
},
prefix: {
type: DataTypes.STRING,
unique: true,
},
isAdmin: {
type: DataTypes.BOOLEAN,
defaultValue: false,
},
isBlocked: {
type: DataTypes.BOOLEAN,
defaultValue: false,
},
    defaultMethod: {
type: DataTypes.STRING,
defaultValue: 'test'
},
// WeChat public account
wechatAppId: DataTypes.STRING,
wechatAppSecret: DataTypes.STRING,
wechatTemplateId: DataTypes.STRING,
wechatOpenId: DataTypes.STRING,
wechatVerifyToken: DataTypes.STRING,
// Email
smtpServer: {
type: DataTypes.STRING,
defaultValue: 'smtp.qq.com',
},
smtpUser: DataTypes.STRING,
smtpPass: DataTypes.STRING,
// WeChat corporation
corpId: DataTypes.STRING,
corpAgentId: DataTypes.STRING,
corpAppSecret: DataTypes.STRING,
corpUserId: DataTypes.STRING
},
{ sequelize }
);
module.exports = User;
|
// Set up MySQL connection.
var mysql = require("mysql");
var connection = mysql.createConnection({
host: "l6slz5o3eduzatkw.cbetxkdyhwsb.us-east-1.rds.amazonaws.com",
port: 3306,
user: "zd8r0td4y9w7ftbs",
password: "gz69st69ufa260i1",
database: "x66ijdt1fgvvj992"
});
// Make connection.
connection.connect(function(err) {
if (err) {
console.error("error connecting: " + err.stack);
return;
}
console.log("connected as id " + connection.threadId);
});
// Export connection for our ORM to use.
module.exports = connection;
|
var Hoptoad = {
VERSION : '2.0',
NOTICE_XML : '<?xml version="1.0" encoding="UTF-8"?>\
<notice version="2.0">\
<api-key></api-key>\
<notifier>\
<name>errbit_notifier_js</name>\
<version>2.0</version>\
<url>https://github.com/errbit/errbit</url>\
</notifier>\
<error>\
<class>EXCEPTION_CLASS</class>\
<message>EXCEPTION_MESSAGE</message>\
<backtrace>BACKTRACE_LINES</backtrace>\
</error>\
<request>\
<url>REQUEST_URL</url>\
<component>REQUEST_COMPONENT</component>\
<action>REQUEST_ACTION</action>\
</request>\
<server-environment>\
<project-root>PROJECT_ROOT</project-root>\
<environment-name>production</environment-name>\
</server-environment>\
</notice>',
ROOT : window.location.protocol + '//' + window.location.host,
BACKTRACE_MATCHER : /^(.*)\@(.*)\:(\d+)$/,
backtrace_filters : [/notifier\.js/],
notify: function(error) {
var xml = escape(Hoptoad.generateXML(error));
var host = Hoptoad.host;
var url = '//' + host + '/notifier_api/v2/notices.xml?data=' + xml;
var request = document.createElement('iframe');
request.style.width = '1px';
request.style.height = '1px';
request.style.display = 'none';
request.src = url;
document.getElementsByTagName('head')[0].appendChild(request);
},
setEnvironment: function(value) {
var matcher = /<environment-name>.*<\/environment-name>/;
Hoptoad.NOTICE_XML = Hoptoad.NOTICE_XML.replace(matcher,
'<environment-name>' +
value +
'</environment-name>')
},
setHost: function(value) {
Hoptoad.host = value;
},
setKey: function(value) {
var matcher = /<api-key>.*<\/api-key>/;
Hoptoad.NOTICE_XML = Hoptoad.NOTICE_XML.replace(matcher,
'<api-key>' +
value +
'</api-key>');
},
setErrorDefaults: function(value) {
Hoptoad.errorDefaults = value;
},
generateXML: function(errorWithoutDefaults) {
var error = Hoptoad.mergeDefault(Hoptoad.errorDefaults, errorWithoutDefaults);
var xml = Hoptoad.NOTICE_XML;
var url = Hoptoad.escapeText(error.url || '');
var component = Hoptoad.escapeText(error.component || '');
var action = Hoptoad.escapeText(error.action || '');
var type = Hoptoad.escapeText(error.type || 'Error');
var message = Hoptoad.escapeText(error.message || 'Unknown error.');
var backtrace = Hoptoad.generateBacktrace(error);
if (Hoptoad.trim(url) == '' && Hoptoad.trim(component) == '') {
xml = xml.replace(/<request>.*<\/request>/, '');
} else {
var data = '';
var cgi_data = error['cgi-data'] || {};
cgi_data["HTTP_USER_AGENT"] = navigator.userAgent;
data += '<cgi-data>';
data += Hoptoad.generateVariables(cgi_data);
data += '</cgi-data>';
var methods = ['params', 'session'];
for (var i = 0; i < methods.length; i++) {
var method = methods[i];
if (error[method]) {
data += '<' + method + '>';
data += Hoptoad.generateVariables(error[method]);
data += '</' + method + '>';
}
}
xml = xml.replace('</request>', data + '</request>')
.replace('REQUEST_URL', url)
.replace('REQUEST_ACTION', action)
.replace('REQUEST_COMPONENT', component);
}
return xml.replace('PROJECT_ROOT', Hoptoad.ROOT)
.replace('EXCEPTION_CLASS', type)
.replace('EXCEPTION_MESSAGE', message)
.replace('BACKTRACE_LINES', backtrace.join(''));
},
generateBacktrace: function(error) {
error = error || {};
if (typeof error.stack != 'string') {
try {
(0)();
} catch(e) {
error.stack = e.stack;
}
}
var backtrace = [];
var stacktrace = Hoptoad.getStackTrace(error);
for (var i = 0, l = stacktrace.length; i < l; i++) {
var line = stacktrace[i];
var matches = line.match(Hoptoad.BACKTRACE_MATCHER);
if (matches && Hoptoad.validBacktraceLine(line)) {
var file = matches[2].replace(Hoptoad.ROOT, '[PROJECT_ROOT]');
if (i == 0) {
if (matches[2].match(document.location.href)) {
backtrace.push('<line method="" file="internal: " number=""/>');
}
}
backtrace.push('<line method="' + Hoptoad.escapeText(matches[1]) +
'" file="' + Hoptoad.escapeText(file) +
'" number="' + matches[3] + '" />');
}
}
return backtrace;
},
getStackTrace: function(error) {
var stacktrace = printStackTrace({ e : error, guess : false });
for (var i = 0, l = stacktrace.length; i < l; i++) {
if (stacktrace[i].match(/\:\d+$/)) {
continue;
}
if (stacktrace[i].indexOf('@') == -1) {
stacktrace[i] += '@unsupported.js';
}
stacktrace[i] += ':0';
}
return stacktrace;
},
validBacktraceLine: function(line) {
for (var i = 0; i < Hoptoad.backtrace_filters.length; i++) {
if (line.match(Hoptoad.backtrace_filters[i])) {
return false;
}
}
return true;
},
generateVariables: function(parameters) {
var key;
var result = '';
for (key in parameters) {
result += '<var key="' + Hoptoad.escapeText(key) + '">' +
Hoptoad.escapeText(parameters[key]) +
'</var>';
}
return result;
},
  escapeText: function(text) {
    return text.replace(/&/g, '&amp;')
               .replace(/</g, '&lt;')
               .replace(/>/g, '&gt;')
               .replace(/'/g, '&#39;')
               .replace(/"/g, '&quot;');
},
trim: function(text) {
return text.toString().replace(/^\s+/, '').replace(/\s+$/, '');
},
mergeDefault: function(defaults, hash) {
var cloned = {};
var key;
for (key in hash) {
cloned[key] = hash[key];
}
for (key in defaults) {
if (!cloned.hasOwnProperty(key)) {
cloned[key] = defaults[key];
}
}
return cloned;
}
};
// From: http://stacktracejs.com/
//
// Domain Public by Eric Wendelin http://eriwen.com/ (2008)
// Luke Smith http://lucassmith.name/ (2008)
// Loic Dachary <loic@dachary.org> (2008)
// Johan Euphrosine <proppy@aminche.com> (2008)
// Oyvind Sean Kinsey http://kinsey.no/blog (2010)
// Victor Homyakov <victor-homyakov@users.sourceforge.net> (2010)
function printStackTrace(a){var a=a||{guess:!0},b=a.e||null,a=!!a.guess,d=new printStackTrace.implementation,b=d.run(b);return a?d.guessAnonymousFunctions(b):b}printStackTrace.implementation=function(){};
printStackTrace.implementation.prototype={run:function(a,b){a=a||this.createException();b=b||this.mode(a);return"other"===b?this.other(arguments.callee):this[b](a)},createException:function(){try{this.undef()}catch(a){return a}},mode:function(a){return a.arguments&&a.stack?"chrome":a.stack&&a.sourceURL?"safari":"string"===typeof a.message&&"undefined"!==typeof window&&window.opera?!a.stacktrace||-1<a.message.indexOf("\n")&&a.message.split("\n").length>a.stacktrace.split("\n").length?"opera9":!a.stack?
"opera10a":0>a.stacktrace.indexOf("called from line")?"opera10b":"opera11":a.stack?"firefox":"other"},instrumentFunction:function(a,b,d){var a=a||window,c=a[b];a[b]=function(){d.call(this,printStackTrace().slice(4));return a[b]._instrumented.apply(this,arguments)};a[b]._instrumented=c},deinstrumentFunction:function(a,b){a[b].constructor===Function&&(a[b]._instrumented&&a[b]._instrumented.constructor===Function)&&(a[b]=a[b]._instrumented)},chrome:function(a){a=(a.stack+"\n").replace(/^\S[^\(]+?[\n$]/gm,
"").replace(/^\s+(at eval )?at\s+/gm,"").replace(/^([^\(]+?)([\n$])/gm,"{anonymous}()@$1$2").replace(/^Object.<anonymous>\s*\(([^\)]+)\)/gm,"{anonymous}()@$1").split("\n");a.pop();return a},safari:function(a){return a.stack.replace(/\[native code\]\n/m,"").replace(/^@/gm,"{anonymous}()@").split("\n")},firefox:function(a){return a.stack.replace(/(?:\n@:0)?\s+$/m,"").replace(/^[\(@]/gm,"{anonymous}()@").split("\n")},opera11:function(a){for(var b=/^.*line (\d+), column (\d+)(?: in (.+))? in (\S+):$/,
a=a.stacktrace.split("\n"),d=[],c=0,f=a.length;c<f;c+=2){var e=b.exec(a[c]);if(e){var g=e[4]+":"+e[1]+":"+e[2],e=e[3]||"global code",e=e.replace(/<anonymous function: (\S+)>/,"$1").replace(/<anonymous function>/,"{anonymous}");d.push(e+"@"+g+" -- "+a[c+1].replace(/^\s+/,""))}}return d},opera10b:function(a){for(var b=/^(.*)@(.+):(\d+)$/,a=a.stacktrace.split("\n"),d=[],c=0,f=a.length;c<f;c++){var e=b.exec(a[c]);e&&d.push((e[1]?e[1]+"()":"global code")+"@"+e[2]+":"+e[3])}return d},opera10a:function(a){for(var b=
/Line (\d+).*script (?:in )?(\S+)(?:: In function (\S+))?$/i,a=a.stacktrace.split("\n"),d=[],c=0,f=a.length;c<f;c+=2){var e=b.exec(a[c]);e&&d.push((e[3]||"{anonymous}")+"()@"+e[2]+":"+e[1]+" -- "+a[c+1].replace(/^\s+/,""))}return d},opera9:function(a){for(var b=/Line (\d+).*script (?:in )?(\S+)/i,a=a.message.split("\n"),d=[],c=2,f=a.length;c<f;c+=2){var e=b.exec(a[c]);e&&d.push("{anonymous}()@"+e[2]+":"+e[1]+" -- "+a[c+1].replace(/^\s+/,""))}return d},other:function(a){for(var b=/function\s*([\w\-$]+)?\s*\(/i,
d=[],c,f;a&&a.arguments&&10>d.length;)c=b.test(a.toString())?RegExp.$1||"{anonymous}":"{anonymous}",f=Array.prototype.slice.call(a.arguments||[]),d[d.length]=c+"("+this.stringifyArguments(f)+")",a=a.caller;return d},stringifyArguments:function(a){for(var b=[],d=Array.prototype.slice,c=0;c<a.length;++c){var f=a[c];void 0===f?b[c]="undefined":null===f?b[c]="null":f.constructor&&(f.constructor===Array?b[c]=3>f.length?"["+this.stringifyArguments(f)+"]":"["+this.stringifyArguments(d.call(f,0,1))+"..."+
this.stringifyArguments(d.call(f,-1))+"]":f.constructor===Object?b[c]="#object":f.constructor===Function?b[c]="#function":f.constructor===String?b[c]='"'+f+'"':f.constructor===Number&&(b[c]=f))}return b.join(",")},sourceCache:{},ajax:function(a){var b=this.createXMLHTTPObject();if(b)try{return b.open("GET",a,!1),b.send(null),b.responseText}catch(d){}return""},createXMLHTTPObject:function(){for(var a,b=[function(){return new XMLHttpRequest},function(){return new ActiveXObject("Msxml2.XMLHTTP")},function(){return new ActiveXObject("Msxml3.XMLHTTP")},
function(){return new ActiveXObject("Microsoft.XMLHTTP")}],d=0;d<b.length;d++)try{return a=b[d](),this.createXMLHTTPObject=b[d],a}catch(c){}},isSameDomain:function(a){return"undefined"!==typeof location&&-1!==a.indexOf(location.hostname)},getSource:function(a){a in this.sourceCache||(this.sourceCache[a]=this.ajax(a).split("\n"));return this.sourceCache[a]},guessAnonymousFunctions:function(a){for(var b=0;b<a.length;++b){var d=/^(.*?)(?::(\d+))(?::(\d+))?(?: -- .+)?$/,c=a[b],f=/\{anonymous\}\(.*\)@(.*)/.exec(c);
if(f){var e=d.exec(f[1]);e&&(d=e[1],f=e[2],e=e[3]||0,d&&(this.isSameDomain(d)&&f)&&(d=this.guessAnonymousFunction(d,f,e),a[b]=c.replace("{anonymous}",d)))}}return a},guessAnonymousFunction:function(a,b){var d;try{d=this.findFunctionName(this.getSource(a),b)}catch(c){d="getSource failed with url: "+a+", exception: "+c.toString()}return d},findFunctionName:function(a,b){for(var d=/function\s+([^(]*?)\s*\(([^)]*)\)/,c=/['"]?([0-9A-Za-z_]+)['"]?\s*[:=]\s*function\b/,f=/['"]?([0-9A-Za-z_]+)['"]?\s*[:=]\s*(?:eval|new Function)\b/,
e="",g,j=Math.min(b,20),h,i=0;i<j;++i)if(g=a[b-i-1],h=g.indexOf("//"),0<=h&&(g=g.substr(0,h)),g)if(e=g+e,(g=c.exec(e))&&g[1]||(g=d.exec(e))&&g[1]||(g=f.exec(e))&&g[1])return g[1];return"(?)"}};
window.onerror = function(message, file, line) {
setTimeout(function() {
Hoptoad.notify({
message : message,
stack : '()@' + file + ':' + line,
url : document.location.href
});
}, 100);
return true;
};
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 12);
|
!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e(require("@angular/core"),require("tslib"),require("rxjs"),require("@progress/kendo-angular-l10n"),require("rxjs/operators"),require("@angular/common"),require("@angular/animations")):"function"==typeof define&&define.amd?define(["@angular/core","tslib","rxjs","@progress/kendo-angular-l10n","rxjs/operators","@angular/common","@angular/animations"],e):"object"==typeof exports?exports.KendoAngularToolbar=e(require("@angular/core"),require("tslib"),require("rxjs"),require("@progress/kendo-angular-l10n"),require("rxjs/operators"),require("@angular/common"),require("@angular/animations")):t.KendoAngularToolbar=e(t["@angular/core"],t.tslib,t.rxjs,t["@progress/kendo-angular-l10n"],t["rxjs/operators"],t["@angular/common"],t["@angular/animations"])}(window,function(t,e,n,o,i,r,s){return function(t){var e={};function n(o){if(e[o])return e[o].exports;var i=e[o]={i:o,l:!1,exports:{}};return t[o].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=e,n.d=function(t,e,o){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:o})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var o=Object.create(null);if(n.r(o),Object.defineProperty(o,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)n.d(o,i,function(e){return t[e]}.bind(null,i));return o},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=23)}([function(e,n){e.exports=t},function(t,e,n){"use strict";n.r(e);var o=n(0),i=function(t,e){return function(n){return e(t(n))}},r=function(t,e,n){return t.addEventListener&&t.addEventListener(e,n)},s=function(t,e,n){return t&&t.removeEventListener&&t.removeEventListener(e,n)},a=function(){},p=function(t){return t.preventDefault()},u=/touch/;function c(t){return t.type.match(u)?{pageX:t.changedTouches[0].pageX,pageY:t.changedTouches[0].pageY,clientX:t.changedTouches[0].clientX,clientY:t.changedTouches[0].clientY,type:t.type,originalEvent:t,isTouch:!0}:{pageX:t.pageX,pageY:t.pageY,clientX:t.clientX,clientY:t.clientY,offsetX:t.offsetX,offsetY:t.offsetY,type:t.type,ctrlKey:t.ctrlKey,shiftKey:t.shiftKey,altKey:t.altKey,originalEvent:t}}var l=function(t){var e=this,n=t.press;void 0===n&&(n=a);var o=t.drag;void 0===o&&(o=a);var u=t.release;void 0===u&&(u=a);var l=t.mouseOnly;void 0===l&&(l=!1),this._pressHandler=i(c,n),this._dragHandler=i(c,o),this._releaseHandler=i(c,u),this._ignoreMouse=!1,this._mouseOnly=l,this._touchstart=function(t){1===t.touches.length&&e._pressHandler(t)},this._touchmove=function(t){1===t.touches.length&&e._dragHandler(t)},this._touchend=function(t){0===t.touches.length&&1===t.changedTouches.length&&(e._releaseHandler(t),e._ignoreMouse=!0,setTimeout(e._restoreMouse,2e3))},this._restoreMouse=function(){e._ignoreMouse=!1},this._mousedown=function(t){var 
n=t.which;n&&n>1||e._ignoreMouse||(r(document,"mousemove",e._mousemove),r(document,"mouseup",e._mouseup),e._pressHandler(t))},this._mousemove=function(t){e._dragHandler(t)},this._mouseup=function(t){s(document,"mousemove",e._mousemove),s(document,"mouseup",e._mouseup),e._releaseHandler(t)},this._pointerdown=function(t){t.isPrimary&&0===t.button&&(r(document,"pointermove",e._pointermove),r(document,"pointerup",e._pointerup),r(document,"pointercancel",e._pointerup),r(document,"contextmenu",p),e._pressHandler(t))},this._pointermove=function(t){t.isPrimary&&e._dragHandler(t)},this._pointerup=function(t){t.isPrimary&&(s(document,"pointermove",e._pointermove),s(document,"pointerup",e._pointerup),s(document,"pointercancel",e._pointerup),s(document,"contextmenu",p),e._releaseHandler(t))}};l.supportPointerEvent=function(){return"undefined"!=typeof window&&window.PointerEvent},l.prototype.bindTo=function(t){t!==this._element&&(this._element&&this._unbindFromCurrent(),this._element=t,this._bindToCurrent())},l.prototype._bindToCurrent=function(){var t=this._element;this._usePointers()?r(t,"pointerdown",this._pointerdown):(r(t,"mousedown",this._mousedown),this._mouseOnly||(r(t,"touchstart",this._touchstart),r(t,"touchmove",this._touchmove),r(t,"touchend",this._touchend)))},l.prototype._unbindFromCurrent=function(){var t=this._element;if(this._usePointers())return s(t,"pointerdown",this._pointerdown),s(document,"pointermove",this._pointermove),s(document,"pointerup",this._pointerup),s(document,"contextmenu",p),void s(document,"pointercancel",this._pointerup);s(t,"mousedown",this._mousedown),this._mouseOnly||(s(t,"touchstart",this._touchstart),s(t,"touchmove",this._touchmove),s(t,"touchend",this._touchend))},l.prototype._usePointers=function(){return!this._mouseOnly&&l.supportPointerEvent()},l.prototype.destroy=function(){this._unbindFromCurrent(),this._element=null},l.default=l;var d=l,h=n(7),f=n(3),b=n(5);n.d(e,"DraggableDirective",function(){return w}),n.d(e,"DraggableModule",function(){return S}),n.d(e,"EventsOutsideAngularDirective",function(){return k}),n.d(e,"EventsModule",function(){return O}),n.d(e,"ResizeSensorComponent",function(){return T}),n.d(e,"ResizeBatchService",function(){return E}),n.d(e,"ResizeSensorModule",function(){return B}),n.d(e,"KendoInput",function(){return A}),n.d(e,"isDocumentAvailable",function(){return m}),n.d(e,"isChanged",function(){return y}),n.d(e,"anyChanged",function(){return v}),n.d(e,"hasObservers",function(){return _}),n.d(e,"guid",function(){return C}),n.d(e,"Keys",function(){return g});var g,m=function(){return"undefined"!=typeof document},y=function(t,e,n){return void 0===n&&(n=!0),!(void 0===e[t]||e[t].isFirstChange()&&n||e[t].previousValue===e[t].currentValue)},v=function(t,e,n){return void 0===n&&(n=!0),t.some(function(t){return y(t,e,n)})},_=function(t){return t&&t.observers.length>0},C=function(){for(var t="",e=0;e<32;e++){var n=16*Math.random()|0;8!==e&&12!==e&&16!==e&&20!==e||(t+="-"),t+=(12===e?4:16===e?3&n|8:n).toString(16)}return t},w=function(){function t(t,e){this.element=t,this.ngZone=e,this.enableDrag=!0,this.kendoPress=new o.EventEmitter,this.kendoDrag=new o.EventEmitter,this.kendoRelease=new o.EventEmitter}return t.prototype.ngOnInit=function(){this.toggleDraggable()},t.prototype.ngOnChanges=function(t){y("enableDrag",t)&&this.toggleDraggable()},t.prototype.ngOnDestroy=function(){this.destroyDraggable()},t.prototype.toggleDraggable=function(){var t=this;m()&&(this.destroyDraggable(),this.enableDrag&&(this.draggable=new d({drag:function(e){return 
t.kendoDrag.next(e)},press:function(e){return t.kendoPress.next(e)},release:function(e){return t.kendoRelease.next(e)}}),this.ngZone.runOutsideAngular(function(){return t.draggable.bindTo(t.element.nativeElement)})))},t.prototype.destroyDraggable=function(){this.draggable&&(this.draggable.destroy(),this.draggable=null)},t.decorators=[{type:o.Directive,args:[{selector:"[kendoDraggable]"}]}],t.ctorParameters=function(){return[{type:o.ElementRef},{type:o.NgZone}]},t.propDecorators={enableDrag:[{type:o.Input}],kendoPress:[{type:o.Output}],kendoDrag:[{type:o.Output}],kendoRelease:[{type:o.Output}]},t}(),S=function(){function t(){}return t.decorators=[{type:o.NgModule,args:[{declarations:[w],exports:[w],imports:[h.CommonModule]}]}],t}(),k=function(){function t(t,e,n){this.element=t,this.ngZone=e,this.renderer=n,this.events={}}return t.prototype.ngOnInit=function(){var t=this;if(this.element&&this.element.nativeElement){var e=this.events;this.subscriptions=[],this.ngZone.runOutsideAngular(function(){for(var n in e)e.hasOwnProperty(n)&&t.subscriptions.push(t.renderer.listen(t.element.nativeElement,n,t.scope?e[n].bind(t.scope):e[n]))})}},t.prototype.ngOnDestroy=function(){if(this.subscriptions){for(var t=0;t<this.subscriptions.length;t++)this.subscriptions[t]();this.subscriptions=null}},t.decorators=[{type:o.Directive,args:[{selector:"[kendoEventsOutsideAngular]"}]}],t.ctorParameters=function(){return[{type:o.ElementRef},{type:o.NgZone},{type:o.Renderer2}]},t.propDecorators={events:[{type:o.Input,args:["kendoEventsOutsideAngular"]}],scope:[{type:o.Input}]},t}(),O=function(){function t(){}return t.decorators=[{type:o.NgModule,args:[{declarations:[k],exports:[k]}]}],t}(),E=function(){function t(t){this.ngZone=t,this.scheduled=[],this.resolvedPromise=Promise.resolve(null),this.flush=this.flush.bind(this)}return t.prototype.schedule=function(t,e){var n=this;this.scheduled.push({instance:t,method:e}),this.subscription||this.ngZone.runOutsideAngular(function(){n.subscription=Object(f.from)(n.resolvedPromise).subscribe(n.flush)})},t.prototype.isScheduled=function(t){return Boolean(this.scheduled.find(function(e){return e.instance===t}))},t.prototype.cancel=function(t){for(var e=this.scheduled,n=e.length,o=0;o<n;o++)if(e[o].instance===t)return e.splice(o,1),void(e.length||this.unsubscribe())},t.prototype.ngOnDestroy=function(){this.unsubscribe()},t.prototype.unsubscribe=function(){this.subscription&&(this.subscription.unsubscribe(),this.subscription=null)},t.prototype.flush=function(){this.scheduled.forEach(function(t){t.method.call(t.instance)}),this.scheduled=[],this.unsubscribe()},t.decorators=[{type:o.Injectable}],t.ctorParameters=function(){return[{type:o.NgZone}]},t}(),x=10,P="position: absolute; display: block; left: 0; top: 0; right: 0; bottom: 0; z-index: -1;overflow: hidden; visibility: hidden;",T=function(){function t(t,e,n,i){this.resizeBatchService=t,this.element=e,this.zone=n,this.renderer=i,this.rateLimit=x,this.resize=new o.EventEmitter,this.source=new f.Subject,this.state=0,this.acceptedSize=!1}return t.prototype.ngAfterViewInit=function(){var t=this;this.zone.runOutsideAngular(function(){var e=t.scroll.bind(t),n=t.renderer.listen(t.expand.nativeElement,"scroll",e),o=t.renderer.listen(t.shrink.nativeElement,"scroll",e);t.detachScrollHandlers=function(){n(),o()}})},t.prototype.ngAfterViewChecked=function(){"undefined"!=typeof 
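/*
 * Sketch of the batching idea behind ResizeBatchService above: scheduled
 * measurement callbacks are collected and all run once on the next microtask
 * (the service does this via an RxJS subscription to a resolved promise, run
 * outside Angular's zone so no change detection fires). This zone-free
 * approximation uses Promise.resolve directly; names are illustrative.
 */
function createBatchScheduler() {
  var queue = [];
  var pending = false;
  return function schedule(instance, method) {
    queue.push({ instance: instance, method: method });
    if (!pending) {
      pending = true;
      Promise.resolve(null).then(function flush() {
        var work = queue;
        queue = [];
        pending = false;
        // run every scheduled method exactly once per microtask turn
        work.forEach(function (job) { job.method.call(job.instance); });
      });
    }
  };
}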
document&&(2!==this.state?0===this.state&&(this.state=1,this.resizeBatchService.schedule(this,this.init)):this.resizeBatchService.isScheduled(this)||this.resizeBatchService.schedule(this,this.scroll))},t.prototype.ngOnDestroy=function(){this.subscription&&this.subscription.unsubscribe(),this.detachScrollHandlers&&this.detachScrollHandlers(),this.resizeBatchService.cancel(this)},t.prototype.acceptSize=function(t){void 0===t&&(t=this.measure()),this.lastWidth=t.width,this.lastHeight=t.height,this.acceptedSize=!0},t.prototype.scroll=function(t){var e=this;if(this.parentElement){var n=this.measure(),o=n.width,i=n.height;o===this.lastWidth&&i===this.lastHeight||(this.lastWidth=o,this.lastHeight=i,this.acceptedSize=!1,this.zone.runOutsideAngular(function(){e.source.next()}),this.reset())}},t.prototype.init=function(){var t,e,n=this,o=1e3/(this.rateLimit||x);this.subscription=this.source.asObservable().pipe(Object(b.auditTime)(o)).subscribe(function(){n.acceptedSize||n.resize.emit()}),this.parentElement=this.element.nativeElement.parentElement,"static"===(t=this.parentElement,e="position",getComputedStyle(t,null).getPropertyValue(e))&&(this.parentElement.style.position="relative"),this.reset(),this.lastWidth=this.parentElement.offsetWidth,this.lastHeight=this.parentElement.offsetHeight,this.state=2},t.prototype.reset=function(){var t=this.expandChild.nativeElement;t.style.width=1e5+"px",t.style.height=1e5+"px";var e=this.expand.nativeElement;e.scrollLeft=1e5,e.scrollTop=1e5;var n=this.shrink.nativeElement;n.scrollLeft=1e5,n.scrollTop=1e5},t.prototype.measure=function(){var t=0,e=0;return this.parentElement&&(e=this.parentElement.offsetHeight,t=this.parentElement.offsetWidth),{height:e,width:t}},t.decorators=[{type:o.Component,args:[{selector:"kendo-resize-sensor",styles:[":host { "+P+" }"],template:'<div #expand style="'+P+'"> <div #expandChild style="position: absolute; left: 0; top: 0; transition: 0s;"></div></div><div #shrink style="'+P+'"> <div style="position: absolute; left: 0; top: 0; transition: 0s;width: 200%; height: 200%;"></div></div>'}]}],t.ctorParameters=function(){return[{type:E},{type:o.ElementRef},{type:o.NgZone},{type:o.Renderer2}]},t.propDecorators={rateLimit:[{type:o.Input}],resize:[{type:o.Output}],expand:[{type:o.ViewChild,args:["expand"]}],expandChild:[{type:o.ViewChild,args:["expandChild"]}],shrink:[{type:o.ViewChild,args:["shrink"]}]},t}(),I=[T],B=function(){function t(){}return t.decorators=[{type:o.NgModule,args:[{declarations:[I],exports:[I],providers:[E]}]}],t}(),A=function(){return function(){}}();!function(t){t[t.Alt=18]="Alt",t[t.ArrowDown=40]="ArrowDown",t[t.ArrowLeft=37]="ArrowLeft",t[t.ArrowRight=39]="ArrowRight",t[t.ArrowUp=38]="ArrowUp",t[t.Backspace=8]="Backspace",t[t.Control=17]="Control",t[t.Delete=46]="Delete",t[t.End=35]="End",t[t.Enter=13]="Enter",t[t.Escape=27]="Escape",t[t.F1=112]="F1",t[t.F2=113]="F2",t[t.F10=121]="F10",t[t.Home=36]="Home",t[t.Insert=45]="Insert",t[t.PageDown=34]="PageDown",t[t.PageUp=33]="PageUp",t[t.Shift=16]="Shift",t[t.Space=32]="Space",t[t.Tab=9]="Tab",t[t.NumpadDecimal=110]="NumpadDecimal"}(g||(g={}))},function(t,n){t.exports=e},function(t,e){t.exports=n},function(t,e){t.exports=o},function(t,e){t.exports=i},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(17),s=function(){function t(){this.tabIndex=-1,this.overflows=!0,this.navigationService||(this.navigationService=new r.ToolNavigationService)}return Object.defineProperty(t.prototype,"toolbarDisplay",{get:function(){return 
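/*
 * Sketch of the resize-detection trick used by the kendo-resize-sensor
 * component above, which predates ResizeObserver: two invisible overflowing
 * divs ("expand" holding a 100000px child, "shrink" holding a 200% child)
 * are scrolled to their far corner; any change in the host's size shifts a
 * scroll position and fires a "scroll" event. Reduced to its essence;
 * element wiring and CSS are assumed to match the component's template.
 */
function watchResize(host, expand, shrink, onResize) {
  var lastW = host.offsetWidth, lastH = host.offsetHeight;
  function reset() {
    // re-arm: park both scrollers at the bottom-right corner
    expand.scrollLeft = expand.scrollTop = 100000;
    shrink.scrollLeft = shrink.scrollTop = 100000;
  }
  function check() {
    var w = host.offsetWidth, h = host.offsetHeight;
    if (w !== lastW || h !== lastH) {
      lastW = w; lastH = h;
      onResize({ width: w, height: h });
      reset();
    }
  }
  expand.addEventListener("scroll", check);
  shrink.addEventListener("scroll", check);
  reset();
}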
this.overflows?"none":"inline-block"},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"overflowDisplay",{get:function(){return this.overflows?"block":"none"},enumerable:!0,configurable:!0}),t=o.__decorate([i.Component({selector:"toolbar-tool",template:""}),o.__metadata("design:paramtypes",[])],t)}();e.ToolBarToolComponent=s},function(t,e){t.exports=r},function(t,e,n){"use strict";n.r(e);var o=n(0),i=n(3),r="bottom",s="center",a="middle",p="left",u="right",c="top",l=function(t){var e=t.anchorRect,n=t.anchorAlign,o=t.elementRect,i=t.elementAlign,p=t.margin;void 0===p&&(p={});var c=n.horizontal,l=n.vertical,d=i.horizontal,h=i.vertical,f=p.horizontal||0,b=p.vertical||0,g=e.top,m=e.left;return l===r&&(g+=e.height),l!==s&&l!==a||(g+=Math.round(e.height/2)),h===r&&(g-=o.height,b*=-1),h!==s&&h!==a||(g-=Math.round(o.height/2),b*=-1),c===u&&(m+=e.width),c!==s&&c!==a||(m+=Math.round(e.width/2)),d===u&&(m-=o.width,f*=-1),d!==s&&d!==a||(m-=Math.round(o.width/2),f*=-1),{top:g+b,left:m+f}};function d(t){return t.ownerDocument||t.document||t}var h=function(t){return d(t).defaultView},f=function(t){return d(t).documentElement},b=0;function g(){if(!b&&"undefined"!=typeof document){var t=document.createElement("div");t.style.cssText="overflow:scroll;overflow-x:hidden;zoom:1;clear:both;display:block",t.innerHTML=" ",document.body.appendChild(t),b=t.offsetWidth-t.scrollWidth,document.body.removeChild(t)}return b}function m(t){var e=h(t),n=f(t),o={height:e.innerHeight,width:e.innerWidth};return n.scrollHeight-n.clientHeight>0&&(o.width-=g()),o}var y=function(t){if(!t.getBoundingClientRect){var e=m(t);return{bottom:e.height,left:0,right:e.width,top:0}}var n=t.getBoundingClientRect();return{bottom:n.bottom,left:n.left,right:n.right,top:n.top}},v=function(t){for(var e=t.offsetParent;e&&"static"===e.style.position;)e=e.offsetParent;return e||f(t)},_=function(t){var e=t.getBoundingClientRect(),n=e.left,o=e.top;return e.height||e.width||(e=function(t){var e=t.style,n=e.display,o=e.left,i=e.position;t.style.display="",t.style.left="-10000px",t.style.position="absolute";var r=t.getBoundingClientRect();return t.style.display=n,t.style.left=o,t.style.position=i,r}(t)),{top:o,left:n,height:e.height,width:e.width}},C=function(t,e){for(var n=[],o=t.parentNode;o&&(n.push(o),o!==e);)o=o.parentNode;return n};function w(t){var e=f(t),n=h(t);return{x:n.pageXOffset||e.scrollLeft||0,y:n.pageYOffset||e.scrollTop||0}}var S=function(t){return t===(t.ownerDocument||{}).body?w(t):{x:t.scrollLeft,y:t.scrollTop}};var k=function(t,e){var n=h(t),o=n.getComputedStyle(t),i=_(t),r=e||v(t),s=t.ownerDocument,a=r!==s.body&&r!==s.documentElement,p={top:0,left:0};if("fixed"!==o.position&&a){var u=n.getComputedStyle(r);(p=_(r)).top+=parseInt(u.borderTopWidth,10),p.left+=parseInt(u.borderLeftWidth,10)}return{top:i.top-p.top,left:i.left-p.left,height:i.height,width:i.width}},O=function(t,e){return t?S(t):function(t){var e=v(t);return e?S(e):{x:0,y:0}}(e)};var E="fit",x="flip",P=function(t,e,n){var o=0;return t+e>n&&(o=n-(t+e)),t<0&&(o=-t),o},T=function(t){var e=t.offset,n=t.size,o=t.anchorSize,i=t.viewPortSize,r=t.anchorAlignPoint,u=t.elementAlignPoint,l=0,d=2*t.margin;if(u!==r&&!(u===s||u===a)&&!(r===s||r===a)){var h=r===c||r===p;e<0&&h?e+(l=n+o+d)+n>i&&(l=0):e>=0&&!h&&(e+n>i&&(l+=-(o+n+d)),e+l<0&&(l=0))}return l},I=function(t){var e=t.anchorRect,n=t.anchorAlign,o=t.elementRect,i=t.elementAlign,r=t.collisions,s=t.viewPort,a=t.margin;void 0===a&&(a={});var 
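/*
 * Sketch of the popup alignment math in the minified align helper above:
 * given an anchor rectangle and alignment points for anchor and element, it
 * produces a top/left position. Condensed here to the vertical axis only
 * (the horizontal axis is symmetric with "left"/"right"); names illustrative.
 */
function alignTopSketch(anchorRect, anchorVertical, elementHeight, elementVertical, marginV) {
  var top = anchorRect.top;
  if (anchorVertical === "bottom") { top += anchorRect.height; }
  if (anchorVertical === "center" || anchorVertical === "middle") {
    top += Math.round(anchorRect.height / 2);
  }
  // aligning the element's bottom/center flips the margin direction
  if (elementVertical === "bottom") { top -= elementHeight; marginV *= -1; }
  if (elementVertical === "center" || elementVertical === "middle") {
    top -= Math.round(elementHeight / 2); marginV *= -1;
  }
  return top + marginV;
}
// a popup opening below its anchor uses anchorVertical "bottom" with
// elementVertical "top"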
p=o.top,u=o.left,c=o.height,l=o.width,d=s.height,h=s.width,f=a.horizontal||0,b=a.vertical||0,g=0,m=0,y=r.horizontal===x,v=r.vertical===x;r.vertical===E&&(m+=P(p,c,d)),r.horizontal===E&&(g+=P(u,l,h)),v&&(m+=T({margin:b,offset:p,size:c,anchorSize:e.height,viewPortSize:d,anchorAlignPoint:n.vertical,elementAlignPoint:i.vertical})),y&&(g+=T({margin:f,offset:u,size:l,anchorSize:e.width,viewPortSize:h,anchorAlignPoint:n.horizontal,elementAlignPoint:i.horizontal}));var _=y&&0!==g,C=v&&0!==m;return{flipped:_||C,flip:{horizontal:_,vertical:C},offset:{left:g,top:m}}},B=function(t){for(var e=[],n=t.parentNode.firstElementChild;n;)n!==t&&e.push(n),n=n.nextElementSibling;return e},A=n(1),j=n(5),F=n(10),R=n(7);n.d(e,"AlignService",function(){return $}),n.d(e,"AnimationService",function(){return G}),n.d(e,"DOMService",function(){return W}),n.d(e,"PositionService",function(){return Z}),n.d(e,"ResizeService",function(){return X}),n.d(e,"ScrollableService",function(){return q}),n.d(e,"PopupService",function(){return nt}),n.d(e,"POPUP_CONTAINER",function(){return et}),n.d(e,"PopupComponent",function(){return tt}),n.d(e,"PopupModule",function(){return it}),n.d(e,"SCALE",function(){return K});var D=function(t,e){return t||{height:0,left:e.left,top:e.top,width:0}},N=function(){return"undefined"!=typeof window},z=/auto|scroll/,H=function(t){return function(t){return""+t.style.overflow+t.style.overflowX+t.style.overflowY}(t)||function(t){var e=window.getComputedStyle(t);return""+e.overflow+e.overflowX+e.overflowY}(t)},L=function(){if(!Object(A.isDocumentAvailable)())return!1;var t=document.createElement("div");t.style.transform="matrix(10, 0, 0, 10, 0, 0)",t.innerHTML='<div style="position: fixed; top: 10px;">child</div>',document.body.appendChild(t);var e=10!==t.children[0].getBoundingClientRect().top;return document.body.removeChild(t),e}(),V=function(t,e){if(!t||!Object(A.isDocumentAvailable)()||!N())return null;var n=function(t,e){for(var n,o,i=C(t),r=e;r&&(n=B(r),!(o=i.reduce(function(t,e){return t.concat(n.filter(function(t){return t===e}))},[])[0]));)r=r.parentElement;return o}(t,e);if(!n)return null;var o=[t].concat(C(t,n)).reduce(function(t,e){var n=e.style.zIndex||window.getComputedStyle(e).zIndex,o=parseInt(n,10);return o>t?o:t},0);return o?o+1:null},M=function(t,e){return t&&1!==e?{height:t.height/e,left:t.left/e,top:t.top/e,width:t.width/e}:t},U=["font-size","font-family","font-stretch","font-style","font-weight","line-height"],W=function(){function t(){}return t.prototype.addOffset=function(t,e){return{left:t.left+e.left,top:t.top+e.top}},t.prototype.addScroll=function(t,e){return function(t,e){return{top:t.top+e.y,left:t.left+e.x,height:t.height,width:t.width}}(t,e)},t.prototype.align=function(t){return l(t)},t.prototype.boundingOffset=function(t){return y(this.nativeElement(t))},t.prototype.getFontStyles=function(t){var e=this.getWindow();if(!e||!t)return[];var n=e.getComputedStyle(this.nativeElement(t));return U.map(function(t){return{key:t,value:n[t]}})},t.prototype.getWindow=function(){return N()?window:null},t.prototype.hasOffsetParent=function(t){return!!t&&!!this.nativeElement(t).offsetParent},t.prototype.offset=function(t){return t?_(this.nativeElement(t)):null},t.prototype.offsetAtPoint=function(t,e){if(!t)return null;var n=this.nativeElement(t),o=n.style,i=o.left,r=o.top,s=o.transition;n.style.transition="none",n.style.left=e.left+"px",n.style.top=e.top+"px";var a=_(n);return n.style.left=i,n.style.top=r,n.offsetHeight,n.style.transition=s,a},t.prototype.nativeElement=function(t){return 
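/*
 * Sketch of the "fit" collision correction used by restrictToView above:
 * along one axis, nudge the element back inside [0, viewportSize], with the
 * leading (top/left) edge taking priority when both edges overflow.
 */
function fitOffset(position, size, viewportSize) {
  var shift = 0;
  if (position + size > viewportSize) { shift = viewportSize - (position + size); }
  if (position < 0) { shift = -position; }   // leading edge wins
  return shift;
}
// fitOffset(950, 100, 1000) -> -50;  fitOffset(-20, 100, 1000) -> 20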
t?t.nativeElement||t:null},t.prototype.position=function(t,e,n){return void 0===n&&(n=1),t&&e?function(t,e,n){void 0===n&&(n=1);var o=e?v(e):null,i=k(t,o),r=i.top,s=i.left,a=i.height,p=i.width,u=O(o,t),c=u.x,l=u.y,d=t.ownerDocument,h=o===d.body||o===d.documentElement?1:n;return{top:r+l*h,left:s+c*h,height:a,width:p}}(this.nativeElement(t),this.nativeElement(e),n):null},t.prototype.removeScroll=function(t,e){return function(t,e){return{top:t.top-e.y,left:t.left-e.x,height:t.height,width:t.width}}(t,e)},t.prototype.restrictToView=function(t){return I(t)},t.prototype.scrollPosition=function(t){return w(this.nativeElement(t))},t.prototype.scrollableParents=function(t){return function(t){var e=[];if(!Object(A.isDocumentAvailable)()||!N())return e;for(var n=t.parentElement;n;)(z.test(H(n))||n.hasAttribute("data-scrollable"))&&e.push(n),n=n.parentElement;return e.push(window),e}(this.nativeElement(t))},t.prototype.stackingElementOffset=function(t){var e=this.getRelativeContextElement(t);return e?_(e):null},t.prototype.stackingElementScroll=function(t){var e=this.getRelativeContextElement(t);return e?{x:e.scrollLeft,y:e.scrollTop}:{x:0,y:0}},t.prototype.getRelativeContextElement=function(t){if(!t||!L)return null;for(var e=this.nativeElement(t).parentElement;e;){if("none"!==window.getComputedStyle(e).transform)return e;e=e.parentElement}return null},t.prototype.useRelativePosition=function(t){return!!this.getRelativeContextElement(t)},t.prototype.windowViewPort=function(t){return m(this.nativeElement(t))},t.prototype.zIndex=function(t,e){return V(this.nativeElement(t),this.nativeElement(e))},t.prototype.zoomLevel=function(){return Object(A.isDocumentAvailable)()&&N()&&parseFloat((document.documentElement.clientWidth/window.innerWidth).toFixed(2))||1},t.prototype.isZoomed=function(){return this.zoomLevel()>1},t.decorators=[{type:o.Injectable}],t}(),K=new o.InjectionToken("Popup Document Scale"),$=function(){function t(t,e){void 0===e&&(e=1),this._dom=t,this.scale=e}return t.prototype.alignElement=function(t){var e=t.anchor,n=t.element,o=t.anchorAlign,i=t.elementAlign,r=t.margin,s=t.offset,a=t.positionMode,p=this.scale||1,u="fixed"===a||!this._dom.hasOffsetParent(n)?this.absoluteRect(e,n,s,p):this.relativeRect(e,n,s,p),c=M(this._dom.offset(n),p);return this._dom.align({anchorAlign:o,anchorRect:u,elementAlign:i,elementRect:c,margin:r})},t.prototype.absoluteRect=function(t,e,n,o){var i=this.elementScrollPosition(t,e),r=D(this._dom.offset(t),n),s=2*o,a=this._dom.stackingElementScroll(e);1!==o&&a&&(a.x/=s,a.y/=s);var p=this._dom.stackingElementOffset(e);return 1!==o&&p&&(p.left/=s,p.top/=s),this._dom.removeScroll(this._dom.addScroll(function(t,e){return e?{height:t.height,left:t.left-e.left,top:t.top-e.top,width:t.width}:t}(M(r,o),p),a),i)},t.prototype.elementScrollPosition=function(t,e){return t?{x:0,y:0}:this._dom.scrollPosition(e)},t.prototype.relativeRect=function(t,e,n,o){var i=D(this._dom.position(t,e,o),n);return M(i,o)},t.decorators=[{type:o.Injectable}],t.ctorParameters=function(){return[{type:W},{type:Number,decorators:[{type:o.Inject,args:[K]},{type:o.Optional}]}]},t}(),Z=function(){function t(t,e){void 0===e&&(e=1),this._dom=t,this.scale=e}return t.prototype.positionElement=function(t){var e=t.anchor,n=t.currentLocation,o=t.element,i=t.anchorAlign,r=t.elementAlign,s=t.collisions,a=t.margin,p=this._dom,u=this.scale||1,c=p.offsetAtPoint(o,n),l=M(c,u),d=M(p.offset(e),u),h=D(d,n),f=t.viewPort||p.windowViewPort(o);f.width=f.width/u,f.height=f.height/u;var 
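/*
 * Sketch of the zoom-level estimate in DOMService above: the ratio of the
 * document's layout width to the window's inner width, rounded to two
 * decimals, falling back to 1. The popup services divide measured rects by
 * this factor so alignment math works in unscaled coordinates.
 */
function zoomLevelSketch() {
  if (typeof document === "undefined" || typeof window === "undefined") { return 1; }
  var ratio = document.documentElement.clientWidth / window.innerWidth;
  return parseFloat(ratio.toFixed(2)) || 1;   // NaN/0 fall back to 1
}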
b=p.restrictToView({anchorAlign:i,anchorRect:h,collisions:s,elementAlign:r,elementRect:l,margin:a,viewPort:f}),g=p.addOffset(n,b.offset);return{flip:b.flip,flipped:b.flipped,offset:g}},t.decorators=[{type:o.Injectable}],t.ctorParameters=function(){return[{type:W},{type:Number,decorators:[{type:o.Inject,args:[K]},{type:o.Optional}]}]},t}(),X=function(){function t(t,e){this._dom=t,this._zone=e}return t.prototype.subscribe=function(t){var e=this;Object(A.isDocumentAvailable)()&&this._zone.runOutsideAngular(function(){e.subscription=Object(i.fromEvent)(e._dom.getWindow(),"resize").pipe(Object(j.auditTime)(1e3/60)).subscribe(function(){return t()})})},t.prototype.unsubscribe=function(){this.subscription&&this.subscription.unsubscribe()},t.prototype.isUnsubscribed=function(){return this.subscription&&this.subscription.closed},t.decorators=[{type:o.Injectable}],t.ctorParameters=function(){return[{type:W},{type:o.NgZone}]},t}(),q=function(){function t(t,e){this._dom=t,this._zone=e}return t.prototype.forElement=function(t){return this.unsubscribe(),this.element=t,this},t.prototype.subscribe=function(t){var e=this;if(t&&Object(A.isDocumentAvailable)()&&this.element){var n=this._dom.nativeElement(this.element),o=this._dom.scrollableParents(this.element);this._zone.runOutsideAngular(function(){var r=o.map(function(t){return Object(i.fromEvent)(t,"scroll").pipe(Object(j.auditTime)(1e3/60))});e.subscription=i.merge.apply(void 0,r).subscribe(function(i){var r=i.target,s=o.filter(function(t){return t===r}).length>0,a=r===document,p=r===window;(s||a||p)&&t(e.isVisible(n,r))})})}},t.prototype.unsubscribe=function(){this.subscription&&this.subscription.unsubscribe()},t.prototype.isVisible=function(t,e){var n=this._dom.boundingOffset(t),o=this._dom.boundingOffset(e);return!(1<o.top-n.bottom)&&(!(1<n.top-o.bottom)&&(!(1<n.left-o.right)&&!(1<o.left-n.right)))},t.decorators=[{type:o.Injectable}],t.ctorParameters=function(){return[{type:W},{type:o.NgZone}]},t}(),Y={expand:function(t){var e="up"===t||"down"===t?"scaleY":"scaleX";return{start:{transform:e+"(0)",transformOrigin:"down"===t?"top":"left"===t?"right":"right"===t?"left":"bottom"},end:{transform:e+"(1)"}}},slide:function(t){var e="left"===t||"right"===t?"translateX":"translateY";return{start:{transform:e+"("+("right"===t||"down"===t?-100:100)+"%)"},end:{transform:e+"(0%)"}}},fade:function(){return{start:{opacity:0},end:{opacity:1}}},zoom:function(){return{start:{transform:"scale(0)"},end:{transform:"scale(1)"}}}},G=function(){function t(t){this.animationBuilder=t,this.start=new o.EventEmitter,this.end=new o.EventEmitter}return t.prototype.play=function(t,e,n){if(!this.flip||this.flip.horizontal!==n.horizontal||this.flip.vertical!==n.vertical){this.flip=n;var i=e.type||"slide",r=Y[i];if(r){var s=r(this.getDirection(n,e));this.playStates(t,s,e)}else if(Object(o.isDevMode)())throw new Error('Unsupported animation type: "'+i+'". 
The supported types are slide, expand, fade and zoom.')}},t.prototype.ngOnDestroy=function(){this.stopPlayer()},t.prototype.playStates=function(t,e,n){var o=this;this.stopPlayer();var i=n.duration||100,r=this.animationBuilder.build([Object(F.style)(e.start),Object(F.animate)(i+"ms ease-in",Object(F.style)(e.end))]),s=this.player=r.create(t);s.onDone(function(){o.end.emit(),o.stopPlayer()}),this.start.emit(),s.play()},t.prototype.getDirection=function(t,e){var n=e.direction||"down";return t.horizontal&&("left"===n?n="right":"right"===n&&(n="left")),t.vertical&&("down"===n?n="up":"up"===n&&(n="down")),n},t.prototype.stopPlayer=function(){this.player&&(this.player.destroy(),this.player=null)},t.decorators=[{type:o.Injectable}],t.ctorParameters=function(){return[{type:F.AnimationBuilder}]},t}(),Q={left:-1e4,top:0},J="k-animation-container",tt=function(){function t(t,e,n,i,r,s,a,p,u){this.container=t,this._alignService=e,this.domService=n,this._positionService=i,this._resizeService=r,this._scrollableService=s,this.animationService=a,this._renderer=p,this._zone=u,this.animate=!0,this.anchorAlign={horizontal:"left",vertical:"bottom"},this.collision={horizontal:"fit",vertical:"flip"},this.popupAlign={horizontal:"left",vertical:"top"},this.copyAnchorStyles=!1,this.positionMode="fixed",this.offset=Q,this.anchorViewportLeave=new o.EventEmitter,this.close=new o.EventEmitter,this.open=new o.EventEmitter,this.positionChange=new o.EventEmitter,this.resolvedPromise=Promise.resolve(null),this._renderer.addClass(t.nativeElement,J),this.updateFixedClass()}return t.prototype.ngOnInit=function(){this.repositionCallback=this.reposition.bind(this),this._resizeService.subscribe(this.repositionCallback),this.animationSubscriptions=this.animationService.start.subscribe(this.onAnimationStart.bind(this)),this.animationSubscriptions.add(this.animationService.end.subscribe(this.onAnimationEnd.bind(this))),this._scrollableService.forElement(this.anchor||this.container).subscribe(this.onScroll.bind(this)),this.currentOffset=Q,this.setZIndex(),this.copyFontStyles(),this.updateFixedClass()},t.prototype.ngOnChanges=function(t){t.copyAnchorStyles&&this.copyFontStyles(),t.positionMode&&this.updateFixedClass()},t.prototype.ngAfterViewInit=function(){var t=this;this.reposition(),this.animate||this.resolvedPromise.then(function(){t.onAnimationEnd()})},t.prototype.ngAfterViewChecked=function(){var t=this;this._zone.runOutsideAngular(function(){t.repositionSubscription&&t.repositionSubscription.unsubscribe(),t.repositionSubscription=Object(i.from)(t.resolvedPromise).subscribe(t.repositionCallback)})},t.prototype.ngOnDestroy=function(){this.anchorViewportLeave.complete(),this.positionChange.complete(),this.close.emit(),this.close.complete(),this._resizeService.unsubscribe(),this._scrollableService.unsubscribe(),this.repositionSubscription&&this.repositionSubscription.unsubscribe(),this.animationSubscriptions.unsubscribe()},t.prototype.onAnimationStart=function(){this._renderer.removeClass(this.container.nativeElement,"k-animation-container-shown")},t.prototype.onAnimationEnd=function(){this._renderer.addClass(this.container.nativeElement,"k-animation-container-shown"),this.open.emit(),this.open.complete()},Object.defineProperty(t.prototype,"currentOffset",{get:function(){return 
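/*
 * Sketch of the direction flipping in the AnimationService above: when the
 * popup gets flipped to stay inside the viewport, the animation direction is
 * mirrored on the flipped axis so the popup still grows away from its anchor.
 */
function flipDirection(direction, flip) {
  var d = direction || "down";
  if (flip.horizontal) { d = d === "left" ? "right" : d === "right" ? "left" : d; }
  if (flip.vertical)   { d = d === "down" ? "up"    : d === "up"    ? "down" : d; }
  return d;
}
// flipDirection("down", { horizontal: false, vertical: true }) -> "up"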
this._currentOffset},set:function(t){this.setContainerStyle("left",t.left+"px"),this.setContainerStyle("top",t.top+"px"),this._currentOffset=t},enumerable:!0,configurable:!0}),t.prototype.setZIndex=function(){this.anchor&&this.setContainerStyle("z-index",String(this.domService.zIndex(this.anchor,this.container)))},t.prototype.reposition=function(){var t=this;if(Object(A.isDocumentAvailable)()){var e,n,o,i,r,s,a=this.position(),p=a.flip,u=a.offset;(!this.currentOffset||(e=this.currentOffset,n=u,o=e.left,i=e.top,r=n.left,s=n.top,Math.abs(o-r)>=1||Math.abs(i-s)>=1))&&(this.currentOffset=u,Object(A.hasObservers)(this.positionChange)&&this._zone.run(function(){return t.positionChange.emit({offset:u,flip:p})})),this.animate&&this.animationService.play(this.contentContainer.nativeElement,this.animate,p)}},t.prototype.position=function(){var t=this._alignService.alignElement({anchor:this.anchor,anchorAlign:this.anchorAlign,element:this.container,elementAlign:this.popupAlign,margin:this.margin,offset:this.offset,positionMode:this.positionMode});return this._positionService.positionElement({anchor:this.anchor,anchorAlign:this.anchorAlign,collisions:this.collision,currentLocation:t,element:this.container,elementAlign:this.popupAlign,margin:this.margin})},t.prototype.onScroll=function(t){var e=this,n=Object(A.hasObservers)(this.anchorViewportLeave);t||!n?this.reposition():n&&this._zone.run(function(){e.anchorViewportLeave.emit()})},t.prototype.copyFontStyles=function(){var t=this;this.anchor&&this.copyAnchorStyles&&this.domService.getFontStyles(this.anchor).forEach(function(e){return t.setContainerStyle(e.key,e.value)})},t.prototype.updateFixedClass=function(){var t="fixed"===this.positionMode?"addClass":"removeClass";this._renderer[t](this.container.nativeElement,"k-animation-container-fixed")},t.prototype.setContainerStyle=function(t,e){this._renderer.setStyle(this.container.nativeElement,t,e)},t.decorators=[{type:o.Component,args:[{exportAs:"kendo-popup",providers:[$,G,W,Z,X,q],selector:"kendo-popup",template:'\n <div class="k-popup" [ngClass]="popupClass" #container>\n <ng-content></ng-content>\n <ng-template [ngTemplateOutlet]="content" [ngIf]="content"></ng-template>\n </div>\n '}]}],t.ctorParameters=function(){return[{type:o.ElementRef},{type:$},{type:W},{type:Z},{type:X},{type:q},{type:G},{type:o.Renderer2},{type:o.NgZone}]},t.propDecorators={animate:[{type:o.Input}],anchor:[{type:o.Input}],anchorAlign:[{type:o.Input}],collision:[{type:o.Input}],popupAlign:[{type:o.Input}],copyAnchorStyles:[{type:o.Input}],popupClass:[{type:o.Input}],positionMode:[{type:o.Input}],offset:[{type:o.Input}],margin:[{type:o.Input}],anchorViewportLeave:[{type:o.Output}],close:[{type:o.Output}],open:[{type:o.Output}],positionChange:[{type:o.Output}],contentContainer:[{type:o.ViewChild,args:["container"]}]},t}(),et=new o.InjectionToken("Popup Container"),nt=function(){function t(t,e,n,o){this.applicationRef=t,this.componentFactoryResolver=e,this.injector=n,this.container=o}return Object.defineProperty(t.prototype,"rootViewContainer",{get:function(){var t=this.applicationRef.components||[];if(t[0])return t[0];throw new Error("\n View Container not found! 
Inject the POPUP_CONTAINER or define a specific ViewContainerRef via the appendTo option.\n See http://www.telerik.com/kendo-angular-ui/components/popup/api/POPUP_CONTAINER/ for more details.\n ")},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"rootViewContainerNode",{get:function(){return this.container?this.container.nativeElement:this.getComponentRootNode(this.rootViewContainer)},enumerable:!0,configurable:!0}),t.prototype.open=function(t){void 0===t&&(t={});var e=this.contentFrom(t.content),n=e.component,o=e.nodes,i=this.appendPopup(o,t.appendTo),r=i.instance;this.projectComponentInputs(i,t),i.changeDetectorRef.detectChanges(),n&&n.changeDetectorRef.detectChanges();var s=this.getComponentRootNode(i);return{close:function(){var t;n?n.destroy():i.hostView.destroyed||(i.instance.content=null,i.changeDetectorRef.detectChanges()),i.destroy(),(t=s)&&t.parentNode&&t.parentNode.removeChild(t)},content:n,popup:i,popupAnchorViewportLeave:r.anchorViewportLeave,popupClose:r.close,popupElement:s,popupOpen:r.open,popupPositionChange:r.positionChange}},t.prototype.appendPopup=function(t,e){var n=this.createComponent(tt,t,e);return e||this.rootViewContainerNode.appendChild(this.getComponentRootNode(n)),n},t.prototype.getComponentRootNode=function(t){return t.hostView.rootNodes[0]},t.prototype.getComponentFactory=function(t){return this.componentFactoryResolver.resolveComponentFactory(t)},t.prototype.createComponent=function(t,e,n){var o=this.getComponentFactory(t);if(n)return n.createComponent(o,void 0,this.injector,e);var i=o.create(this.injector,e);return this.applicationRef.attachView(i.hostView),i},t.prototype.projectComponentInputs=function(t,e){return Object.getOwnPropertyNames(e).filter(function(t){return"content"!==t||e.content instanceof o.TemplateRef}).map(function(n){t.instance[n]=e[n]}),t},t.prototype.contentFrom=function(t){if(!t||t instanceof o.TemplateRef)return{component:null,nodes:[[]]};var e=this.createComponent(t);return{component:e,nodes:[e?[e.location.nativeElement]:[]]}},t.decorators=[{type:o.Injectable}],t.ctorParameters=function(){return[{type:o.ApplicationRef},{type:o.ComponentFactoryResolver},{type:o.Injector},{type:o.ElementRef,decorators:[{type:o.Inject,args:[et]},{type:o.Optional}]}]},t}(),ot=[tt],it=function(){function t(){}return t.decorators=[{type:o.NgModule,args:[{declarations:[ot],entryComponents:[ot],exports:[ot],imports:[R.CommonModule],providers:[nt]}]}],t}()},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=/^(?:a|input|select|textarea|button|object)$/i;e.outerWidth=function(t){var e=t.offsetWidth,n=getComputedStyle(t);return e+=(parseFloat(n.marginLeft)||0)+(parseFloat(n.marginRight)||0)},e.innerWidth=function(t){var e=t.offsetWidth,n=getComputedStyle(t);return e-=(parseFloat(n.paddingLeft)||0)+(parseFloat(n.borderLeftWidth)||0),e-=(parseFloat(n.paddingRight)||0)+(parseFloat(n.borderRightWidth)||0),e},e.outerHeight=function(t){var e=t.offsetHeight,n=getComputedStyle(t);return e+=(parseFloat(n.marginTop)||0)+(parseFloat(n.marginBottom)||0)},e.closest=function(t,e){for(;t&&!e(t);)t=t.parentNode;return t},e.isVisible=function(t){var e=t.getBoundingClientRect(),n=e.width>0&&e.height>0,o=0!==e.x&&0!==e.y;return(n||o)&&"hidden"!==window.getComputedStyle(t).visibility},e.findElement=function(t,n,o){if(void 0===o&&(o=!0),t){if(o&&n(t))return t;for(t=t.firstChild;t;){if(1===t.nodeType){var i=e.findElement(t,n);if(i)return i}t=t.nextSibling}}},e.isFocusable=function(t,n){if(void 0===n&&(n=!0),t.tagName){var 
i=t.tagName.toLowerCase(),r=null!==t.getAttribute("tabIndex");return o.test(i)&&(r=!t.disabled),r&&(!n||e.isVisible(t))}return!1},e.findFocusable=function(t,n){return void 0===n&&(n=!0),e.findElement(t,function(t){return e.isFocusable(t,n)})},e.findFocusableChild=function(t,n){return void 0===n&&(n=!0),e.findElement(t,function(t){return e.isFocusable(t,n)},!1)},e.findFocusableSibling=function(t,n,o){void 0===n&&(n=!0);for(var i=o?t.previousSibling:t.nextSibling;i;){if(1===i.nodeType){var r=e.findElement(i,function(t){return e.isFocusable(t,n)});if(r)return r}i=o?i.previousSibling:i.nextSibling}},e.isPresent=function(t){return null!=t}},function(t,e){t.exports=s},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(6),s=n(17),a=function(t){function e(){var e=t.call(this)||this;return e.navigationService=new s.ToolNavigationService,e}var n;return o.__extends(e,t),n=e,e.prototype.ngAfterViewInit=function(){this.popupTemplate||(this.popupTemplate=this.toolbarTemplate)},o.__decorate([i.ViewChild("toolbarTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"toolbarTemplate",void 0),o.__decorate([i.ViewChild("popupTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"popupTemplate",void 0),o.__decorate([i.ViewChild("separator"),o.__metadata("design:type",i.ElementRef)],e.prototype,"separator",void 0),e=n=o.__decorate([i.Component({exportAs:"kendoToolBarSeparator",providers:[{provide:r.ToolBarToolComponent,useExisting:i.forwardRef(function(){return n})}],selector:"kendo-toolbar-separator",template:"\n <ng-template #toolbarTemplate>\n </ng-template>\n "}),o.__metadata("design:paramtypes",[])],e)}(r.ToolBarToolComponent);e.ToolBarSeparatorComponent=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(6),s=n(1),a=n(19),p=n(13);e.display=function(t,e,n){switch(e){case"toolbar":return n?void 0:t;case"overflow":return n?t:void 0;default:return t}};var u=function(t){function n(){var e=t.call(this)||this;return e.showText="both",e.toggleable=!1,e.primary=!1,e.look="default",e.selected=!1,e.showIcon="both",e.click=new i.EventEmitter,e.selectedChange=new i.EventEmitter,e.internalState={selected:void 0},e.navigationService=new p.SingleFocusableNavigationService,e}var u;return o.__extends(n,t),u=n,Object.defineProperty(n.prototype,"text",{get:function(){return e.display(this._text,this.showText,this.overflows)},set:function(t){this._text=t},enumerable:!0,configurable:!0}),Object.defineProperty(n.prototype,"togglable",{get:function(){return this.toggleable},set:function(t){this.toggleable=t},enumerable:!0,configurable:!0}),Object.defineProperty(n.prototype,"icon",{get:function(){return e.display(this._icon,this.showIcon,this.overflows)},set:function(t){this._icon=t},enumerable:!0,configurable:!0}),Object.defineProperty(n.prototype,"iconClass",{get:function(){return e.display(this._iconClass,this.showIcon,this.overflows)},set:function(t){this._iconClass=t},enumerable:!0,configurable:!0}),Object.defineProperty(n.prototype,"imageUrl",{get:function(){return 
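/*
 * Sketch of the focusability test in the DOM utilities above: natively
 * focusable tags count unless disabled, any other element needs an explicit
 * tabIndex attribute, and the caller can additionally require visibility.
 * Condensed (the bundle's isVisible also accepts off-origin rects); names
 * are illustrative.
 */
var FOCUSABLE_TAGS = /^(?:a|input|select|textarea|button|object)$/i;
function isFocusableSketch(element, requireVisible) {
  if (!element.tagName) { return false; }
  var focusable = element.getAttribute("tabIndex") !== null;
  if (FOCUSABLE_TAGS.test(element.tagName)) {
    focusable = !element.disabled;   // native controls: disabled wins
  }
  if (!focusable) { return false; }
  return !requireVisible || isVisibleSketch(element);
}
function isVisibleSketch(element) {
  var rect = element.getBoundingClientRect();
  return rect.width > 0 && rect.height > 0 &&
         getComputedStyle(element).visibility !== "hidden";
}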
e.display(this._imageUrl,this.showIcon,this.overflows)},set:function(t){this._imageUrl=t},enumerable:!0,configurable:!0}),n.prototype.ngOnChanges=function(t){s.isChanged("selected",t)&&(this.internalState.selected=this.selected)},n.prototype.selectedChangeHandler=function(t){this.internalState.selected=t,this.selectedChange.emit(t)},o.__decorate([i.Input(),o.__metadata("design:type",String),o.__metadata("design:paramtypes",[String])],n.prototype,"text",null),o.__decorate([i.Input(),o.__metadata("design:type",String)],n.prototype,"showText",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Object)],n.prototype,"style",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Object)],n.prototype,"className",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],n.prototype,"title",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],n.prototype,"disabled",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],n.prototype,"toggleable",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Boolean),o.__metadata("design:paramtypes",[Boolean])],n.prototype,"togglable",null),o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],n.prototype,"primary",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],n.prototype,"look",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],n.prototype,"selected",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String),o.__metadata("design:paramtypes",[String])],n.prototype,"icon",null),o.__decorate([i.Input(),o.__metadata("design:type",String),o.__metadata("design:paramtypes",[String])],n.prototype,"iconClass",null),o.__decorate([i.Input(),o.__metadata("design:type",String),o.__metadata("design:paramtypes",[String])],n.prototype,"imageUrl",null),o.__decorate([i.Input(),o.__metadata("design:type",String)],n.prototype,"showIcon",void 0),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],n.prototype,"click",void 0),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],n.prototype,"selectedChange",void 0),o.__decorate([i.ViewChild("toolbarTemplate"),o.__metadata("design:type",i.TemplateRef)],n.prototype,"toolbarTemplate",void 0),o.__decorate([i.ViewChild("popupTemplate"),o.__metadata("design:type",i.TemplateRef)],n.prototype,"popupTemplate",void 0),o.__decorate([i.ViewChild("toolbarButton",{read:a.Button}),o.__metadata("design:type",a.Button)],n.prototype,"button",void 0),n=u=o.__decorate([i.Component({exportAs:"kendoToolBarButton",providers:[{provide:r.ToolBarToolComponent,useExisting:i.forwardRef(function(){return u})}],selector:"kendo-toolbar-button",template:'\n <ng-template #toolbarTemplate>\n <button #toolbarButton tabindex="-1" type="button" kendoButton\n [ngStyle]="style"\n [ngClass]="className"\n [attr.title]="title"\n [disabled]="disabled"\n [toggleable]="toggleable"\n [primary]="primary"\n [selected]="internalState.selected"\n [icon]="icon"\n [iconClass]="iconClass"\n [imageUrl]="imageUrl"\n [look]="look"\n (click)="click.emit($event)"\n (selectedChange)="selectedChangeHandler($event)">\n {{ text }}\n </button>\n </ng-template>\n <ng-template #popupTemplate>\n <button #overflowButton tabindex="-1" type="button" kendoButton\n class="k-overflow-button"\n [ngStyle]="style"\n [ngClass]="className"\n [attr.title]="title"\n [disabled]="disabled"\n [toggleable]="toggleable"\n [primary]="primary"\n [selected]="internalState.selected"\n [icon]="icon"\n [iconClass]="iconClass"\n [imageUrl]="imageUrl"\n [look]="look"\n 
(click)="click.emit($event)"\n (selectedChange)="selectedChangeHandler($event)">\n {{ text }}\n </button>\n </ng-template>\n '}),o.__metadata("design:paramtypes",[])],n)}(r.ToolBarToolComponent);e.ToolBarButtonComponent=u},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(9),s=function(){function t(){}return t.prototype.register=function(t,e){"toolbar"===e?this.toolbarRenderer=t:this.overflowRenderer=t},t.prototype.canFocus=function(){var t=this.findFocusable();return t&&t.offsetParent&&!this.hasFocus(t)&&!t.disabled},t.prototype.focus=function(){if(this.canFocus()){var t=this.findFocusable();this.setAttribute(t,"tabindex","0"),t.focus()}},t.prototype.defocus=function(){var t=this.findFocusable();t&&(this.setAttribute(t,"tabindex","-1"),this.hasFocus(t)&&t.blur())},t.prototype.hasFocus=function(t){return document.activeElement!==t&&r.closest(document.activeElement,function(e){return e===t})},t.prototype.findFocusable=function(){return this.toolbarNavigation.isPopupFocused?this.overflowRenderer.findFocusable():this.toolbarRenderer.findFocusable()},t.prototype.setAttribute=function(t,e,n){this.toolbarNavigation.isPopupFocused?this.overflowRenderer.setAttribute(t,e,n):this.toolbarRenderer.setAttribute(t,e,n)},t=o.__decorate([i.Injectable()],t)}();e.SingleFocusableNavigationService=s},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(8),s=n(1),a=n(4),p=n(15),u=n(16),c=n(6),l=n(9),d=n(1),h=n(24),f=n(18),b=n(5),g=function(){function t(t,e,n,o,r,s){this.localization=t,this.popupService=e,this.refreshService=n,this.navigationService=o,this.element=r,this.cdr=s,this.resizable=!1,this.tabindex=0,this.open=new i.EventEmitter,this.close=new i.EventEmitter,this.hostClasses=!0,this._popupSettings={animate:!0},this.direction=t.rtl?"rtl":"ltr"}return Object.defineProperty(t.prototype,"popupSettings",{get:function(){return this._popupSettings},set:function(t){this._popupSettings=Object.assign({},{animate:!0},t)},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"tabIndex",{get:function(){return this.tabindex},set:function(t){this.tabindex=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"appendTo",{get:function(){var t=this.popupSettings.appendTo;if(t&&"root"!==t)return"component"===t?this.container:t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"popupOpen",{get:function(){return this._open},set:function(t){if(this.popupOpen!==t){var e=new h.PreventableEvent;t?this.open.emit(e):this.close.emit(e),e.isDefaultPrevented()||this.toggle(t)}},enumerable:!0,configurable:!0}),t.prototype.onFocus=function(){var t=this.navigationService.focused;t?this.navigationService.focus(t):this.navigationService.focusFirst()},t.prototype.onKeyDown=function(t){var e="ltr"===this.direction?t.keyCode===d.Keys.ArrowLeft:t.keyCode===d.Keys.ArrowRight,n="ltr"===this.direction?t.keyCode===d.Keys.ArrowRight:t.keyCode===d.Keys.ArrowLeft;e&&(t.preventDefault(),this.navigationService.focusPrev()),n&&(t.preventDefault(),this.navigationService.focusNext()),t.keyCode===d.Keys.Tab&&this.element.nativeElement.blur(),this.navigationService.keydown.emit(t)},Object.defineProperty(t.prototype,"getTabIndex",{get:function(){return this.tabindex},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"getRole",{get:function(){return"toolbar"},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"getDir",{get:function(){return 
this.direction},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"resizableClass",{get:function(){return this.resizable},enumerable:!0,configurable:!0}),t.prototype.ngAfterViewInit=function(){var t=this;this.resizable&&(this.resizeSubscription=this.resizeSensor.resize.pipe(b.filter(function(){return t.resizable})).subscribe(this.onResize.bind(this)),this.resizeSensor.resize.emit(),this.navigationService.overflowButton=this.overflowButton)},t.prototype.ngOnInit=function(){var t=this;this.localizationChangesSubscription=this.localization.changes.subscribe(function(e){var n=e.rtl;return t.direction=n?"rtl":"ltr"})},t.prototype.ngOnDestroy=function(){this.resizeSubscription&&this.resizeSubscription.unsubscribe(),this.localizationChangesSubscription&&this.localizationChangesSubscription.unsubscribe()},t.prototype.showPopup=function(){this.popupOpen=!this.popupOpen},t.prototype.toggle=function(t){var e=this;this._open=void 0!==t?t:!this.popupOpen,this.popupRef&&(this.popupRef.close(),this.popupRef=null),this.popupOpen&&(this.popupRef=this.popupService.open({anchor:this.overflowButton,content:this.popupTemplate,appendTo:this.appendTo,animate:this.popupSettings.animate,popupClass:this.popupSettings.popupClass,positionMode:"absolute"}),this.popupRef.popupOpen.subscribe(this.onPopupOpen.bind(this)),this.popupRef.popupClose.subscribe(this.onPopupClose.bind(this)),this.popupRef.popupAnchorViewportLeave.subscribe(function(){return e.popupOpen=!1}))},t.prototype.onResize=function(){this.toggle(!1);var t=l.innerWidth(this.element.nativeElement)-this.overflowAnchorWidth;this.shrink(t,this.childrenWidth),this.stretch(t,this.childrenWidth),this.cdr.detectChanges(),this.resizeSensor.acceptSize()},t.prototype.onPopupOpen=function(){this.navigationService.moveFocusToPopup()},t.prototype.onPopupClose=function(){this.navigationService.moveFocusToToolBar()},Object.defineProperty(t.prototype,"displayAnchor",{get:function(){return this.overflowTools.length?"visible":"hidden"},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"overflowAnchorWidth",{get:function(){return this.resizable?(this.cachedOverflowAnchorWidth||(this.cachedOverflowAnchorWidth=l.outerWidth(this.overflowButton.nativeElement)),this.cachedOverflowAnchorWidth):0},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"childrenWidth",{get:function(){var t=0;return this.renderedTools.forEach(function(e){t+=e.width}),t+=this.overflowAnchorWidth,Math.ceil(t)},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"visibleTools",{get:function(){return this.allTools.filter(function(t){return!1===t.overflows})},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"overflowTools",{get:function(){return this.allTools.filter(function(t){return!0===t.overflows})},enumerable:!0,configurable:!0}),t.prototype.shrink=function(t,e){if(t<e)for(var n=this.visibleTools.length-1;n>=0&&!(t>e);n--)e-=this.hideLastVisibleTool()},t.prototype.stretch=function(t,e){var n;if(t>e)for(var o=this.overflowTools.length-1;o>=0&&(n=this.showFirstHiddenTool(t,e));o--)e+=n},t.prototype.hideLastVisibleTool=function(){var t=this.visibleTools[this.visibleTools.length-1],e=this.renderedTools.find(function(e){return e.tool===t}).width;return t.overflows=!0,this.refreshService.refresh(t),e},t.prototype.showFirstHiddenTool=function(t,e){var n=this.overflowTools[0],o=this.renderedTools.find(function(t){return t.tool===n});return 
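/*
 * Sketch of the overflow algorithm in the resizable ToolBarComponent above:
 * on each resize it compares the available width with the summed tool widths,
 * pushing trailing tools into the overflow popup (shrink) or restoring
 * leading overflowed tools while they still fit (stretch). Reduced to plain
 * data, without the rendering/refresh plumbing; names are illustrative.
 */
function reflowToolbar(available, tools) {
  // tools: [{ width: number, overflows: boolean }, ...]
  var used = tools.reduce(function (sum, t) {
    return sum + (t.overflows ? 0 : t.width);
  }, 0);
  // shrink: hide from the end while the toolbar is too wide
  for (var i = tools.length - 1; i >= 0 && used > available; i--) {
    if (!tools[i].overflows) { tools[i].overflows = true; used -= tools[i].width; }
  }
  // stretch: restore overflowed tools front-first until the next one
  // would not fit
  for (var j = 0; j < tools.length; j++) {
    if (!tools[j].overflows) { continue; }
    if (used + tools[j].width > available) { break; }
    tools[j].overflows = false; used += tools[j].width;
  }
  return tools;
}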
n.overflows=!1,n.visibility="hidden",this.refreshService.refresh(n),t>e+o.width?(n.visibility="visible",this.refreshService.refresh(n)):(n.overflows=!0,this.refreshService.refresh(n)),o.width},o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],t.prototype,"resizable",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Object),o.__metadata("design:paramtypes",[Object])],t.prototype,"popupSettings",null),o.__decorate([i.Input(),o.__metadata("design:type",Number)],t.prototype,"tabindex",void 0),o.__decorate([i.Input("tabIndex"),o.__metadata("design:type",Number),o.__metadata("design:paramtypes",[Number])],t.prototype,"tabIndex",null),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],t.prototype,"open",void 0),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],t.prototype,"close",void 0),o.__decorate([i.ContentChildren(c.ToolBarToolComponent),o.__metadata("design:type",i.QueryList)],t.prototype,"allTools",void 0),o.__decorate([i.ViewChild("overflowButton"),o.__metadata("design:type",i.ElementRef)],t.prototype,"overflowButton",void 0),o.__decorate([i.ViewChild("popupTemplate"),o.__metadata("design:type",i.TemplateRef)],t.prototype,"popupTemplate",void 0),o.__decorate([i.ViewChild("resizeSensor"),o.__metadata("design:type",s.ResizeSensorComponent)],t.prototype,"resizeSensor",void 0),o.__decorate([i.ViewChild("container",{read:i.ViewContainerRef}),o.__metadata("design:type",i.ViewContainerRef)],t.prototype,"container",void 0),o.__decorate([i.ViewChildren(f.ToolBarRendererComponent),o.__metadata("design:type",i.QueryList)],t.prototype,"renderedTools",void 0),o.__decorate([i.HostBinding("class.k-widget"),i.HostBinding("class.k-toolbar"),o.__metadata("design:type",Boolean)],t.prototype,"hostClasses",void 0),o.__decorate([i.HostListener("focus"),o.__metadata("design:type",Function),o.__metadata("design:paramtypes",[]),o.__metadata("design:returntype",void 0)],t.prototype,"onFocus",null),o.__decorate([i.HostListener("keydown",["$event"]),o.__metadata("design:type",Function),o.__metadata("design:paramtypes",[Object]),o.__metadata("design:returntype",void 0)],t.prototype,"onKeyDown",null),o.__decorate([i.HostBinding("attr.tabindex"),o.__metadata("design:type",Number),o.__metadata("design:paramtypes",[])],t.prototype,"getTabIndex",null),o.__decorate([i.HostBinding("attr.role"),o.__metadata("design:type",String),o.__metadata("design:paramtypes",[])],t.prototype,"getRole",null),o.__decorate([i.HostBinding("attr.dir"),o.__metadata("design:type",String),o.__metadata("design:paramtypes",[])],t.prototype,"getDir",null),o.__decorate([i.HostBinding("class.k-toolbar-resizable"),o.__metadata("design:type",Boolean),o.__metadata("design:paramtypes",[])],t.prototype,"resizableClass",null),t=o.__decorate([i.Component({exportAs:"kendoToolBar",providers:[p.RefreshService,u.NavigationService,a.LocalizationService,{provide:a.L10N_PREFIX,useValue:"kendo.toolbar"}],selector:"kendo-toolbar",template:'\n <ng-container *ngFor="let tool of allTools; let index = index;">\n <kendo-toolbar-renderer [location]="\'toolbar\'" [resizable]="resizable" [tool]="tool"></kendo-toolbar-renderer>\n </ng-container>\n <button #overflowButton\n tabindex="-1" *ngIf="resizable" [style.visibility]="displayAnchor" class="k-overflow-anchor k-button" (click)="showPopup()">\n <span class="k-icon k-i-more-vertical"></span>\n </button>\n <ng-template #popupTemplate>\n <ul class="k-overflow-container k-list-container k-reset">\n <ng-container *ngFor="let tool of allTools; let index = index;">\n 
<kendo-toolbar-renderer [location]="\'overflow\'" [resizable]="resizable" [tool]="tool"></kendo-toolbar-renderer>\n </ng-container>\n </ul>\n </ng-template>\n <ng-container #container></ng-container>\n <kendo-resize-sensor *ngIf="resizable" [rateLimit]="1000" #resizeSensor></kendo-resize-sensor>\n '}),o.__metadata("design:paramtypes",[a.LocalizationService,r.PopupService,p.RefreshService,u.NavigationService,i.ElementRef,i.ChangeDetectorRef])],t)}();e.ToolBarComponent=g},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=function(){function t(){this.onRefresh=new i.EventEmitter}return t.prototype.refresh=function(t){this.onRefresh.emit(t)},t=o.__decorate([i.Injectable()],t)}();e.RefreshService=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(9),s=function(){function t(){this.keydown=new i.EventEmitter,this.isPopupFocused=!1,this.tools=[],this.isFocusLocked=!1,this.isOverflowButtonFocused=!1}return t.prototype.register=function(t){this.tools.push(t)},t.prototype.unregister=function(t){this.tools.splice(this.tools.indexOf(t),1)},t.prototype.moveFocusToToolBar=function(){this.isPopupFocused=!1,this.focusOverflowButton()},t.prototype.moveFocusToPopup=function(){this.isPopupFocused=!0},t.prototype.focus=function(t,e){var n=this;this.focused=t,this.tools.filter(function(t){return t!==n.focused}).forEach(function(t){return t.navigationService.defocus()}),this.isOverflowButtonFocused=!1,t.navigationService.focus(e)},t.prototype.focusOverflowButton=function(){this.isOverflowButtonFocused=!0,this.overflowButton.nativeElement.focus()},t.prototype.focusFirst=function(){if(!this.isFocusLocked){var t=this.tools.find(function(t){return!!t.navigationService.canFocus()});t&&this.focus(t)}},t.prototype.focusPrev=function(t){if(r.isPresent(t)||(t=this.isOverflowButtonFocused?this.tools.length-1:this.tools.indexOf(this.focused)-1),!(this.isFocusLocked||!this.tools.length||t<0)){var e=this.tools[t];e.navigationService.canFocus()?this.focus(e,!0):this.focusPrev(t-1)}},t.prototype.focusNext=function(t){var e=this.overflowButton&&"visible"===this.overflowButton.nativeElement.style.visibility;if(r.isPresent(t)||(t=this.tools.indexOf(this.focused)+1),t>=this.tools.length&&e&&!this.isOverflowButtonFocused&&this.focusOverflowButton(),!(this.isFocusLocked||!this.tools.length||t>=this.tools.length)){var n=this.tools[t];n.navigationService.canFocus()?this.focus(n):this.focusNext(t+1)}},t.prototype.lock=function(){this.isFocusLocked=!0},t.prototype.unlock=function(){this.isFocusLocked=!1},t.prototype.focusEnter=function(){},t.prototype.focusLeave=function(){},t.prototype.defocus=function(t){t.navigationService.defocus()},t=o.__decorate([i.Injectable()],t)}();e.NavigationService=s},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=function(){function t(){}return t.prototype.register=function(t,e){"toolbar"===e?this.toolbarRenderer=t:this.overflowRenderer=t},t.prototype.canFocus=function(){return!1},t.prototype.focus=function(){},t.prototype.defocus=function(){},t.prototype.hasFocus=function(){return!1},t=o.__decorate([i.Injectable()],t)}();e.ToolNavigationService=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(9),s=n(6),a=n(11),p=n(15),u=n(16),c=n(25),l=function(){function t(t,e,n,o,i){var 
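/*
 * Sketch of the roving-focus walk in the NavigationService above: arrow keys
 * move focus through the registered tools, recursively skipping any tool
 * whose own navigation service reports it cannot take focus. Stripped of the
 * overflow-button special case; names are illustrative.
 */
function focusStep(tools, fromIndex, step) {
  for (var i = fromIndex + step; i >= 0 && i < tools.length; i += step) {
    if (tools[i].canFocus()) { return tools[i]; }   // first focusable wins
  }
  return null;                                      // nothing reachable
}
// ArrowRight: focusStep(tools, currentIndex, +1); ArrowLeft: step -1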
r=this;this.element=t,this.renderer=e,this.rendererService=n,this.refreshService=o,this.navigationService=i,this.rendererService.element=t,this.rendererService.renderer=this,this.refreshSubscription=this.refreshService.onRefresh.subscribe(function(t){r.tool===t&&r.refresh()})}return Object.defineProperty(t.prototype,"className",{get:function(){return this.tool instanceof a.ToolBarSeparatorComponent},enumerable:!0,configurable:!0}),t.prototype.onFocus=function(){this.navigationService.focused=this.tool},t.prototype.ngOnInit=function(){this.resizable?"toolbar"===this.location?(this.template=this.tool.toolbarTemplate,this.renderer.setStyle(this.element.nativeElement,"visibility","hidden"),this.renderer.setStyle(this.element.nativeElement,"display","none")):(this.template=this.tool.popupTemplate,this.renderer.setStyle(this.element.nativeElement,"display","none")):(this.tool.overflows=!1,this.template=this.tool.toolbarTemplate,this.renderer.setStyle(this.element.nativeElement,"visibility","visible"),this.renderer.setStyle(this.element.nativeElement,"display","inline-block")),this.navigationService.register(this.tool),this.tool.navigationService.register(this.rendererService,this.location),this.tool.navigationService.toolbarNavigation=this.navigationService},t.prototype.ngOnDestroy=function(){this.navigationService.unregister(this.tool),this.refreshSubscription.unsubscribe()},t.prototype.ngAfterViewInit=function(){this.resizable&&this.refresh()},Object.defineProperty(t.prototype,"width",{get:function(){return this.tool.overflows?0:r.outerWidth(this.element.nativeElement)},enumerable:!0,configurable:!0}),t.prototype.isDisplayed=function(){return"none"!==this.element.nativeElement.style.display},t.prototype.refresh=function(){this.resizable&&("toolbar"===this.location?(this.renderer.setStyle(this.element.nativeElement,"visibility",this.tool.visibility),this.renderer.setStyle(this.element.nativeElement,"display",this.tool.toolbarDisplay)):this.renderer.setStyle(this.element.nativeElement,"display",this.tool.overflowDisplay))},t.prototype.setAttribute=function(t,e,n){this.renderer.setAttribute(t,e,n)},o.__decorate([i.Input(),o.__metadata("design:type",s.ToolBarToolComponent)],t.prototype,"tool",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],t.prototype,"location",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],t.prototype,"resizable",void 0),o.__decorate([i.HostBinding("class.k-separator"),o.__metadata("design:type",Boolean),o.__metadata("design:paramtypes",[])],t.prototype,"className",null),o.__decorate([i.HostListener("focusin"),o.__metadata("design:type",Function),o.__metadata("design:paramtypes",[]),o.__metadata("design:returntype",void 0)],t.prototype,"onFocus",null),t=o.__decorate([i.Component({exportAs:"kendoToolBarRenderer",providers:[c.RendererService],selector:"kendo-toolbar-renderer",template:'\n <ng-container *ngIf="location === \'toolbar\'">\n <ng-template [ngTemplateOutlet]="template"></ng-template>\n </ng-container>\n <ng-container *ngIf="location === \'overflow\'">\n <li class="k-item">\n <ng-template [ngTemplateOutlet]="template"></ng-template>\n </li>\n </ng-container>\n '}),o.__metadata("design:paramtypes",[i.ElementRef,i.Renderer2,c.RendererService,p.RefreshService,u.NavigationService])],t)}();e.ToolBarRendererComponent=l},function(t,e,n){"use strict";n.r(e),n.d(e,"KendoButtonService",function(){return h}),n.d(e,"FocusService",function(){return S}),n.d(e,"FocusableDirective",function(){return 
k}),n.d(e,"ButtonItemTemplateDirective",function(){return C}),n.d(e,"ListButton",function(){return P}),n.d(e,"ListComponent",function(){return w}),n.d(e,"ListModule",function(){return x}),n.d(e,"TemplateContextDirective",function(){return O}),n.d(e,"NAVIGATION_CONFIG",function(){return T}),n.d(e,"NavigationService",function(){return I}),n.d(e,"ButtonDirective",function(){return f}),n.d(e,"Button",function(){return f}),n.d(e,"ButtonGroupComponent",function(){return y}),n.d(e,"ButtonGroup",function(){return y}),n.d(e,"ButtonGroupModule",function(){return _}),n.d(e,"ButtonModule",function(){return v}),n.d(e,"ButtonsModule",function(){return z}),n.d(e,"SplitButtonComponent",function(){return j}),n.d(e,"SplitButton",function(){return j}),n.d(e,"SplitButtonModule",function(){return F}),n.d(e,"DropDownButtonComponent",function(){return D}),n.d(e,"DropDownButton",function(){return D}),n.d(e,"DropDownButtonModule",function(){return N});var o,i,r=n(0),s=n(3),a=n(1),p=n(4),u=n(5),c=n(7),l=n(8),d=n(2),h=function(){function t(){this.buttonLookChange=new s.BehaviorSubject("default"),this.buttonClicked=new s.Subject,this.buttonClicked$=this.buttonClicked.asObservable()}return t.prototype.click=function(t){this.buttonClicked.next(t)},t.prototype.setButtonLook=function(t){this.buttonLookChange.next(t)},t.decorators=[{type:r.Injectable}],t}(),f=function(){function t(t,e,n,o,i){var s=this;this.service=n,this.ngZone=i,this.toggleable=!1,this.primary=!1,this.look="default",this.isDisabled=!1,this.isIcon=!1,this.isIconClass=!1,this.tabIndex=0,this.selectedChange=new r.EventEmitter,this.domEvents=[],this.direction=o.rtl?"rtl":"ltr",this.localizationChangeSubscription=o.changes.subscribe(function(t){var e=t.rtl;return s.direction=e?"rtl":"ltr"}),this.element=t.nativeElement,this.renderer=e}return Object.defineProperty(t.prototype,"togglable",{get:function(){return this.toggleable},set:function(t){this.toggleable=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"selected",{get:function(){return this._selected||!1},set:function(t){this._selected=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"icon",{set:function(t){var e=this;t?this.iconSetter(t,function(){e.isIcon=!0;var n="k-icon k-i-"+t;e.addIcon(n)}):(this.isIcon=!1,this.updateIconNode())},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"iconClass",{set:function(t){var e=this;t?this.iconSetter(t,function(){e.isIconClass=!0,e.addIcon(t)}):(this.isIconClass=!1,this.updateIconNode())},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"imageUrl",{set:function(t){t?this.iconSetter(t,this.addImgIcon.bind(this)):this.removeImageNode()},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"disabled",{set:function(t){this.isDisabled=t,this.renderer.setProperty(this.element,"disabled",t)},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"classButton",{get:function(){return!0},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"classDisabled",{get:function(){return this.isDisabled},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"classPrimary",{get:function(){return 
this.primary},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"isFlat",{get:function(){return"flat"===this.look},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"isBare",{get:function(){return"bare"===this.look},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"isOutline",{get:function(){return"outline"===this.look},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"classActive",{get:function(){return this.selected},enumerable:!0,configurable:!0}),t.prototype.ngOnInit=function(){var t=this;this.service&&(this.buttonLookChangeSubscription=this.service.buttonLookChange.pipe(Object(u.filter)(function(t){return"default"!==t})).subscribe(function(e){return t.look=e})),!this.element.hasAttribute("role")&&this.togglable&&this.toggleAriaCheckbox(this.toggleable),this.ngZone.runOutsideAngular(function(){t.domEvents.push(t.renderer.listen(t.element,"click",t._onButtonClick.bind(t)))})},t.prototype.ngOnChanges=function(t){(Object(a.isChanged)("togglable",t)||Object(a.isChanged)("toggleable",t))&&this.toggleAriaCheckbox(this.toggleable)},t.prototype.ngAfterViewChecked=function(){this.setIconTextClasses()},t.prototype.ngOnDestroy=function(){this.imageNode=null,this.iconNode=null,this.renderer=null,this.localizationChangeSubscription.unsubscribe(),this.service&&this.buttonLookChangeSubscription&&this.buttonLookChangeSubscription.unsubscribe(),clearTimeout(this.deferTimeout),this.domEvents.forEach(function(t){return t()})},t.prototype.focus=function(){Object(a.isDocumentAvailable)()&&this.element.focus()},t.prototype.blur=function(){Object(a.isDocumentAvailable)()&&this.element.blur()},t.prototype.setAttribute=function(t,e){this.renderer.setAttribute(this.element,t,e)},t.prototype.removeAttribute=function(t){this.renderer.removeAttribute(this.element,t)},t.prototype.setSelected=function(t){var e=this,n=this.selected!==t;this.selected=t,this.setAttribute("aria-checked",this.selected.toString()),this.toggleClass("k-state-active",this.selected),n&&Object(a.hasObservers)(this.selectedChange)&&this.ngZone.run(function(){e.selectedChange.emit(t)})},t.prototype.toggleAriaCheckbox=function(t){Object(a.isDocumentAvailable)()&&(t?(this.setAttribute("role","checkbox"),this.setAttribute("aria-checked",this.selected.toString())):(this.removeAttribute("role"),this.removeAttribute("aria-checked")))},t.prototype.hasText=function(){return!!Object(a.isDocumentAvailable)()&&String(this.element.textContent).trim().length>0},t.prototype.addImgIcon=function(t){var e=this.renderer;this.imageNode?e.setProperty(this.imageNode,"src",t):Object(a.isDocumentAvailable)()&&(this.imageNode=e.createElement("img"),e.setProperty(this.imageNode,"src",t),e.setProperty(this.imageNode,"className","k-image"),e.setAttribute(this.imageNode,"role","presentation"),this.prependChild(this.imageNode))},t.prototype.addIcon=function(t){var e=this.renderer;this.iconNode?e.setProperty(this.iconNode,"className",t):Object(a.isDocumentAvailable)()&&(this.iconNode=e.createElement("span"),e.setProperty(this.iconNode,"className",t),e.setAttribute(this.iconNode,"role","presentation"),this.prependChild(this.iconNode))},t.prototype.prependChild=function(t){var e=this;this.defer(function(){e.renderer&&t!==e.element.firstChild&&e.renderer.insertBefore(e.element,t,e.element.firstChild)})},t.prototype.defer=function(t){var 
e=this;this.ngZone.runOutsideAngular(function(){e.deferTimeout=setTimeout(t,0)})},t.prototype.iconSetter=function(t,e){t&&e(t),this.setIconTextClasses()},t.prototype.removeImageNode=function(){this.imageNode&&this.renderer.parentNode(this.imageNode)&&(this.renderer.removeChild(this.element,this.imageNode),this.imageNode=null)},t.prototype.removeIconNode=function(){this.iconNode&&this.renderer.parentNode(this.iconNode)&&(this.renderer.removeChild(this.element,this.iconNode),this.iconNode=null)},t.prototype.updateIconNode=function(){this.isIcon||this.isIconClass||this.removeIconNode()},t.prototype.setIconTextClasses=function(){var t=this.isIcon||this.isIconClass||this.imageNode,e=this.hasText();this.toggleClass("k-button-icon",t&&!e),this.toggleClass("k-button-icontext",t&&e)},t.prototype.toggleClass=function(t,e){e?this.renderer.addClass(this.element,t):this.renderer.removeClass(this.element,t)},t.prototype._onButtonClick=function(){var t=this;this.toggleable&&(!this.disabled&&this.service&&this.ngZone.run(function(){t.service.click(t)}),this.service||this.setSelected(!this.selected))},t.decorators=[{type:r.Directive,args:[{exportAs:"kendoButton",providers:[p.LocalizationService,{provide:p.L10N_PREFIX,useValue:"kendo.button"}],selector:"button[kendoButton]"}]}],t.ctorParameters=function(){return[{type:r.ElementRef},{type:r.Renderer2},{type:h,decorators:[{type:r.Optional}]},{type:p.LocalizationService},{type:r.NgZone}]},t.propDecorators={toggleable:[{type:r.Input}],togglable:[{type:r.Input}],primary:[{type:r.Input}],look:[{type:r.Input}],selected:[{type:r.Input}],tabIndex:[{type:r.Input}],icon:[{type:r.Input}],iconClass:[{type:r.Input}],imageUrl:[{type:r.Input}],disabled:[{type:r.Input}],selectedChange:[{type:r.Output}],classButton:[{type:r.HostBinding,args:["class.k-button"]}],classDisabled:[{type:r.HostBinding,args:["class.k-state-disabled"]}],classPrimary:[{type:r.HostBinding,args:["class.k-primary"]}],isFlat:[{type:r.HostBinding,args:["class.k-flat"]}],isBare:[{type:r.HostBinding,args:["class.k-bare"]}],isOutline:[{type:r.HostBinding,args:["class.k-outline"]}],classActive:[{type:r.HostBinding,args:["class.k-state-active"]}],direction:[{type:r.HostBinding,args:["attr.dir"]}]},t}(),b=Promise.resolve(null),g=function(t){return null!=t},m=function(t){return b.then(t)},y=function(){function t(t,e,n){var o=this;this.service=t,this.element=n,this.selection="multiple",this.look="default",this.tabIndex=0,this.localizationChangeSubscription=e.changes.subscribe(function(t){var e=t.rtl;return o.direction=e?"rtl":"ltr"})}return Object.defineProperty(t.prototype,"wrapperClass",{get:function(){return!0},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"disabledClass",{get:function(){return this.disabled},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"stretchedClass",{get:function(){return!!this.width},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"isFlat",{get:function(){return"flat"===this.look},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"isBare",{get:function(){return"bare"===this.look},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"isOutline",{get:function(){return"outline"===this.look},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"getRole",{get:function(){return this.isSelectionSingle()?"radiogroup":"group"},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"dir",{get:function(){return 
this.direction},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"ariaDisabled",{get:function(){return this.disabled},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"wrapperWidth",{get:function(){return this.width},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"wrapperTabIndex",{get:function(){return this.tabIndex},enumerable:!0,configurable:!0}),t.prototype.keydown=function(t){this.isSelectionSingle()?this.navigateSelection(t):this.navigateFocus(t)},t.prototype.focusout=function(t){t.relatedTarget&&t.relatedTarget.parentNode!==this.element.nativeElement&&this.defocus(this.buttons.toArray())},t.prototype.ngOnInit=function(){var t=this;this.service.setButtonLook(this.look),this.subscription=this.service.buttonClicked$.subscribe(function(e){var n;t.isSelectionSingle()?(n=!0,t.deactivate(t.buttons.filter(function(t){return t!==e}))):(t.defocus(t.buttons.toArray()),n=!e.selected),e.togglable&&(e.setSelected(n),e.setAttribute("aria-checked",n.toString())),e.setAttribute("tabindex","0")})},t.prototype.ngOnChanges=function(t){var e=this;Object(a.isChanged)("disabled",t)&&this.buttons.forEach(function(t){g(e.disabled)&&(t.disabled=e.disabled)})},t.prototype.ngAfterContentInit=function(){var t=this.isSelectionSingle(),e=t?"radio":"checkbox",n=!1;this.buttons.forEach(function(t){t.togglable&&(t.setAttribute("aria-checked",t.selected.toString()),t.setAttribute("role",e)),t.selected?t.setAttribute("tabindex","0"):t.setAttribute("tabindex","-1"),n=n||t.selected}),t&&!n&&this.buttons.length&&(this.buttons.first.setAttribute("tabindex","0"),this.buttons.last.setAttribute("tabindex","0"))},t.prototype.ngAfterViewChecked=function(){this.buttons.length&&(this.buttons.first.renderer.addClass(this.buttons.first.element,"k-group-start"),this.buttons.last.renderer.addClass(this.buttons.last.element,"k-group-end"))},t.prototype.ngOnDestroy=function(){this.subscription.unsubscribe(),this.localizationChangeSubscription.unsubscribe()},t.prototype.ngAfterContentChecked=function(){this.verifySettings()},t.prototype.navigateSelection=function(t){var e=this.buttons.toArray().findIndex(function(t){return t.selected}),n=this.buttons.length-1;void 0!==e&&(t.keyCode===a.Keys.ArrowRight&&e<n&&(this.deactivate(this.buttons.filter(function(t){return t.selected})),this.activate(this.buttons.filter(function(t,n){return n===e+1}))),t.keyCode===a.Keys.ArrowLeft&&e>0&&(this.deactivate(this.buttons.filter(function(t){return t.selected})),this.activate(this.buttons.filter(function(t,n){return n===e-1}))))},t.prototype.navigateFocus=function(t){var e=this.buttons.toArray().findIndex(function(t){return-1!==t.element.tabIndex}),n=this.buttons.length-1;t.keyCode===a.Keys.ArrowRight&&e<n&&(this.defocus(this.buttons.toArray()),this.focus(this.buttons.filter(function(t,n){return n===e+1}))),t.keyCode===a.Keys.ArrowLeft&&e>0&&(this.defocus(this.buttons.toArray()),this.focus(this.buttons.filter(function(t,n){return 
n===e-1})))},t.prototype.deactivate=function(t){t.forEach(function(t){t.setSelected(!1),t.setAttribute("aria-checked",t.selected.toString()),t.setAttribute("tabindex","-1")})},t.prototype.activate=function(t){t.forEach(function(t){t.setSelected(!0),t.setAttribute("aria-checked",t.selected.toString()),t.setAttribute("tabindex","0"),t.focus()})},t.prototype.defocus=function(t){t.forEach(function(t){t.setAttribute("tabindex","-1")})},t.prototype.focus=function(t){t.forEach(function(t){t.setAttribute("tabindex","0"),t.focus()})},t.prototype.verifySettings=function(){if(Object(r.isDevMode)()&&this.isSelectionSingle()&&this.buttons.filter(function(t){return t.selected}).length>1)throw new Error("Having multiple selected buttons with single selection mode is not supported")},t.prototype.isSelectionSingle=function(){return"single"===this.selection},t.decorators=[{type:r.Component,args:[{exportAs:"kendoButtonGroup",providers:[h,p.LocalizationService,{provide:p.L10N_PREFIX,useValue:"kendo.buttongroup"}],selector:"kendo-buttongroup",template:'\n <ng-content select="[kendoButton]"></ng-content>\n '}]}],t.ctorParameters=function(){return[{type:h},{type:p.LocalizationService},{type:r.ElementRef}]},t.propDecorators={disabled:[{type:r.Input,args:["disabled"]}],selection:[{type:r.Input,args:["selection"]}],width:[{type:r.Input,args:["width"]}],look:[{type:r.Input}],tabIndex:[{type:r.Input}],buttons:[{type:r.ContentChildren,args:[f]}],wrapperClass:[{type:r.HostBinding,args:["class.k-button-group"]}],disabledClass:[{type:r.HostBinding,args:["class.k-state-disabled"]}],stretchedClass:[{type:r.HostBinding,args:["class.k-button-group-stretched"]}],isFlat:[{type:r.HostBinding,args:["class.k-button-group-flat"]}],isBare:[{type:r.HostBinding,args:["class.k-button-group-bare"]}],isOutline:[{type:r.HostBinding,args:["class.k-button-group-outline"]}],getRole:[{type:r.HostBinding,args:["attr.role"]}],dir:[{type:r.HostBinding,args:["attr.dir"]}],ariaDisabled:[{type:r.HostBinding,args:["attr.aria-disalbed"]}],wrapperWidth:[{type:r.HostBinding,args:["style.width"]}],wrapperTabIndex:[{type:r.HostBinding,args:["attr.tabindex"]}],keydown:[{type:r.HostListener,args:["keydown",["$event"]]}],focusout:[{type:r.HostListener,args:["focusout",["$event"]]}]},t}(),v=function(){function t(){}return t.decorators=[{type:r.NgModule,args:[{declarations:[f],exports:[f]}]}],t}(),_=function(){function t(){}return t.decorators=[{type:r.NgModule,args:[{declarations:[y],exports:[y],imports:[c.CommonModule,v]}]}],t}(),C=function(){function t(t){this.templateRef=t}return t.decorators=[{type:r.Directive,args:[{selector:"[kendoDropDownButtonItemTemplate],[kendoSplitButtonItemTemplate]"}]}],t.ctorParameters=function(){return[{type:r.TemplateRef}]},t}(),w=function(){function t(){this.onItemClick=new r.EventEmitter,this.onItemBlur=new r.EventEmitter}return t.prototype.getText=function(t){if(t)return this.textField?t[this.textField]:t.text||t},t.prototype.getIconClasses=function(t){var e={};return e[(t.icon?"k-icon k-i-"+t.icon:void 0)||t.iconClass]=!0,e},t.prototype.onClick=function(t){this.onItemClick.emit(t)},t.prototype.onBlur=function(){this.onItemBlur.emit()},t.decorators=[{type:r.Component,args:[{selector:"kendo-button-list",template:'\n <ul class="k-list k-reset" unselectable="on">\n <li role="menuItem" unselectable="on" tabindex="-1"\n kendoButtonFocusable\n *ngFor="let dataItem of data; let index = index;"\n [index]="index"\n [ngClass]="{\'k-item\': true, \'k-state-disabled\': dataItem.disabled}"\n (click)="onClick(index)"\n 
(blur)="onBlur()"\n [attr.aria-disabled]="dataItem.disabled ? true : false">\n <ng-template *ngIf="itemTemplate?.templateRef"\n [templateContext]="{\n templateRef: itemTemplate?.templateRef,\n $implicit: dataItem\n }">\n </ng-template>\n <ng-template [ngIf]="!itemTemplate?.templateRef">\n <span\n *ngIf="dataItem.icon || dataItem.iconClass"\n [ngClass]="getIconClasses(dataItem)"\n ></span>\n <img\n *ngIf="dataItem.imageUrl"\n class="k-image"\n [src]="dataItem.imageUrl"\n alt=""\n >\n {{ getText(dataItem) }}\n </ng-template>\n </li>\n </ul>\n '}]}],t.propDecorators={data:[{type:r.Input}],textField:[{type:r.Input}],itemTemplate:[{type:r.Input}],onItemClick:[{type:r.Output}],onItemBlur:[{type:r.Output}]},t}(),S=function(){function t(){this.onFocus=new r.EventEmitter}return t.prototype.isFocused=function(t){return t===this.focused},t.prototype.focus=function(t){this.isFocused(t)||(this.focused=t,this.onFocus.emit(t))},t.prototype.resetFocus=function(){this.focused=-1},Object.defineProperty(t.prototype,"focused",{get:function(){return this.focusedIndex},set:function(t){this.focusedIndex=t,this.onFocus.emit(t)},enumerable:!0,configurable:!0}),t.decorators=[{type:r.Injectable}],t}(),k=function(){function t(t,e){this.focusService=t,this.element=e.nativeElement,this.subscribeEvents()}return Object.defineProperty(t.prototype,"focusedClassName",{get:function(){return this.focusService.isFocused(this.index)},enumerable:!0,configurable:!0}),t.prototype.ngOnDestroy=function(){this.unsubscribeEvents()},t.prototype.subscribeEvents=function(){var t=this;Object(a.isDocumentAvailable)()&&(this.focusSubscription=this.focusService.onFocus.subscribe(function(e){t.index===e&&t.element.focus()}))},t.prototype.unsubscribeEvents=function(){Object(a.isDocumentAvailable)()&&this.focusSubscription&&this.focusSubscription.unsubscribe()},t.decorators=[{type:r.Directive,args:[{selector:"[kendoButtonFocusable]"}]}],t.ctorParameters=function(){return[{type:S},{type:r.ElementRef}]},t.propDecorators={index:[{type:r.Input}],focusedClassName:[{type:r.HostBinding,args:["class.k-state-focused"]}]},t}(),O=function(){function t(t){this.viewContainerRef=t}return Object.defineProperty(t.prototype,"templateContext",{set:function(t){this.insertedViewRef&&(this.viewContainerRef.remove(this.viewContainerRef.indexOf(this.insertedViewRef)),this.insertedViewRef=void 0),t.templateRef&&(this.insertedViewRef=this.viewContainerRef.createEmbeddedView(t.templateRef,t))},enumerable:!0,configurable:!0}),t.decorators=[{type:r.Directive,args:[{selector:"[templateContext]"}]}],t.ctorParameters=function(){return[{type:r.ViewContainerRef}]},t.propDecorators={templateContext:[{type:r.Input}]},t}(),E=[w,k,C,O],x=function(){function t(){}return t.decorators=[{type:r.NgModule,args:[{declarations:[E],exports:[E],imports:[c.CommonModule]}]}],t}();!function(t){t[t.keydown=0]="keydown",t[t.keypress=1]="keypress",t[t.keyup=2]="keyup"}(o||(o={})),function(t){t[t.Undefined=0]="Undefined",t[t.Open=1]="Open",t[t.Close=2]="Close",t[t.Enter=3]="Enter",t[t.EnterPress=4]="EnterPress",t[t.EnterUp=5]="EnterUp",t[t.Tab=6]="Tab",t[t.Esc=7]="Esc",t[t.Navigate=8]="Navigate"}(i||(i={}));var P=function(){function t(t,e,n,o,i){var s=this;this.focusService=t,this.navigationService=e,this.wrapperRef=n,this._zone=o,this._open=!1,this._disabled=!1,this._active=!1,this._popupSettings={animate:!0,popupClass:""},this.listId=Object(a.guid)(),this._isFocused=!1,this.wrapperBlurred=new 
r.EventEmitter,this.focusService=t,this.navigationService=e,this.wrapper=n.nativeElement,this.localizationChangeSubscription=i.changes.subscribe(function(t){var e=t.rtl;return s.direction=e?"rtl":"ltr"}),this.subscribeEvents()}return Object.defineProperty(t.prototype,"popupClasses",{get:function(){var t=["k-list-container","k-reset","k-group"];return this._popupSettings.popupClass&&t.push(this._popupSettings.popupClass),t.join(" ")},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"openState",{get:function(){return this._open},set:function(t){this._open=t},enumerable:!0,configurable:!0}),t.prototype.togglePopupVisibility=function(){this._disabled||(this.openState=!this.openState,this.openState||this.focusService.focus(-1))},t.prototype.onItemClick=function(t){var e=this;this.emitItemClickHandler(t),setTimeout(function(){e.focusWrapper()},1)},t.prototype.ngOnDestroy=function(){this.openState=!1,this.unsubscribeEvents(),clearTimeout(this.focusFirstTimeout),clearTimeout(this.blurTimeout),this.localizationChangeSubscription&&this.localizationChangeSubscription.unsubscribe()},t.prototype.subscribeEvents=function(){Object(a.isDocumentAvailable)()&&(this.subscribeListItemFocusEvent(),this.subscribeComponentBlurredEvent(),this.subscribeNavigationEvents())},t.prototype.subscribeListItemFocusEvent=function(){var t=this;this.focusSubscription=this.focusService.onFocus.subscribe(function(){t._isFocused=!0})},t.prototype.subscribeComponentBlurredEvent=function(){var t=this;this._zone.runOutsideAngular(function(){t.documentClick=Object(s.fromEvent)(document,"click").pipe(Object(u.filter)(function(e){return!t.wrapperContains(e.target)})),t.tabSubscription=t.navigationService.tab.pipe(Object(u.filter)(function(){return t._isFocused})).subscribe(t.handleTab.bind(t)),t.componentBlurredSubscription=Object(s.merge)(t.documentClick,t.wrapperBlurred).pipe(Object(u.filter)(function(){return t._isFocused})).subscribe(function(){return t._zone.run(function(){return t.blurWrapper()})})})},t.prototype.subscribeNavigationEvents=function(){var t=this;this.navigationSubscription=this.navigationService.navigate.subscribe(this.focusService.focus.bind(this.focusService)),this.enterPressSubscription=this.navigationService.enterpress.subscribe(function(){t._disabled||t._open||(t._active=!0)}),this.enterUpSubscription=this.navigationService.enterup.subscribe(function(){t._open||(t._active=!1),t.enterHandler(),t.focusWrapper()}),this.openSubscription=this.navigationService.open.subscribe(function(){t._open?t.focusWrapper():(t.togglePopupVisibility(),t.focusFirstItem())}),this.closeSubscription=Object(s.merge)(this.navigationService.close,this.navigationService.esc).subscribe(function(){return t.focusWrapper()})},t.prototype.enterHandler=function(){},t.prototype.unsubscribeEvents=function(){Object(a.isDocumentAvailable)()&&(this.unsubscribe(this.componentBlurredSubscription),this.unsubscribe(this.focusSubscription),this.unsubscribe(this.navigationSubscription),this.unsubscribe(this.enterPressSubscription),this.unsubscribe(this.enterUpSubscription),this.unsubscribe(this.openSubscription),this.unsubscribe(this.closeSubscription),this.unsubscribe(this.tabSubscription))},t.prototype.unsubscribe=function(t){t&&t.unsubscribe()},t.prototype.keyDownHandler=function(t){this.keyHandler(t)},t.prototype.keyPressHandler=function(t){this.keyHandler(t,o.keypress)},t.prototype.keyUpHandler=function(t){this.keyHandler(t,o.keyup)},t.prototype.keyHandler=function(t,e){if(!this._disabled){var 
n=this.focusService.focused||0,o=t,r=this.navigationService.process({altKey:o.altKey,current:n,keyCode:o.keyCode,keyEvent:e,max:this._data?this._data.length-1:0,min:0});r!==i.Undefined&&r!==i.Tab&&(r!==i.Enter||r===i.Enter&&this._open)&&o.preventDefault()}},t.prototype.emitItemClickHandler=function(t){var e=this._data[t];this._itemClick&&this._itemClick.emit(e),e&&e.click&&!e.disabled&&e.click(e)},t.prototype.focusFirstItem=function(){var t=this;this._data&&g(this._data[0])&&(this.focusFirstTimeout=setTimeout(function(){t.focusService.focus(0)},1))},t.prototype.focusWrapper=function(){this._open&&(this.togglePopupVisibility(),this.focusButton())},t.prototype.blurHandler=function(){var t=this;Object(a.isDocumentAvailable)()&&(this.blurTimeout=setTimeout(function(){t.wrapperContains(document.activeElement)||t.blurWrapper()}))},t.prototype.wrapperContains=function(t){return this.wrapper===t||this.wrapper.contains(t)},t.prototype.blurWrapper=function(){this._open&&this.togglePopupVisibility(),this._isFocused=!1,this._blur.emit()},t.prototype.focusButton=function(){this.button&&this.button.nativeElement.focus()},t.prototype.handleTab=function(){this.focusButton(),this.blurWrapper()},t}(),T=new r.InjectionToken("navigation.config"),I=function(){function t(t){this.navigate=new r.EventEmitter,this.open=new r.EventEmitter,this.close=new r.EventEmitter,this.enter=new r.EventEmitter,this.enterpress=new r.EventEmitter,this.enterup=new r.EventEmitter,this.tab=new r.EventEmitter,this.esc=new r.EventEmitter,this.useLeftRightArrows=t.useLeftRightArrows}return t.prototype.process=function(t){var e,n=t.keyCode,r=t.keyEvent,s=i.Undefined;return r===o.keypress?this.isEnter(n)&&(s=i.EnterPress):r===o.keyup?this.isEnter(n)&&(s=i.EnterUp):t.altKey&&n===a.Keys.ArrowDown?s=i.Open:t.altKey&&n===a.Keys.ArrowUp?s=i.Close:this.isEnter(n)?s=i.Enter:n===a.Keys.Escape?s=i.Esc:n===a.Keys.Tab?s=i.Tab:n===a.Keys.ArrowUp||this.useLeftRightArrows&&n===a.Keys.ArrowLeft?(e=this.next({current:t.current,start:t.max,end:t.min,step:-1}),s=i.Navigate):(n===a.Keys.ArrowDown||this.useLeftRightArrows&&n===a.Keys.ArrowRight)&&(e=this.next({current:t.current,start:t.min,end:t.max,step:1}),s=i.Navigate),s!==i.Undefined&&this[i[s].toLowerCase()].emit(e),s},t.prototype.isEnter=function(t){return t===a.Keys.Enter||t===a.Keys.Space},t.prototype.next=function(t){return g(t.current)?t.current!==t.end?t.current+t.step:t.end:t.start},t.decorators=[{type:r.Injectable}],t.ctorParameters=function(){return[{type:void 0,decorators:[{type:r.Inject,args:[T]}]}]},t}(),B=function(){function t(){this.prevented=!1}return t.prototype.preventDefault=function(){this.prevented=!0},t.prototype.isDefaultPrevented=function(){return this.prevented},t}(),A={provide:T,useValue:{useLeftRightArrows:!0}},j=function(t){function e(e,n,o,i,s,p,u){var c=t.call(this,e,n,o,i,u)||this;return c.popupService=s,c.elRef=p,c.text="",c.icon="",c.iconClass="",c.imageUrl="",c.look="default",c.tabIndex=0,c.buttonClick=new r.EventEmitter,c.itemClick=new r.EventEmitter,c.onFocus=new r.EventEmitter,c.onBlur=new r.EventEmitter,c.open=new r.EventEmitter,c.close=new r.EventEmitter,c.listId=Object(a.guid)(),c.buttonText="",c._itemClick=c.itemClick,c._blur=c.onBlur,c}return Object(d.__extends)(e,t),Object.defineProperty(e.prototype,"disabled",{get:function(){return this._disabled},set:function(t){this._disabled=t},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"popupSettings",{get:function(){return 
this._popupSettings},set:function(t){this._popupSettings=Object.assign({animate:!0,popupClass:""},t)},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"data",{get:function(){return this._data||(this.data=[]),this._data},set:function(t){this._data=t||[]},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"openState",{get:function(){return this._open},set:function(t){if(!this.disabled){var e=new B;t?this.open.emit(e):this.close.emit(e),e.isDefaultPrevented()||this._toggle(t)}},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"active",{get:function(){return this._active},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"componentTabIndex",{get:function(){return this.disabled?-1:this.tabIndex},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"isFocused",{get:function(){return this._isFocused&&!this._disabled},set:function(t){this._isFocused=t},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"widgetClasses",{get:function(){return!0},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"dir",{get:function(){return this.direction},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"ariaLabel",{get:function(){return this.buttonText+" splitbutton"},enumerable:!0,configurable:!0}),e.prototype.onButtonFocus=function(){this.isFocused||(this._isFocused=!0,this.onFocus.emit())},e.prototype.onButtonClick=function(){this.buttonClick.emit()},e.prototype.keydown=function(t){this.keyDownHandler(t)},e.prototype.keypress=function(t){this.keyPressHandler(t)},e.prototype.keyup=function(t){this.keyUpHandler(t)},e.prototype.ngAfterViewInit=function(){this.updateButtonText()},e.prototype.ngOnChanges=function(t){t.hasOwnProperty("text")&&this.updateButtonText()},e.prototype.togglePopupVisibility=function(){t.prototype.togglePopupVisibility.call(this),Object(a.isDocumentAvailable)()&&this.button.nativeElement.focus()},e.prototype.wrapperContains=function(t){return this.wrapper===t||this.wrapper.contains(t)||this.popupRef&&this.popupRef.popupElement.contains(t)},Object.defineProperty(e.prototype,"anchorAlign",{get:function(){var t={horizontal:this.popupSettings.align||"left",vertical:"bottom"};return"rtl"!==this.direction||g(this.popupSettings.align)||(t.horizontal="right"),t},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"popupAlign",{get:function(){var t={horizontal:this.popupSettings.align||"left",vertical:"top"};return"rtl"!==this.direction||g(this.popupSettings.align)||(t.horizontal="right"),t},enumerable:!0,configurable:!0}),e.prototype.focus=function(){Object(a.isDocumentAvailable)()&&this.button.nativeElement.focus()},e.prototype.blur=function(){Object(a.isDocumentAvailable)()&&this.button.nativeElement.blur()},e.prototype.ngOnDestroy=function(){t.prototype.ngOnDestroy.call(this),this.destroyPopup()},e.prototype.toggle=function(t){var e=this;this.disabled||m(function(){return e._toggle(void 0===t?!e._open:t)})},Object.defineProperty(e.prototype,"isOpen",{get:function(){return this.openState},enumerable:!0,configurable:!0}),e.prototype.enterHandler=function(){if(!this.disabled)if(this.openState){var t=this.focusService.focused;g(t)&&-1!==t&&this.emitItemClickHandler(t)}else this.buttonClick.emit()},e.prototype.updateButtonText=function(){var t=this;if(Object(a.isDocumentAvailable)()){var e=this.wrapper.innerText.split("\n").join("").trim();setTimeout(function(){t.buttonText=e},0)}},Object.defineProperty(e.prototype,"appendTo",{get:function(){var 
t=this.popupSettings.appendTo;if(t&&"root"!==t)return"component"===t?this.containerRef:t},enumerable:!0,configurable:!0}),e.prototype._toggle=function(t){this._open=t,this.destroyPopup(),this._open&&this.createPopup()},e.prototype.createPopup=function(){var t=this;this.popupRef=this.popupService.open({anchor:this.elRef,anchorAlign:this.anchorAlign,animate:this.popupSettings.animate,appendTo:this.appendTo,content:this.popupTemplate,popupAlign:this.popupAlign,popupClass:this.popupClasses}),this.popupRef.popupAnchorViewportLeave.subscribe(function(){return t.openState=!1}),this.popupRef.popupOpen.subscribe(this.focusFirstItem.bind(this))},e.prototype.destroyPopup=function(){this.popupRef&&(this.popupRef.close(),this.popupRef=null)},e.decorators=[{type:r.Component,args:[{exportAs:"kendoSplitButton",providers:[S,I,A,p.LocalizationService,{provide:p.L10N_PREFIX,useValue:"kendo.splitbutton"}],selector:"kendo-splitbutton",template:'\n <button kendoButton\n #button\n role="listbox"\n type="button"\n [look]="look"\n [tabindex]="componentTabIndex"\n [disabled]="disabled"\n [icon]="icon"\n [class.k-state-active]="active"\n [iconClass]="iconClass"\n [imageUrl]="imageUrl"\n (focus)="onButtonFocus()"\n (click)="onButtonClick()"\n [attr.aria-disabled]="disabled"\n [attr.aria-expanded]="openState"\n [attr.aria-haspopup]="true"\n [attr.aria-owns]="listId"\n [attr.aria-label]="ariaLabel"\n >\n {{text}}<ng-content></ng-content>\n </button>\n <button kendoButton\n type="button"\n [disabled]="disabled"\n [icon]="\'arrow-s\'"\n [look]="look"\n [tabindex]="-1"\n (click)="togglePopupVisibility()">\n </button>\n <ng-template #popupTemplate>\n <kendo-button-list\n [id]="listId"\n [data]="data"\n [textField]="textField"\n [itemTemplate]="itemTemplate"\n (onItemBlur)="blurHandler()"\n (onItemClick)="onItemClick($event)"\n (keydown)="keyDownHandler($event)"\n (keypress)="keyPressHandler($event)"\n (keyup)="keyUpHandler($event)"\n >\n </kendo-button-list>\n </ng-template>\n <ng-container #container></ng-container>\n '}]}],e.ctorParameters=function(){return[{type:S},{type:I},{type:r.ElementRef},{type:r.NgZone},{type:l.PopupService},{type:r.ElementRef},{type:p.LocalizationService}]},e.propDecorators={text:[{type:r.Input}],icon:[{type:r.Input}],iconClass:[{type:r.Input}],imageUrl:[{type:r.Input}],look:[{type:r.Input}],disabled:[{type:r.Input}],popupSettings:[{type:r.Input}],tabIndex:[{type:r.Input}],textField:[{type:r.Input}],data:[{type:r.Input}],buttonClick:[{type:r.Output}],itemClick:[{type:r.Output}],onFocus:[{type:r.Output,args:["focus"]}],onBlur:[{type:r.Output,args:["blur"]}],open:[{type:r.Output}],close:[{type:r.Output}],itemTemplate:[{type:r.ContentChild,args:[C]}],button:[{type:r.ViewChild,args:["button"]}],popupTemplate:[{type:r.ViewChild,args:["popupTemplate"]}],containerRef:[{type:r.ViewChild,args:["container",{read:r.ViewContainerRef}]}],isFocused:[{type:r.HostBinding,args:["class.k-state-focused"]}],widgetClasses:[{type:r.HostBinding,args:["class.k-widget"]},{type:r.HostBinding,args:["class.k-split-button"]},{type:r.HostBinding,args:["class.k-button-group"]}],dir:[{type:r.HostBinding,args:["attr.dir"]}],keydown:[{type:r.HostListener,args:["keydown",["$event"]]}],keypress:[{type:r.HostListener,args:["keypress",["$event"]]}],keyup:[{type:r.HostListener,args:["keyup",["$event"]]}]},e}(P),F=function(){function t(){}return t.decorators=[{type:r.NgModule,args:[{declarations:[j],exports:[j,x],imports:[c.CommonModule,l.PopupModule,v,x]}]}],t}(),R={provide:T,useValue:{useLeftRightArrows:!0}},D=function(t){function 
e(e,n,o,i,s,p,u){var c=t.call(this,e,n,o,i,u)||this;return c.popupService=s,c.elRef=p,c.icon="",c.iconClass="",c.imageUrl="",c.primary=!1,c.look="default",c.tabIndex=0,c.itemClick=new r.EventEmitter,c.open=new r.EventEmitter,c.close=new r.EventEmitter,c.onFocus=new r.EventEmitter,c.onBlur=new r.EventEmitter,c.listId=Object(a.guid)(),c._itemClick=c.itemClick,c._blur=c.onBlur,c}return Object(d.__extends)(e,t),Object.defineProperty(e.prototype,"popupSettings",{get:function(){return this._popupSettings},set:function(t){this._popupSettings=Object.assign({animate:!0,popupClass:""},t)},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"disabled",{get:function(){return this._disabled},set:function(t){t&&this.openState&&(this.openState=!1),this._disabled=t},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"data",{get:function(){return this._data},set:function(t){this._data=t||[]},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"openState",{get:function(){return this._open},set:function(t){if(!this.disabled){var e=new B;t?this.open.emit(e):this.close.emit(e),e.isDefaultPrevented()||this._toggle(t)}},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"componentTabIndex",{get:function(){return this.disabled?-1:this.tabIndex},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"appendTo",{get:function(){var t=this.popupSettings.appendTo;if(t&&"root"!==t)return"component"===t?this.container:t},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"focused",{get:function(){return this._isFocused&&!this._disabled},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"widgetClasses",{get:function(){return!0},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"dir",{get:function(){return this.direction},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"active",{get:function(){return this._active},enumerable:!0,configurable:!0}),e.prototype.keydown=function(t){this.keyDownHandler(t)},e.prototype.keypress=function(t){this.keyPressHandler(t)},e.prototype.keyup=function(t){this.keyUpHandler(t)},e.prototype.mousedown=function(t){this._disabled&&t.preventDefault()},e.prototype.openPopup=function(){this.togglePopupVisibility()},Object.defineProperty(e.prototype,"anchorAlign",{get:function(){var t={horizontal:this.popupSettings.align||"left",vertical:"bottom"};return"rtl"!==this.direction||g(this.popupSettings.align)||(t.horizontal="right"),t},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"popupAlign",{get:function(){var t={horizontal:this.popupSettings.align||"left",vertical:"top"};return"rtl"!==this.direction||g(this.popupSettings.align)||(t.horizontal="right"),t},enumerable:!0,configurable:!0}),e.prototype.focus=function(){Object(a.isDocumentAvailable)()&&this.button.nativeElement.focus()},e.prototype.blur=function(){Object(a.isDocumentAvailable)()&&this.button.nativeElement.blur()},e.prototype.ngOnDestroy=function(){t.prototype.ngOnDestroy.call(this),this.destroyPopup()},e.prototype.toggle=function(t){var e=this;this.disabled||m(function(){return e._toggle(void 0===t?!e._open:t)})},Object.defineProperty(e.prototype,"isOpen",{get:function(){return this.openState},enumerable:!0,configurable:!0}),e.prototype.handleFocus=function(){this._disabled||this._isFocused||(this._isFocused=!0,this.onFocus.emit())},e.prototype.wrapperContains=function(t){return 
this.wrapper===t||this.wrapper.contains(t)||this.popupRef&&this.popupRef.popupElement.contains(t)},e.prototype.subscribeNavigationEvents=function(){this.navigationSubscription=this.navigationService.navigate.subscribe(this.onArrowKeyNavigate.bind(this)),this.enterPressSubscription=this.navigationService.enterpress.subscribe(this.onNavigationEnterPress.bind(this)),this.enterUpSubscription=this.navigationService.enterup.subscribe(this.onNavigationEnterUp.bind(this)),this.openSubscription=this.navigationService.open.subscribe(this.onNavigationOpen.bind(this)),this.closeSubscription=Object(s.merge)(this.navigationService.close,this.navigationService.esc).subscribe(this.onNavigationClose.bind(this))},e.prototype.onNavigationEnterPress=function(){this._disabled||this.openState||(this._active=!0)},e.prototype.onNavigationEnterUp=function(){if(this._disabled||this.openState||(this._active=!1),this.openState){var t=this.focusService.focused;g(t)&&-1!==t&&this.emitItemClickHandler(t)}this.togglePopupVisibility(),!this.openState&&Object(a.isDocumentAvailable)()&&this.button.nativeElement.focus()},e.prototype.onNavigationOpen=function(){this._disabled||this.openState||this.togglePopupVisibility()},e.prototype.onNavigationClose=function(){this.openState&&(this.togglePopupVisibility(),Object(a.isDocumentAvailable)()&&this.button.nativeElement.focus())},e.prototype.onArrowKeyNavigate=function(t){this.focusService.focus(t)},e.prototype._toggle=function(t){this._open!==t&&(this._open=t,this.destroyPopup(),this._open&&this.createPopup())},e.prototype.createPopup=function(){var t=this;this.popupRef=this.popupService.open({anchor:this.elRef,anchorAlign:this.anchorAlign,animate:this.popupSettings.animate,appendTo:this.appendTo,content:this.popupTemplate,popupAlign:this.popupAlign,popupClass:this.popupClasses}),this.popupRef.popupAnchorViewportLeave.subscribe(function(){return t.openState=!1}),this.popupRef.popupOpen.subscribe(this.focusFirstItem.bind(this))},e.prototype.destroyPopup=function(){this.popupRef&&(this.popupRef.close(),this.popupRef=null)},e.decorators=[{type:r.Component,args:[{exportAs:"kendoDropDownButton",providers:[S,I,R,p.LocalizationService,{provide:p.L10N_PREFIX,useValue:"kendo.dropdownbutton"}],selector:"kendo-dropdownbutton",template:'\n <button kendoButton #button\n role="menu"\n type="button"\n [tabindex]="componentTabIndex"\n [class.k-state-active]="active"\n [disabled]="disabled"\n [icon]="icon"\n [iconClass]="iconClass"\n [imageUrl]="imageUrl"\n (click)="openPopup()"\n (focus)="handleFocus()"\n [attr.aria-disabled]="disabled"\n [attr.aria-expanded]="openState"\n [attr.aria-haspopup]="true"\n [attr.aria-owns]="listId"\n [look]="look"\n [primary]="primary"\n >\n <ng-content></ng-content>\n </button>\n <ng-template #popupTemplate>\n <kendo-button-list\n #buttonList\n [id]="listId"\n [data]="data"\n [textField]="textField"\n [itemTemplate]="itemTemplate"\n (onItemClick)="onItemClick($event)"\n (keydown)="keyDownHandler($event)"\n (keypress)="keyPressHandler($event)"\n (keyup)="keyUpHandler($event)"\n >\n </kendo-button-list>\n </ng-template>\n <ng-container #container></ng-container>\n 
'}]}],e.ctorParameters=function(){return[{type:S},{type:I},{type:r.ElementRef},{type:r.NgZone},{type:l.PopupService},{type:r.ElementRef},{type:p.LocalizationService}]},e.propDecorators={icon:[{type:r.Input}],iconClass:[{type:r.Input}],imageUrl:[{type:r.Input}],popupSettings:[{type:r.Input}],textField:[{type:r.Input}],disabled:[{type:r.Input}],data:[{type:r.Input}],primary:[{type:r.Input}],look:[{type:r.Input}],tabIndex:[{type:r.Input}],itemClick:[{type:r.Output}],open:[{type:r.Output}],close:[{type:r.Output}],onFocus:[{type:r.Output,args:["focus"]}],onBlur:[{type:r.Output,args:["blur"]}],focused:[{type:r.HostBinding,args:["class.k-state-focused"]}],widgetClasses:[{type:r.HostBinding,args:["class.k-widget"]},{type:r.HostBinding,args:["class.k-dropdown-button"]}],dir:[{type:r.HostBinding,args:["attr.dir"]}],itemTemplate:[{type:r.ContentChild,args:[C]}],button:[{type:r.ViewChild,args:["button"]}],buttonList:[{type:r.ViewChild,args:["buttonList"]}],popupTemplate:[{type:r.ViewChild,args:["popupTemplate"]}],container:[{type:r.ViewChild,args:["container",{read:r.ViewContainerRef}]}],keydown:[{type:r.HostListener,args:["keydown",["$event"]]}],keypress:[{type:r.HostListener,args:["keypress",["$event"]]}],keyup:[{type:r.HostListener,args:["keyup",["$event"]]}],mousedown:[{type:r.HostListener,args:["mousedown",["$event"]]}]},e}(P),N=function(){function t(){}return t.decorators=[{type:r.NgModule,args:[{declarations:[D],exports:[D,x],imports:[c.CommonModule,l.PopupModule,x,v]}]}],t}(),z=function(){function t(){}return t.decorators=[{type:r.NgModule,args:[{exports:[_,v,F,N]}]}],t}()},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(6),s=n(12),a=n(26),p=function(t){function e(){var e=t.call(this)||this;return e.selection="multiple",e.look="default",e.navigationService=new a.ButtonGroupNavigationService,e}var n;return o.__extends(e,t),n=e,e.prototype.selectedChangeHandler=function(t,e){e.selected=t,e.selectedChange.emit(t)},o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],e.prototype,"disabled",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"selection",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"width",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"look",void 0),o.__decorate([i.ViewChild("toolbarTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"toolbarTemplate",void 0),o.__decorate([i.ViewChild("popupTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"popupTemplate",void 0),o.__decorate([i.ContentChildren(i.forwardRef(function(){return s.ToolBarButtonComponent})),o.__metadata("design:type",i.QueryList)],e.prototype,"buttons",void 0),e=n=o.__decorate([i.Component({exportAs:"kendoToolBarButtonGroup",providers:[{provide:r.ToolBarToolComponent,useExisting:i.forwardRef(function(){return n})}],selector:"kendo-toolbar-buttongroup",template:'\n <ng-template #toolbarTemplate>\n <kendo-buttongroup\n [tabIndex]="tabIndex"\n [selection]="selection"\n [disabled]="disabled"\n [look]="look"\n [width]="width">\n <button type="button" kendoButton kendoToolbarFocusable\n *ngFor="let button of buttons"\n [ngStyle]="button.style"\n [ngClass]="button.className"\n [attr.title]="button.title"\n [disabled]="button.disabled"\n [togglable]="button.togglable"\n [primary]="button.primary"\n [selected]="button.selected"\n [icon]="button.icon"\n [iconClass]="button.iconClass"\n [imageUrl]="button.imageUrl"\n 
(click)="button.click.emit($event)"\n (selectedChange)="selectedChangeHandler($event, button)">\n {{ button.text }}\n </button>\n </kendo-buttongroup>\n </ng-template>\n <ng-template #popupTemplate>\n <kendo-buttongroup\n class="k-overflow-button"\n [tabIndex]="tabIndex"\n [disabled]="disabled"\n [look]="look"\n [width]="width">\n <button type="button" kendoButton\n class="k-overflow-button"\n *ngFor="let button of buttons"\n [ngStyle]="button.style"\n [ngClass]="button.className"\n [attr.title]="button.title"\n [disabled]="button.disabled"\n [togglable]="button.togglable"\n [primary]="button.primary"\n [selected]="button.selected"\n [icon]="button.icon"\n [iconClass]="button.iconClass"\n [imageUrl]="button.imageUrl"\n (click)="button.click.emit($event)"\n (selectedChange)="selectedChangeHandler($event, button)">\n {{ button.text }}\n </button>\n </kendo-buttongroup>\n </ng-template>\n '}),o.__metadata("design:paramtypes",[])],e)}(r.ToolBarToolComponent);e.ToolBarButtonGroupComponent=p},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(6),s=n(13),a=function(t){function e(){var e=t.call(this)||this;return e.text="",e.icon="",e.iconClass="",e.imageUrl="",e.textField="text",e.itemClick=new i.EventEmitter,e._popupSettings={animate:!0,popupClass:""},e.navigationService=new s.SingleFocusableNavigationService,e}var n;return o.__extends(e,t),n=e,Object.defineProperty(e.prototype,"popupSettings",{get:function(){return this._popupSettings},set:function(t){this._popupSettings=Object.assign({animate:!0,popupClass:""},t)},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"data",{get:function(){return this._data||(this.data=[]),this._data},set:function(t){this._data=t||[]},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"listData",{get:function(){var t;return[((t={})[this.textField]=this.text,t.icon=this.icon,t.iconClass=this.iconClass,t.imageUrl=this.imageUrl,t.disabled=this.disabled,t.tabIndex=this.tabIndex,t)].concat(this.data)},enumerable:!0,configurable:!0}),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"text",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"icon",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"iconClass",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"imageUrl",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Object),o.__metadata("design:paramtypes",[Object])],e.prototype,"popupSettings",null),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"textField",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],e.prototype,"disabled",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Array),o.__metadata("design:paramtypes",[Array])],e.prototype,"data",null),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],e.prototype,"itemClick",void 0),o.__decorate([i.ViewChild("toolbarTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"toolbarTemplate",void 0),o.__decorate([i.ViewChild("popupTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"popupTemplate",void 0),o.__decorate([i.ViewChild("dropdownButton",{read:i.ElementRef}),o.__metadata("design:type",i.ElementRef)],e.prototype,"dropdownButton",void 0),e=n=o.__decorate([i.Component({exportAs:"kendoToolBarDropDownButton",providers:[{provide:r.ToolBarToolComponent,useExisting:i.forwardRef(function(){return 
n})}],selector:"kendo-toolbar-dropdownbutton",template:'\n <ng-template #toolbarTemplate>\n <kendo-dropdownbutton\n [icon]="icon"\n [iconClass]="iconClass"\n [imageUrl]="imageUrl"\n [disabled]="disabled"\n [tabIndex]="tabIndex"\n [data]="data"\n [textField]="textField"\n (itemClick)="itemClick.emit($event)">\n {{ text }}\n </kendo-dropdownbutton>\n </ng-template>\n <ng-template #popupTemplate>\n <kendo-toolbar-buttonlist\n [data]="listData"\n [textField]="textField"\n (itemClick)="itemClick.emit($event)">\n </kendo-toolbar-buttonlist>\n </ng-template>\n '}),o.__metadata("design:paramtypes",[])],e)}(r.ToolBarToolComponent);e.ToolBarDropDownButtonComponent=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(6),s=n(13),a=function(t){function e(){var e=t.call(this)||this;return e.text="",e.icon="",e.iconClass="",e.imageUrl="",e.textField="text",e.buttonClick=new i.EventEmitter,e.itemClick=new i.EventEmitter,e._popupSettings={animate:!0,popupClass:""},e.navigationService=new s.SingleFocusableNavigationService,e}var n;return o.__extends(e,t),n=e,Object.defineProperty(e.prototype,"popupSettings",{get:function(){return this._popupSettings||(this._popupSettings={animate:!0,popupClass:""}),this._popupSettings},set:function(t){this._popupSettings=t},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"data",{get:function(){return this._data||(this.data=[]),this._data},set:function(t){this._data=t||[]},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"listData",{get:function(){var t,e=((t={})[this.textField]=this.text,t.icon=this.icon,t.iconClass=this.iconClass,t.imageUrl=this.imageUrl,t.disabled=this.disabled,t.tabIndex=this.tabIndex,t),n=this.data.slice(0);return n.unshift(e),n},enumerable:!0,configurable:!0}),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"text",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"icon",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"iconClass",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"imageUrl",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Boolean)],e.prototype,"disabled",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Object),o.__metadata("design:paramtypes",[Object])],e.prototype,"popupSettings",null),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"textField",void 0),o.__decorate([i.Input(),o.__metadata("design:type",Array),o.__metadata("design:paramtypes",[Array])],e.prototype,"data",null),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],e.prototype,"buttonClick",void 0),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],e.prototype,"itemClick",void 0),o.__decorate([i.ViewChild("toolbarTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"toolbarTemplate",void 0),o.__decorate([i.ViewChild("popupTemplate"),o.__metadata("design:type",i.TemplateRef)],e.prototype,"popupTemplate",void 0),o.__decorate([i.ViewChild("splitButton",{read:i.ElementRef}),o.__metadata("design:type",i.ElementRef)],e.prototype,"splitButton",void 0),e=n=o.__decorate([i.Component({exportAs:"kendoToolBarSplitButton",providers:[{provide:r.ToolBarToolComponent,useExisting:i.forwardRef(function(){return n})}],selector:"kendo-toolbar-splitbutton",template:'\n <ng-template #toolbarTemplate>\n <kendo-splitbutton\n [data]="data"\n [text]="text"\n [icon]="icon"\n [iconClass]="iconClass"\n 
[imageUrl]="imageUrl"\n [disabled]="disabled"\n [tabIndex]="tabIndex"\n [textField]="textField"\n (buttonClick)="buttonClick.emit($event)"\n (itemClick)="itemClick.emit($event)">\n </kendo-splitbutton>\n </ng-template>\n <ng-template #popupTemplate>\n <kendo-toolbar-buttonlist\n [data]="listData"\n [textField]="textField"\n (itemClick)="itemClick.emit($event)">\n </kendo-toolbar-buttonlist>\n </ng-template>\n '}),o.__metadata("design:paramtypes",[])],e)}(r.ToolBarToolComponent);e.ToolBarSplitButtonComponent=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(14);e.ToolBarComponent=o.ToolBarComponent;var i=n(6);e.ToolBarToolComponent=i.ToolBarToolComponent;var r=n(12);e.ToolBarButtonComponent=r.ToolBarButtonComponent;var s=n(20);e.ToolBarButtonGroupComponent=s.ToolBarButtonGroupComponent;var a=n(21);e.ToolBarDropDownButtonComponent=a.ToolBarDropDownButtonComponent;var p=n(22);e.ToolBarSplitButtonComponent=p.ToolBarSplitButtonComponent;var u=n(11);e.ToolBarSeparatorComponent=u.ToolBarSeparatorComponent;var c=n(27);e.ToolBarModule=c.ToolBarModule},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=function(){function t(){this.prevented=!1}return t.prototype.preventDefault=function(){this.prevented=!0},t.prototype.isDefaultPrevented=function(){return this.prevented},t}();e.PreventableEvent=o},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(9),s=function(){function t(){}return t.prototype.getElement=function(){return this.element.nativeElement},t.prototype.querySelector=function(t){return this.element.nativeElement.querySelector(t)},t.prototype.querySelectorAll=function(t){return this.element.nativeElement.querySelectorAll(t)},t.prototype.findFocusable=function(){return r.findFocusable(this.element.nativeElement,!1)},t.prototype.findFocusableChild=function(t){return t||(t=this.findFocusable()),r.findFocusableChild(t,!1)},t.prototype.findNextFocusableSibling=function(t){return t||(t=this.findFocusable()),r.findFocusableSibling(t,!1)},t.prototype.findPrevFocusableSibling=function(t){return t||(t=this.findFocusable()),r.findFocusableSibling(t,!1,!0)},t.prototype.setAttribute=function(t,e,n){this.renderer.setAttribute(t,e,n)},t=o.__decorate([i.Injectable()],t)}();e.RendererService=s},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(1),s=function(){function t(){}return Object.defineProperty(t.prototype,"toolbarNavigation",{get:function(){return this._navigationService},set:function(t){this._navigationService=t,this.keydownSubscription&&this.keydownSubscription.unsubscribe(),this.keydownSubscription=this._navigationService.keydown.subscribe(this.onKeydown.bind(this))},enumerable:!0,configurable:!0}),t.prototype.ngOnDestroy=function(){this.keydownSubscription&&this.keydownSubscription.unsubscribe()},t.prototype.register=function(t,e){"toolbar"===e?this.toolbarRenderer=t:this.overflowRenderer=t},t.prototype.canFocus=function(){return!0},t.prototype.focus=function(t){var e=this.buttons(),n=t?e[e.length-1]:e[0];this.toolbarNavigation.lock(),this.renderer().setAttribute(n,"tabindex","0"),n.focus(),this.current=n,this.isActive=!0},t.prototype.defocus=function(){var t=this;this.buttons().forEach(function(e){t.renderer().setAttribute(e,"tabindex","-1"),t.hasFocus(e)&&e.blur()}),this.current=void 0,this.isActive=!1},t.prototype.hasFocus=function(t){return document.activeElement!==t},t.prototype.buttons=function(){return 
Array.prototype.slice.call(this.renderer().querySelectorAll(".k-button"))},t.prototype.renderer=function(){return this.toolbarNavigation.isPopupFocused?this.overflowRenderer:this.toolbarRenderer},t.prototype.onKeydown=function(t){this.isActive&&(t.keyCode===r.Keys.ArrowLeft&&0===this.buttons().indexOf(this.current)&&(this.toolbarNavigation.unlock(),this.toolbarNavigation.focusPrev()),t.keyCode===r.Keys.ArrowRight&&this.buttons().indexOf(this.current)===this.buttons().length-1&&(this.toolbarNavigation.unlock(),this.toolbarNavigation.focusNext()),this.current=this.buttons().find(function(t){return 0===t.tabIndex}))},t=o.__decorate([i.Injectable()],t)}();e.ButtonGroupNavigationService=s},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=n(7),s=n(19),a=n(8),p=n(1),u=n(14),c=n(6),l=n(12),d=n(20),h=n(21),f=n(22),b=n(11),g=n(18),m=n(28),y=[c.ToolBarToolComponent,l.ToolBarButtonComponent,d.ToolBarButtonGroupComponent,h.ToolBarDropDownButtonComponent,f.ToolBarSplitButtonComponent,b.ToolBarSeparatorComponent],v=[g.ToolBarRendererComponent,m.ToolBarButtonListComponent],_=function(){function t(){}return t=o.__decorate([i.NgModule({declarations:[u.ToolBarComponent,y,v],exports:[u.ToolBarComponent,y],imports:[r.CommonModule,s.ButtonsModule,a.PopupModule,p.ResizeSensorModule]})],t)}();e.ToolBarModule=_},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var o=n(2),i=n(0),r=function(t){function e(){var e=null!==t&&t.apply(this,arguments)||this;return e.itemClick=new i.EventEmitter,e}return o.__extends(e,t),e.prototype.getText=function(t){if(t)return this.textField?t[this.textField]:t.text||t},e.prototype.onClick=function(t){t.click&&t.click(),this.itemClick.emit(this.data.indexOf(t))},o.__decorate([i.Input(),o.__metadata("design:type",Array)],e.prototype,"data",void 0),o.__decorate([i.Input(),o.__metadata("design:type",String)],e.prototype,"textField",void 0),o.__decorate([i.Output(),o.__metadata("design:type",i.EventEmitter)],e.prototype,"itemClick",void 0),e=o.__decorate([i.Component({selector:"kendo-toolbar-buttonlist",template:'\n <button type="button" tabindex="-1" kendoButton\n class="k-overflow-button"\n *ngFor="let item of data"\n [disabled]="item.disabled"\n [icon]="item.icon"\n [iconClass]="item.iconClass"\n [imageUrl]="item.imageUrl"\n (click)="onClick(item)">\n {{ getText(item) }}\n </button>\n '})],e)}(n(6).ToolBarToolComponent);e.ToolBarButtonListComponent=r}])});
|
import axios from 'axios';
/*
 * Api Service: a thin wrapper around axios for the books REST endpoints.
 */
const booksListURL = 'http://localhost:3001/booksList';
class ApiService {
getAllBooks() {
return axios.get(booksListURL);
}
getBookById(id) {
return axios.get(`${booksListURL}/${id}`);
}
addBook(book) {
return axios.post(booksListURL, book);
}
deleteBook(bookId) {
return axios.delete(`${booksListURL}/${bookId}`);
}
updateBook(bookId, newBook) {
return axios.put(`${booksListURL}/${bookId}`, newBook);
}
}
export default new ApiService();
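// A minimal usage sketch (assumptions: a json-server style backend is
// running at booksListURL, and callers handle the returned promises):
//
// import ApiService from './ApiService';
// ApiService.addBook({ title: 'Dune', author: 'Frank Herbert' })
//   .then(() => ApiService.getAllBooks())
//   .then((response) => console.log(response.data))
//   .catch((error) => console.error(error));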
|
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-duration-pattern-1-NS"
@dataclass
class NistschemaSvIvListDurationPattern1:
class Meta:
name = "NISTSchema-SV-IV-list-duration-pattern-1"
namespace = "NISTSchema-SV-IV-list-duration-pattern-1-NS"
value: List[str] = field(
default_factory=list,
metadata={
"pattern": r"P\d\d75Y\d3M\d9DT0\dH\d2M3\dS P19\d\dY0\dM1\dDT0\dH1\dM\d1S P19\d\dY\d3M2\dDT\d4H\d7M\d7S P\d\d86Y\d9M\d5DT\d6H4\dM\d9S P19\d\dY\d2M\d2DT\d9H3\dM0\dS P\d\d90Y0\dM2\dDT\d7H2\dM0\dS P\d\d94Y\d0M1\dDT0\dH1\dM4\dS P\d\d71Y0\dM\d1DT\d3H\d7M4\dS",
"tokens": True,
}
)
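# A minimal instantiation sketch (assumption: this is an xsdata-style
# generated binding; the dataclass itself performs no pattern validation,
# so the sample tokens below merely follow the first two regex templates):
#
# item = NistschemaSvIvListDurationPattern1(
#     value=["P1175Y13M19DT01H12M31S", "P1975Y05M15DT05H15M41S"]
# )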
|
from unittest import TestCase
from vsi.ast import (
AST,
PrintStatement,
Integer,
Float,
Varexpr,
BinOpexpr,
RelOpexpr,
Andexpr,
Orexpr,
Notexpr,
AssignStatement,
IfStatement,
WhileStatement
)
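# A minimal composition sketch using only constructors already exercised in
# this module (assumption: AssignStatement evaluates its right-hand side
# against the same env dict that AST.eval threads through the statements):
def _example_program():
    program = AST()
    program.insert_node(AssignStatement("x", Integer(10)))
    program.insert_node(AssignStatement("y", BinOpexpr("+", Varexpr("x"), Integer(5))))
    env = {}
    program.eval(env)
    return env  # expected: {"x": 10, "y": 15}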
class TestAST(TestCase):
def test___repr___method(self):
ast = AST()
ast.insert_node(Integer(10))
ast_repr = repr(ast)
expected_repr = "Statements([Integer(10)])"
self.assertEqual(ast_repr, expected_repr)
def test_eval_method(self):
ast = AST()
ast.insert_node(AssignStatement("x", Integer(10)))
env = {}
ast.eval(env)
self.assertIn("x", env)
self.assertEqual(env["x"], 10)
def test_insert_node_method(self):
ast = AST()
ast.insert_node(Integer(10))
ast.insert_node(Varexpr("x"))
expected_nodes = [Integer(10), Varexpr("x")]
self.assertEqual(ast.nodes, expected_nodes)
class TestPrintStatement(TestCase):
    def test___repr___method(self):
        print_statement = PrintStatement(Integer(10))
        print_statement_repr = repr(print_statement)
        expected_repr = "PrintStatement(Integer(10))"
        self.assertEqual(print_statement_repr, expected_repr)
class TestInteger(TestCase):
def test___repr___method(self):
integer = Integer(10)
integer_repr = repr(integer)
expected_repr = "Integer(10)"
self.assertEqual(integer_repr, expected_repr)
def test_eval_method(self):
integer = Integer(10)
env = {}
result = integer.eval(env)
self.assertEqual(result, 10)
class TestFloat(TestCase):
def test___repr___method(self):
_float = Float(1.2)
_float_repr = repr(_float)
expected_repr = "Float(1.2)"
self.assertEqual(_float_repr, expected_repr)
def test_eval_method(self):
_float = Float(1.2)
env = {}
result = _float.eval(env)
self.assertEqual(result, 1.2)
class TestVarexpr(TestCase):
def test___repr___method(self):
varexpr = Varexpr("x")
varexpr_repr = repr(varexpr)
expected_repr = "Varexpr(x)"
self.assertEqual(varexpr_repr, expected_repr)
def test_eval_method_with_defined_variable(self):
env = {"x": 10}
varexpr = Varexpr("x")
result = varexpr.eval(env)
self.assertEqual(result, 10)
class TestBinOpexpr(TestCase):
def test___repr___method(self):
binopexpr = BinOpexpr("+", Integer(1), Integer(1))
binopexpr_repr = repr(binopexpr)
expected_repr = "BinOpexpr(+, Integer(1), Integer(1))"
self.assertEqual(binopexpr_repr, expected_repr)
def test_eval_method_with_plus_operator(self):
binopexpr = BinOpexpr("+", Integer(12), Integer(5))
env = {}
result = binopexpr.eval(env)
self.assertEqual(result, 17)
def test_eval_method_with_minus_operator(self):
binopexpr = BinOpexpr("-", Integer(12), Integer(5))
env = {}
result = binopexpr.eval(env)
self.assertEqual(result, 7)
def test_eval_method_with_times_operator(self):
binopexpr = BinOpexpr("*", Integer(12), Integer(5))
env = {}
result = binopexpr.eval(env)
self.assertEqual(result, 60)
def test_eval_method_with_obelus_operator(self):
binopexpr = BinOpexpr("/", Integer(10), Integer(5))
env = {}
result = binopexpr.eval(env)
self.assertEqual(result, 2)
def test_eval_method_with_modulo_operator(self):
binopexpr = BinOpexpr("%", Integer(12), Integer(5))
env = {}
result = binopexpr.eval(env)
self.assertEqual(result, 2)
class TestRelOpexpr(TestCase):
def test___repr___method(self):
relopexpr = RelOpexpr("==", Integer(10), Integer(12))
relopexpr_repr = repr(relopexpr)
expected_repr = "RelOpexpr(==, Integer(10), Integer(12))"
self.assertEqual(relopexpr_repr, expected_repr)
def test_eval_method_with_equal_operator(self):
relopexpr = RelOpexpr("==", Integer(10), Integer(12))
env = {}
result = relopexpr.eval(env)
self.assertFalse(result)
def test_eval_method_with_greater_than_operator(self):
relopexpr = RelOpexpr(">", Integer(10), Integer(12))
env = {}
result = relopexpr.eval(env)
self.assertFalse(result)
def test_eval_method_with_less_than_operator(self):
relopexpr = RelOpexpr("<", Integer(10), Integer(12))
env = {}
result = relopexpr.eval(env)
self.assertTrue(result)
def test_eval_method_with_greater_than_or_equal_operator(self):
relopexpr = RelOpexpr(">=", Integer(10), Integer(12))
env = {}
result = relopexpr.eval(env)
self.assertFalse(result)
def test_eval_method_with_less_than_or_equal_operator(self):
relopexpr = RelOpexpr("<=", Integer(10), Integer(12))
env = {}
result = relopexpr.eval(env)
self.assertTrue(result)
def test_eval_method_with_not_equal_operator(self):
relopexpr = RelOpexpr("!=", Integer(10), Integer(12))
env = {}
result = relopexpr.eval(env)
self.assertTrue(result)
class TestAndexpr(TestCase):
def test___repr___method(self):
andexpr = Andexpr(Integer(1), Integer(2))
andexpr_repr = repr(andexpr)
expected_repr = "Andexpr(Integer(1), Integer(2))"
self.assertEqual(andexpr_repr, expected_repr)
def test_eval_method_with_true_expression(self):
andexpr = Andexpr(Integer(1), Integer(1))
env = {}
result = andexpr.eval(env)
self.assertTrue(result)
def test_eval_method_with_false_expression(self):
andexpr = Andexpr(Integer(1), Integer(0))
env = {}
result = andexpr.eval(env)
self.assertFalse(result)
class TestOrexpr(TestCase):
def test___repr___method(self):
orexpr = Orexpr(Integer(1), Integer(0))
orexpr_repr = repr(orexpr)
expected_repr = "Orexpr(Integer(1), Integer(0))"
self.assertEqual(orexpr_repr, expected_repr)
def test_eval_method_with_true_expression(self):
orexpr = Orexpr(Integer(1), Integer(0))
env = {}
result = orexpr.eval(env)
self.assertTrue(result)
def test_eval_method_with_false_expression(self):
orexpr = Orexpr(Integer(0), Integer(0))
env = {}
result = orexpr.eval(env)
self.assertFalse(result)
class TestNotexpr(TestCase):
def test___repr___method(self):
notexpr = Notexpr(Integer(1))
notexpr_repr = repr(notexpr)
expected_repr = "Notexpr(Integer(1))"
self.assertEqual(notexpr_repr, expected_repr)
def test_eval_method_with_true_expression(self):
notexpr = Notexpr(Integer(0))
env = {}
result = notexpr.eval(env)
self.assertTrue(result)
def test_eval_method_with_false_expression(self):
notexpr = Notexpr(Integer(1))
env = {}
result = notexpr.eval(env)
self.assertFalse(result)
class TestAssignStatement(TestCase):
def test___repr___method(self):
assign_stmt = AssignStatement("x", Integer(10))
assign_stmt_repr = repr(assign_stmt)
expected_repr = "AssignStatement(x, Integer(10))"
self.assertEqual(assign_stmt_repr, expected_repr)
def test_eval_method(self):
assign_stmt = AssignStatement("x", Integer(10))
env = {}
assign_stmt.eval(env)
self.assertIn("x", env)
self.assertEqual(env["x"], 10)
class TestIfStatement(TestCase):
def test___repr___method(self):
if_stmt = IfStatement(Integer(1),
BinOpexpr("+", Integer(1), Integer(2)),
# no else stmt
None
)
if_stmt_repr = repr(if_stmt)
expected_repr = "IfStatement(Integer(1), " \
"BinOpexpr(+, Integer(1), Integer(2)), None)"
self.assertEqual(if_stmt_repr, expected_repr)
def test_eval_method_with_true_condition_and_no_else_stmt(self):
if_stmt = IfStatement(Integer(1),
AssignStatement("x", Integer(10)),
# no else stmt
None
)
env = {}
if_stmt.eval(env)
self.assertIn("x", env)
self.assertEqual(env["x"], 10)
def test_eval_method_with_true_condition_and_else_stmt(self):
if_stmt = IfStatement(Integer(1),
AssignStatement("x", Integer(10)),
# else stmt
AssignStatement("y", Integer(12)),
)
env = {}
if_stmt.eval(env)
self.assertIn("x", env)
self.assertEqual(env["x"], 10)
self.assertNotIn("y", env)
def test_eval_method_with_false_condition_and_else_stmt(self):
if_stmt = IfStatement(Integer(0),
AssignStatement("x", Integer(10)),
# else stmt
AssignStatement("y", Integer(12)),
)
env = {}
if_stmt.eval(env)
self.assertNotIn("x", env)
self.assertIn("y", env)
self.assertEqual(env["y"], 12)
def test_eval_method_with_false_condition_and_no_else_stmt(self):
if_stmt = IfStatement(Integer(0),
AssignStatement("x", Integer(10)),
                              # no else stmt
None
)
env = {}
if_stmt.eval(env)
self.assertNotIn("x", env)
class TestWhileStatement(TestCase):
def test___repr___method(self):
while_stmt = WhileStatement(Integer(1),
BinOpexpr("+", Integer(1), Integer(2)),
)
while_stmt_repr = repr(while_stmt)
expected_repr = "WhileStatement(Integer(1), " \
"BinOpexpr(+, Integer(1), Integer(2)))"
self.assertEqual(while_stmt_repr, expected_repr)
def test_eval_method_with_true_expression(self):
env = {"x": 0}
while_stmt = WhileStatement(RelOpexpr("<", Varexpr("x"), Integer(10)),
AssignStatement("x",
BinOpexpr("+",
Varexpr("x"),
Integer(1)
)
)
)
while_stmt.eval(env)
self.assertEqual(env["x"], 10)
def test_eval_method_with_false_expression(self):
env = {"x": 0}
while_stmt = WhileStatement(RelOpexpr(">", Varexpr("x"), Integer(10)),
AssignStatement("x",
BinOpexpr("+",
Varexpr("x"),
Integer(1)
)
)
)
while_stmt.eval(env)
self.assertEqual(env["x"], 0)
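# ---------------------------------------------------------------
# Illustrative usage (not part of the test suite): a minimal sketch
# assuming the vsi.ast API behaves exactly as the tests above exercise
# it. Builds the program `x = 0; while (x < 5) x = x + 1;` and
# evaluates it against an empty environment.
# ---------------------------------------------------------------
if __name__ == "__main__":
    program = AST()
    program.insert_node(AssignStatement("x", Integer(0)))
    program.insert_node(
        WhileStatement(
            RelOpexpr("<", Varexpr("x"), Integer(5)),
            AssignStatement("x", BinOpexpr("+", Varexpr("x"), Integer(1))),
        )
    )
    env = {}
    program.eval(env)
    print(env)  # expected: {'x': 5}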
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v5.services GoogleAdsFieldService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
from google.ads.google_ads.v5.services import google_ads_field_service_client_config
from google.ads.google_ads.v5.services.transports import google_ads_field_service_grpc_transport
from google.ads.google_ads.v5.proto.services import google_ads_field_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class GoogleAdsFieldServiceClient(object):
"""Service to fetch Google Ads API fields."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v5.services.GoogleAdsFieldService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GoogleAdsFieldServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def google_ads_field_path(cls, google_ads_field):
"""Return a fully-qualified google_ads_field string."""
return google.api_core.path_template.expand(
'googleAdsFields/{google_ads_field}',
google_ads_field=google_ads_field,
)
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None, client_options=None):
"""Constructor.
Args:
transport (Union[~.GoogleAdsFieldServiceGrpcTransport,
Callable[[~.Credentials, type], ~.GoogleAdsFieldServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = google_ads_field_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
            if isinstance(client_options, dict):
client_options = google.api_core.client_options.from_dict(client_options)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=google_ads_field_service_grpc_transport.GoogleAdsFieldServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = google_ads_field_service_grpc_transport.GoogleAdsFieldServiceGrpcTransport(
address=api_endpoint,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_google_ads_field(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns just the requested field.
Args:
resource_name (str): Required. The resource name of the field to get.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v5.types.GoogleAdsField` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_google_ads_field' not in self._inner_api_calls:
self._inner_api_calls['get_google_ads_field'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_google_ads_field,
default_retry=self._method_configs['GetGoogleAdsField'].retry,
default_timeout=self._method_configs['GetGoogleAdsField'].timeout,
client_info=self._client_info,
)
request = google_ads_field_service_pb2.GetGoogleAdsFieldRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_google_ads_field'](request, retry=retry, timeout=timeout, metadata=metadata)
def search_google_ads_fields(
self,
query,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns all fields that match the search query.
Args:
query (str): Required. The query string.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.ads.googleads_v5.types.GoogleAdsField` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'search_google_ads_fields' not in self._inner_api_calls:
self._inner_api_calls['search_google_ads_fields'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.search_google_ads_fields,
default_retry=self._method_configs['SearchGoogleAdsFields'].retry,
default_timeout=self._method_configs['SearchGoogleAdsFields'].timeout,
client_info=self._client_info,
)
request = google_ads_field_service_pb2.SearchGoogleAdsFieldsRequest(
query=query,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(self._inner_api_calls['search_google_ads_fields'], retry=retry, timeout=timeout, metadata=metadata),
request=request,
items_field='results',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
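# ---------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the generated
# client): the credentials path and query string are assumptions for
# the example, and iterating the returned PageIterator is assumed to
# yield GoogleAdsField messages exposing a `name` attribute.
# ---------------------------------------------------------------
if __name__ == '__main__':
    client = GoogleAdsFieldServiceClient.from_service_account_file(
        'service_account.json')  # hypothetical credentials file
    for field in client.search_google_ads_fields(
            query="SELECT name WHERE category = 'METRIC'",  # example query
            page_size=100):
        print(field.name)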
|
/* Generated By:JavaCC: Do not edit this line. MyToken.h Version 8.0.0 */
/* JavaCCOptions:STATIC=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true,INCLUDE_FOR_TOKEN=null,TOKEN_SUPER_CLASS= */
#ifndef JAVACC_MY_TOKEN_H
#define JAVACC_MY_TOKEN_H
#include "JavaCC.h"
#include "Token.h"
namespace FOO {
namespace BAR {
/**
* Describes the input token stream.
*/
class MyToken
: public Token
{
private:
/**
* An integer that describes the kind of this token. This numbering
* system is determined by JavaCCParser, and a table of these numbers is
* stored in the file ...Constants.java.
*/
int _kind;
int _beginLine; // The line number of the first character of this Token.
int _beginColumn; // The column number of the first character of this Token.
int _endLine; // The line number of the last character of this Token.
int _endColumn; // The column number of the last character of this Token.
JJString _image; // The string image of the token.
/**
* A reference to the next regular (non-special) token from the input
* stream. If this is the last token from the input stream, or if the
* token manager has not read tokens beyond this one, this field is
* set to NULL. This is true only if this token is also a regular
* token. Otherwise, see below for a description of the contents of
* this field.
*/
Token* _next;
/**
* This field is used to access special tokens that occur prior to this
* token, but after the immediately preceding regular (non-special) token.
* If there are no such special tokens, this field is set to NULL.
* When there are more than one such special token, this field refers
* to the last of these special tokens, which in turn refers to the next
* previous special token through its specialToken field, and so on
 * until the first special token (whose _specialToken field is NULL).
* The next fields of special tokens refer to other special tokens that
* immediately follow it (without an intervening regular token). If there
* is no such token, this field is NULL.
*/
Token* _specialToken;
void* _value;
public:
MyToken();
MyToken(int kind);
MyToken(int kind, const JJString& image);
virtual ~MyToken();
int& kind();
int& beginLine();
int& beginColumn();
int& endLine();
int& endColumn();
JJString& image();
Token*& next();
Token*& specialToken();
const int& kind() const;
const int& beginLine() const;
const int& beginColumn() const;
const int& endLine() const;
const int& endColumn() const;
const JJString& image() const;
const Token* next() const;
const Token* specialToken() const;
const JJString& toString();
/**
* Returns a new Token, by default. However, if you want, you
* can create and return subclass objects based on the value of ofKind.
* Simply add the cases to the switch for all those special cases.
* For example, if you have a subclass of Token called IDToken that
* you want to create if ofKind is ID, simply add something like :
*
* case MyParserConstants.ID : return new IDToken(ofKind, image);
*
* to the following switch statement. Then you can cast matchedToken
 * variable to the appropriate type and use it in your lexical actions.
*/
static MyToken* newToken(int ofKind, const JJString& image);
static MyToken* newToken(int ofKind);
/**
* An optional attribute value of the Token.
* Tokens which are not used as syntactic sugar will often contain
* meaningful values that will be used later on by the compiler or
* interpreter. This attribute value is often different from the image.
* Any subclass of Token that actually wants to return a non-NULL value can
* override this method as appropriate.
*/
void*& value();
const void* value() const;
};
}
}
#endif
/* JavaCC - OriginalChecksum=e3baa43daacdfb1726cde686d5d46cdd (do not edit this line) */
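/* Illustrative usage (a minimal sketch; the `kind` and `image` values
 * come from the generated parser and are assumptions here):
 *
 *   FOO::BAR::MyToken* tok = FOO::BAR::MyToken::newToken(kind, image);
 *   tok->value() = nullptr;  // optional attribute slot, see value() above
 *   delete tok;
 */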
|
/*
Copyright (c) 2003-2021, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['gl']={"editor":"Editor de texto mellorado","editorPanel":"Panel do editor de texto mellorado","common":{"editorHelp":"Prema ALT 0 para obter axuda","browseServer":"Examinar o servidor","url":"URL","protocol":"Protocolo","upload":"Enviar","uploadSubmit":"Enviar ao servidor","image":"Imaxe","flash":"Flash","form":"Formulario","checkbox":"Caixa de selección","radio":"Botón de opción","textField":"Campo de texto","textarea":"Área de texto","hiddenField":"Campo agochado","button":"Botón","select":"Campo de selección","imageButton":"Botón de imaxe","notSet":"<sen estabelecer>","id":"ID","name":"Nome","langDir":"Dirección de escritura do idioma","langDirLtr":"Esquerda a dereita (LTR)","langDirRtl":"Dereita a esquerda (RTL)","langCode":"Código do idioma","longDescr":"Descrición completa do URL","cssClass":"Clases da folla de estilos","advisoryTitle":"Título","cssStyle":"Estilo","ok":"Aceptar","cancel":"Cancelar","close":"Pechar","preview":"Vista previa","resize":"Redimensionar","generalTab":"Xeral","advancedTab":"Avanzado","validateNumberFailed":"Este valor non é un número.","confirmNewPage":"Calquera cambio que non gardara neste contido perderase.\r\nConfirma que quere cargar unha páxina nova?","confirmCancel":"Algunhas das opcións foron cambiadas.\r\nConfirma que quere pechar o diálogo?","options":"Opcións","target":"Destino","targetNew":"Nova xanela (_blank)","targetTop":"Xanela principal (_top)","targetSelf":"Mesma xanela (_self)","targetParent":"Xanela superior (_parent)","langDirLTR":"Esquerda a dereita (LTR)","langDirRTL":"Dereita a esquerda (RTL)","styles":"Estilo","cssClasses":"Clases da folla de estilos","width":"Largo","height":"Alto","align":"Aliñamento","left":"Esquerda","right":"Dereita","center":"Centro","justify":"Xustificado","alignLeft":"Aliñar á esquerda","alignRight":"Aliñar á dereita","alignCenter":"Aliñar ao centro","alignTop":"Arriba","alignMiddle":"Centro","alignBottom":"Abaixo","alignNone":"Ningún","invalidValue":"Valor incorrecto.","invalidHeight":"O alto debe ser un número.","invalidWidth":"O largo debe ser un número.","invalidLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida correcta (%2).","invalidCssLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida CSS correcta (px, %, in, cm, mm, em, ex, pt, ou pc).","invalidHtmlLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida HTML correcta (px ou %).","invalidInlineStyle":"O valor especificado no estilo en liña debe consistir nunha ou máis tuplas co formato «nome : valor», separadas por punto e coma.","cssLengthTooltip":"Escriba un número para o valor en píxeles ou un número cunha unidade CSS correcta (px, %, in, cm, mm, em, ex, pt, ou pc).","unavailable":"%1<span class=\"cke_accessibility\">, non dispoñíbel</span>","keyboard":{"8":"Ir atrás","13":"Intro","16":"Maiús","17":"Ctrl","18":"Alt","32":"Espazo","35":"Fin","36":"Inicio","46":"Supr","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Orde"},"keyboardShortcut":"Atallo de teclado","optionDefault":"Predeterminado"},"about":{"copy":"Copyright © $1. 
Todos os dereitos reservados.","dlgTitle":"Sobre o CKEditor 4","moreInfo":"Para obter información sobre a licenza, visite o noso sitio web:"},"basicstyles":{"bold":"Negra","italic":"Cursiva","strike":"Riscado","subscript":"Subíndice","superscript":"Superíndice","underline":"Subliñado"},"blockquote":{"toolbar":"Cita"},"notification":{"closed":"Notificación pechada."},"toolbar":{"toolbarCollapse":"Contraer a barra de ferramentas","toolbarExpand":"Expandir a barra de ferramentas","toolbarGroups":{"document":"Documento","clipboard":"Portapapeis/desfacer","editing":"Edición","forms":"Formularios","basicstyles":"Estilos básicos","paragraph":"Paragrafo","links":"Ligazóns","insert":"Inserir","styles":"Estilos","colors":"Cores","tools":"Ferramentas"},"toolbars":"Barras de ferramentas do editor"},"clipboard":{"copy":"Copiar","copyError":"Os axustes de seguranza do seu navegador non permiten que o editor realice automaticamente as tarefas de copia. Use o teclado para iso (Ctrl/Cmd+C).","cut":"Cortar","cutError":"Os axustes de seguranza do seu navegador non permiten que o editor realice automaticamente as tarefas de corte. Use o teclado para iso (Ctrl/Cmd+X).","paste":"Pegar","pasteNotification":"Prema %1 para pegar. O seu navegador non admite pegar co botón da barra de ferramentas ou coa opción do menú contextual.","pasteArea":"Zona de pegado","pasteMsg":"Pegue o contido dentro da área de abaixo e prema Aceptar."},"contextmenu":{"options":"Opcións do menú contextual"},"elementspath":{"eleLabel":"Ruta dos elementos","eleTitle":"Elemento %1"},"filetools":{"loadError":"Produciuse un erro durante a lectura do ficheiro.","networkError":"Produciuse un erro na rede durante o envío do ficheiro.","httpError404":"Produciuse un erro HTTP durante o envío do ficheiro (404: Ficheiro non atopado).","httpError403":"Produciuse un erro HTTP durante o envío do ficheiro (403: Acceso denegado).","httpError":"Produciuse un erro HTTP durante o envío do ficheiro (erro de estado: %1).","noUrlError":"Non foi definido o URL para o envío.","responseError":"Resposta incorrecta do servidor."},"format":{"label":"Formato","panelTitle":"Formato do parágrafo","tag_address":"Enderezo","tag_div":"Normal (DIV)","tag_h1":"Enacabezado 1","tag_h2":"Encabezado 2","tag_h3":"Encabezado 3","tag_h4":"Encabezado 4","tag_h5":"Encabezado 5","tag_h6":"Encabezado 6","tag_p":"Normal","tag_pre":"Formatado"},"horizontalrule":{"toolbar":"Inserir unha liña horizontal"},"image":{"alt":"Texto alternativo","border":"Bordo","btnUpload":"Enviar ao servidor","button2Img":"Quere converter o botón da imaxe seleccionada nunha imaxe sinxela?","hSpace":"Esp.Horiz.","img2Button":"Quere converter a imaxe seleccionada nun botón de imaxe?","infoTab":"Información da imaxe","linkTab":"Ligazón","lockRatio":"Proporcional","menu":"Propiedades da imaxe","resetSize":"Tamaño orixinal","title":"Propiedades da imaxe","titleButton":"Propiedades do botón de imaxe","upload":"Cargar","urlMissing":"Non se atopa o URL da imaxe.","vSpace":"Esp.Vert.","validateBorder":"O bordo debe ser un número.","validateHSpace":"O espazado horizontal debe ser un número.","validateVSpace":"O espazado vertical debe ser un número."},"indent":{"indent":"Aumentar a sangría","outdent":"Reducir a sangría"},"fakeobjects":{"anchor":"Ancoraxe","flash":"Animación «Flash»","hiddenfield":"Campo agochado","iframe":"IFrame","unknown":"Obxecto descoñecido"},"link":{"acccessKey":"Chave de acceso","advanced":"Avanzado","advisoryContentType":"Tipo de contido 
informativo","advisoryTitle":"Título","anchor":{"toolbar":"Ancoraxe","menu":"Editar a ancoraxe","title":"Propiedades da ancoraxe","name":"Nome da ancoraxe","errorName":"Escriba o nome da ancoraxe","remove":"Retirar a ancoraxe"},"anchorId":"Polo ID do elemento","anchorName":"Polo nome da ancoraxe","charset":"Codificación do recurso ligado","cssClasses":"Clases da folla de estilos","download":"Forzar a descarga","displayText":"Amosar o texto","emailAddress":"Enderezo de correo","emailBody":"Corpo da mensaxe","emailSubject":"Asunto da mensaxe","id":"ID","info":"Información da ligazón","langCode":"Código do idioma","langDir":"Dirección de escritura do idioma","langDirLTR":"Esquerda a dereita (LTR)","langDirRTL":"Dereita a esquerda (RTL)","menu":"Editar a ligazón","name":"Nome","noAnchors":"(Non hai ancoraxes dispoñíbeis no documento)","noEmail":"Escriba o enderezo de correo","noUrl":"Escriba a ligazón URL","noTel":"Escriba o número de teléfono","other":"<other>","phoneNumber":"Número de teléfono","popupDependent":"Dependente (Netscape)","popupFeatures":"Características da xanela emerxente","popupFullScreen":"Pantalla completa (IE)","popupLeft":"Posición esquerda","popupLocationBar":"Barra de localización","popupMenuBar":"Barra do menú","popupResizable":"Redimensionábel","popupScrollBars":"Barras de desprazamento","popupStatusBar":"Barra de estado","popupToolbar":"Barra de ferramentas","popupTop":"Posición superior","rel":"Relación","selectAnchor":"Seleccionar unha ancoraxe","styles":"Estilo","tabIndex":"Índice de tabulación","target":"Destino","targetFrame":"<marco>","targetFrameName":"Nome do marco de destino","targetPopup":"<xanela emerxente>","targetPopupName":"Nome da xanela emerxente","title":"Ligazón","toAnchor":"Ligar coa ancoraxe no testo","toEmail":"Correo","toUrl":"URL","toPhone":"Teléfono","toolbar":"Ligazón","type":"Tipo de ligazón","unlink":"Eliminar a ligazón","upload":"Enviar"},"list":{"bulletedlist":"Inserir/retirar lista viñeteada","numberedlist":"Inserir/retirar lista numerada"},"magicline":{"title":"Inserir aquí o parágrafo"},"maximize":{"maximize":"Maximizar","minimize":"Minimizar"},"pastetext":{"button":"Pegar como texto simple","pasteNotification":"Prema %1 para pegar. O seu navegador non admite pegar co botón da barra de ferramentas ou coa opción do menú contextual.","title":"Pegar como texto simple"},"pastefromword":{"confirmCleanup":"O texto que quere pegar semella ser copiado desde o Word. 
Quere depuralo antes de pegalo?","error":"Non foi posíbel depurar os datos pegados por mor dun erro interno","title":"Pegar desde Word","toolbar":"Pegar desde Word"},"removeformat":{"toolbar":"Retirar o formato"},"sourcearea":{"toolbar":"Orixe"},"specialchar":{"options":"Opcións de caracteres especiais","title":"Seleccione un carácter especial","toolbar":"Inserir un carácter especial"},"scayt":{"btn_about":"About SCAYT","btn_dictionaries":"Dictionaries","btn_disable":"Disable SCAYT","btn_enable":"Enable SCAYT","btn_langs":"Languages","btn_options":"Options","text_title":"Spell Check As You Type"},"stylescombo":{"label":"Estilos","panelTitle":"Estilos de formatando","panelTitle1":"Estilos de bloque","panelTitle2":"Estilos de liña","panelTitle3":"Estilos de obxecto"},"table":{"border":"Tamaño do bordo","caption":"Título","cell":{"menu":"Cela","insertBefore":"Inserir a cela á esquerda","insertAfter":"Inserir a cela á dereita","deleteCell":"Eliminar celas","merge":"Combinar celas","mergeRight":"Combinar á dereita","mergeDown":"Combinar cara abaixo","splitHorizontal":"Dividir a cela en horizontal","splitVertical":"Dividir a cela en vertical","title":"Propiedades da cela","cellType":"Tipo de cela","rowSpan":"Expandir filas","colSpan":"Expandir columnas","wordWrap":"Axustar ao contido","hAlign":"Aliñación horizontal","vAlign":"Aliñación vertical","alignBaseline":"Liña de base","bgColor":"Cor do fondo","borderColor":"Cor do bordo","data":"Datos","header":"Cabeceira","yes":"Si","no":"Non","invalidWidth":"O largo da cela debe ser un número.","invalidHeight":"O alto da cela debe ser un número.","invalidRowSpan":"A expansión de filas debe ser un número enteiro.","invalidColSpan":"A expansión de columnas debe ser un número enteiro.","chooseColor":"Escoller"},"cellPad":"Marxe interior da cela","cellSpace":"Marxe entre celas","column":{"menu":"Columna","insertBefore":"Inserir a columna á esquerda","insertAfter":"Inserir a columna á dereita","deleteColumn":"Borrar Columnas"},"columns":"Columnas","deleteTable":"Borrar Táboa","headers":"Cabeceiras","headersBoth":"Ambas","headersColumn":"Primeira columna","headersNone":"Ningún","headersRow":"Primeira fila","heightUnit":"unidade do alto","invalidBorder":"O tamaño do bordo debe ser un número.","invalidCellPadding":"A marxe interior debe ser un número positivo.","invalidCellSpacing":"A marxe entre celas debe ser un número positivo.","invalidCols":"O número de columnas debe ser un número maior que 0.","invalidHeight":"O alto da táboa debe ser un número.","invalidRows":"O número de filas debe ser un número maior que 0","invalidWidth":"O largo da táboa debe ser un número.","menu":"Propiedades da táboa","row":{"menu":"Fila","insertBefore":"Inserir a fila por riba","insertAfter":"Inserir a fila por baixo","deleteRow":"Eliminar filas"},"rows":"Filas","summary":"Resumo","title":"Propiedades da táboa","toolbar":"Taboa","widthPc":"porcentaxe","widthPx":"píxeles","widthUnit":"unidade do largo"},"undo":{"redo":"Refacer","undo":"Desfacer"},"widget":{"move":"Prema e arrastre para mover","label":"Trebello %1"},"uploadwidget":{"abort":"Envío interrompido polo usuario.","doneOne":"Ficheiro enviado satisfactoriamente.","doneMany":"%1 ficheiros enviados satisfactoriamente.","uploadOne":"Enviando o ficheiro ({percentage}%)...","uploadMany":"Enviando ficheiros, {current} de {max} feito o ({percentage}%)..."},"imagebase":{"captionPlaceholder":"Introducir o título da imaxe"},"easyimage":{"commands":{"fullImage":"Imaxe de tamaño completo","sideImage":"Imaxe 
lateral","altText":"Cambiar o texto alternativo da imaxe","upload":"Enviar imaxe"},"uploadFailed":"Non foi posíbel enviar a imaxe por mor dun erro da rede."}};
|
"use strict";
const fs = require("fs-extra");
const pMap = require("p-map");
module.exports = removeTempLicenses;
/**
 * Remove the temporary license file from each package; resolves
 * immediately when the list is empty.
 * @param {Array<{ licensePath: string }>} packagesToBeLicensed
 * @returns {Promise}
 */
function removeTempLicenses(packagesToBeLicensed) {
if (!packagesToBeLicensed.length) {
return Promise.resolve();
}
return pMap(packagesToBeLicensed, pkg => fs.remove(pkg.licensePath));
}
|
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from libpysal import weights
from esda.crand import (
crand as _crand_plus,
njit as _njit,
_prepare_univariate,
_prepare_bivariate
)
PERMUTATIONS = 999
class Join_Counts_Local_BV(BaseEstimator):
"""Univariate Local Join Count Statistic"""
def __init__(self, connectivity=None, permutations=PERMUTATIONS, n_jobs=1,
keep_simulations=True, seed=None):
"""
        Initialize a Join_Counts_Local_BV estimator
Arguments
---------
connectivity : scipy.sparse matrix object
the connectivity structure describing
the relationships between observed units.
Need not be row-standardized.
permutations : int
number of random permutations for calculation of pseudo
p_values
n_jobs : int
Number of cores to be used in the conditional randomisation. If -1,
all available cores are used.
keep_simulations : Boolean
(default=True)
If True, the entire matrix of replications under the null
is stored in memory and accessible; otherwise, replications
are not saved
seed : None/int
Seed to ensure reproducibility of conditional randomizations.
Must be set here, and not outside of the function, since numba
does not correctly interpret external seeds
nor numpy.random.RandomState instances.
"""
self.connectivity = connectivity
self.permutations = permutations
self.n_jobs = n_jobs
self.keep_simulations = keep_simulations
self.seed = seed
def fit(self, x, z, case="CLC", n_jobs=1, permutations=999):
"""
Arguments
---------
x : numpy.ndarray
array containing binary (0/1) data
z : numpy.ndarray
array containing binary (0/1) data
Returns
-------
the fitted estimator.
Notes
-----
Technical details and derivations can be found in :cite:`AnselinLi2019`.
Examples
--------
>>> import libpysal
>>> w = libpysal.weights.lat2W(4, 4)
>>> x = np.ones(16)
>>> x[0:8] = 0
>>> z = [0,1,0,1,1,1,1,1,0,0,1,1,0,0,1,1]
        >>> LJC_BV_C1 = Join_Counts_Local_BV(connectivity=w).fit(x, z, case="BJC")
        >>> LJC_BV_C2 = Join_Counts_Local_BV(connectivity=w).fit(x, z, case="CLC")
>>> LJC_BV_C1.LJC
>>> LJC_BV_C1.p_sim
>>> LJC_BV_C2.LJC
>>> LJC_BV_C2.p_sim
Commpop data replicating GeoDa tutorial (Case 1)
>>> import libpysal
>>> import geopandas as gpd
>>> commpop = gpd.read_file("https://github.com/jeffcsauer/GSOC2020/raw/master/validation/data/commpop.gpkg")
>>> w = libpysal.weights.Queen.from_dataframe(commpop)
        >>> LJC_BV_Case1 = Join_Counts_Local_BV(connectivity=w).fit(commpop['popneg'], commpop['popplus'], case='BJC')
>>> LJC_BV_Case1.LJC
>>> LJC_BV_Case1.p_sim
Guerry data replicating GeoDa tutorial (Case 2)
>>> import libpysal
>>> import geopandas as gpd
>>> guerry = libpysal.examples.load_example('Guerry')
>>> guerry_ds = gpd.read_file(guerry.get_path('Guerry.shp'))
>>> guerry_ds['infq5'] = 0
>>> guerry_ds['donq5'] = 0
>>> guerry_ds.loc[(guerry_ds['Infants'] > 23574), 'infq5'] = 1
>>> guerry_ds.loc[(guerry_ds['Donatns'] > 10973), 'donq5'] = 1
>>> w = libpysal.weights.Queen.from_dataframe(guerry_ds)
        >>> LJC_BV_Case2 = Join_Counts_Local_BV(connectivity=w).fit(guerry_ds['infq5'], guerry_ds['donq5'], case='CLC')
>>> LJC_BV_Case2.LJC
>>> LJC_BV_Case2.p_sim
"""
        # Ensure the input arrays have dtype='float', as required by numba.
x = np.array(x, dtype='float')
z = np.array(z, dtype='float')
w = self.connectivity
# Fill the diagonal with 0s
w = weights.util.fill_diagonal(w, val=0)
w.transform = 'b'
self.x = x
self.z = z
self.n = len(x)
self.w = w
self.case = case
        keep_simulations = self.keep_simulations
        # NOTE: the estimator-level `n_jobs` set in __init__ takes
        # precedence over the `n_jobs` argument passed to `fit`.
        n_jobs = self.n_jobs
        seed = self.seed
self.LJC = self._statistic(x, z, w, case=case)
if permutations:
if case == "BJC":
self.p_sim, self.rjoins = _crand_plus(
z=np.column_stack((x, z)),
w=self.w,
observed=self.LJC,
permutations=permutations,
keep=True,
n_jobs=n_jobs,
stat_func=_ljc_bv_case1
)
                # Set p-values for observations with an LJC of 0 to NaN
                self.p_sim[self.LJC == 0] = np.nan
elif case == "CLC":
self.p_sim, self.rjoins = _crand_plus(
z=np.column_stack((x, z)),
w=self.w,
observed=self.LJC,
permutations=permutations,
keep=True,
n_jobs=n_jobs,
stat_func=_ljc_bv_case2
)
                # Set p-values for observations with an LJC of 0 to NaN
                self.p_sim[self.LJC == 0] = np.nan
            else:
                raise NotImplementedError(
                    f"The requested LJC method ({case}) is not currently supported!"
                )
return self
@staticmethod
def _statistic(x, z, w, case):
# Create adjacency list. Note that remove_symmetric=False - this is
# different from the esda.Join_Counts() function.
adj_list = w.to_adjlist(remove_symmetric=False)
# First, set up a series that maps the values
# to the weights table
zseries_x = pd.Series(x, index=w.id_order)
zseries_z = pd.Series(z, index=w.id_order)
# Map the values to the focal (i) values
focal_x = zseries_x.loc[adj_list.focal].values
focal_z = zseries_z.loc[adj_list.focal].values
# Map the values to the neighbor (j) values
neighbor_x = zseries_x.loc[adj_list.neighbor].values
neighbor_z = zseries_z.loc[adj_list.neighbor].values
if case == "BJC":
BJC = (focal_x == 1) & (focal_z == 0) & \
(neighbor_x == 0) & (neighbor_z == 1)
adj_list_BJC = pd.DataFrame(adj_list.focal.values,
BJC.astype('uint8')).reset_index()
adj_list_BJC.columns = ['BJC', 'ID']
adj_list_BJC = adj_list_BJC.groupby(by='ID').sum()
return (np.array(adj_list_BJC.BJC.values, dtype='float'))
elif case == "CLC":
CLC = (focal_x == 1) & (focal_z == 1) & \
(neighbor_x == 1) & (neighbor_z == 1)
adj_list_CLC = pd.DataFrame(adj_list.focal.values,
CLC.astype('uint8')).reset_index()
adj_list_CLC.columns = ['CLC', 'ID']
adj_list_CLC = adj_list_CLC.groupby(by='ID').sum()
return (np.array(adj_list_CLC.CLC.values, dtype='float'))
        else:
            raise NotImplementedError(
                f"The requested LJC method ({case}) is not currently supported!"
            )
# --------------------------------------------------------------
# Conditional Randomization Function Implementations
# --------------------------------------------------------------
# Note: scaling not used
@_njit(fastmath=True)
def _ljc_bv_case1(i, z, permuted_ids, weights_i, scaling):
zx = z[:, 0]
zy = z[:, 1]
zyi, zyrand = _prepare_univariate(i, zy, permuted_ids, weights_i)
return zx[i] * (zyrand @ weights_i)
@_njit(fastmath=True)
def _ljc_bv_case2(i, z, permuted_ids, weights_i, scaling):
zx = z[:, 0]
zy = z[:, 1]
zxi, zxrand, zyi, zyrand = _prepare_bivariate(i, z, permuted_ids, weights_i)
zf = zxrand * zyrand
return zy[i] * (zf @ weights_i)
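# ---------------------------------------------------------------
# Illustrative usage (a minimal sketch mirroring the docstring
# examples above; requires libpysal and esda's compiled dependencies):
# ---------------------------------------------------------------
if __name__ == "__main__":
    import libpysal

    w = libpysal.weights.lat2W(4, 4)
    x = np.ones(16)
    x[0:8] = 0
    z = [0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
    ljc = Join_Counts_Local_BV(connectivity=w).fit(x, z, case="BJC")
    print(ljc.LJC)    # local bivariate join counts per observation
    print(ljc.p_sim)  # pseudo p-values (NaN where LJC == 0)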
|
class PlayerPortraitGameGUI {
static getClassName() {
return "PlayerPortraitGameGUI";
}
static initialize() {
if (Game.debugMode) BABYLON.Tools.Log("Initializing PlayerPortraitGameGUI");
PlayerPortraitGameGUI.playerName = null;
PlayerPortraitGameGUI.playerIcon = null;
PlayerPortraitGameGUI.playerHealthBar = null;
PlayerPortraitGameGUI.playerHealthText = null;
PlayerPortraitGameGUI.playerStaminaBar = null;
PlayerPortraitGameGUI.playerStaminaText = null;
PlayerPortraitGameGUI.isVisible = false;
PlayerPortraitGameGUI.controller = PlayerPortraitGameGUI.generateController();
PlayerPortraitGameGUI.initialized = true;
PlayerPortraitGameGUI.containerAlpha = 0.75;
PlayerPortraitGameGUI.entityID = null;
PlayerPortraitGameGUI.cachedEntity = null;
PlayerPortraitGameGUI.targetIsCreature = null;
}
static generateController() {
var portrait = GameGUI.createRectangle("playerPortrait");
portrait.verticalAlignment = BABYLON.GUI.Control.VERTICAL_ALIGNMENT_TOP;
portrait.horizontalAlignment = BABYLON.GUI.Control.HORIZONTAL_ALIGNMENT_LEFT;
portrait.height = GameGUI.getFontSize(4);
portrait.width = GameGUI.getFontSize(14);
portrait.top = 0;
portrait.left = 0;
var portraitBackground = GameGUI.createRectangle("portraitBackground");
portraitBackground.verticalAlignment = BABYLON.GUI.Control.VERTICAL_ALIGNMENT_TOP;
portraitBackground.horizontalAlignment = BABYLON.GUI.Control.HORIZONTAL_ALIGNMENT_LEFT;
portraitBackground.height = GameGUI.getFontSize(4);
portraitBackground.width = 1;
portraitBackground.top = 0;
portraitBackground.left = 0;
portraitBackground.alpha = PlayerPortraitGameGUI.containerAlpha;
var portraitAvatarContainer = GameGUI.createRectangle();
portraitAvatarContainer.verticalAlignment = BABYLON.GUI.Control.VERTICAL_ALIGNMENT_TOP;
portraitAvatarContainer.horizontalAlignment = BABYLON.GUI.Control.HORIZONTAL_ALIGNMENT_LEFT;
portraitAvatarContainer.height = GameGUI.getFontSize(4);
portraitAvatarContainer.width = 0.33;
portraitAvatarContainer.top = 0;
portraitAvatarContainer.left = 0;
var portraitAvatar = new BABYLON.GUI.Image("portraitAvatar", "resources/images/icons/characters/genericCharacter.svg");
portraitAvatar.stretch = BABYLON.GUI.Image.STRETCH_UNIFORM;
var portraitStats = new BABYLON.GUI.StackPanel("portraitStats");
portraitStats.isVertical = true;
portraitStats.height = GameGUI.getFontSize(4);
portraitStats.width = GameGUI.getFontSize(10);
portraitStats.top = 0;
portraitStats.left = "21%";
var portraitName = GameGUI.createTextBlock("playerName");
portraitName.text = "Your Name Here";
portraitName.textHorizontalAlignment = BABYLON.GUI.Control.HORIZONTAL_ALIGNMENT_CENTER;
portraitName.height = GameGUI.getFontSize();
portraitName.width = 1.0;
var portraitStatsHealthContainer = GameGUI.createRectangle("portraitStatsHealthContainer");
portraitStatsHealthContainer.height = GameGUI.getFontSize();
portraitStatsHealthContainer.width = 0.85;
var portraitStatsHealthText = GameGUI.createTextBlock("portraitStatsHealthText");
portraitStatsHealthText.text = "";
portraitStatsHealthText.textVerticalAlignment = BABYLON.GUI.Control.VERTICAL_ALIGNMENT_CENTER;
portraitStatsHealthText.textHorizontalAlignment = BABYLON.GUI.Control.HORIZONTAL_ALIGNMENT_CENTER;
var portraitStatsHealthSlider = new BABYLON.GUI.Slider("portraitStatsHealth");
portraitStatsHealthSlider.minimum = 0;
portraitStatsHealthSlider.maximum = 100;
portraitStatsHealthSlider.isVertical = false;
portraitStatsHealthSlider.displayThumb = false;
portraitStatsHealthSlider.left = "16px";
portraitStatsHealthSlider.height = GameGUI.getFontSize(1.5);
portraitStatsHealthSlider.thumbWidth = 0;
portraitStatsHealthSlider.isEnabled = false;
portraitStatsHealthSlider.color = "red";
var portraitStatsStaminaContainer = GameGUI.createRectangle("portraitStatsStaminaContainer");
portraitStatsStaminaContainer.height = GameGUI.getFontSize();
portraitStatsStaminaContainer.width = 0.85;
var portraitStatsStaminaText = GameGUI.createTextBlock("portraitStatsStaminaText");
portraitStatsStaminaText.text = "";
portraitStatsStaminaText.textVerticalAlignment = BABYLON.GUI.Control.VERTICAL_ALIGNMENT_CENTER;
portraitStatsStaminaText.textHorizontalAlignment = BABYLON.GUI.Control.HORIZONTAL_ALIGNMENT_CENTER;
var portraitStatsStaminaSlider = new BABYLON.GUI.Slider("portraitStatsStaminaSlider");
portraitStatsStaminaSlider.minimum = 0;
portraitStatsStaminaSlider.maximum = 100;
portraitStatsStaminaSlider.isVertical = false;
portraitStatsStaminaSlider.displayThumb = false;
portraitStatsStaminaSlider.left = "16px";
portraitStatsStaminaSlider.height = GameGUI.getFontSize(1.5);
portraitStatsStaminaSlider.thumbWidth = 0;
portraitStatsStaminaSlider.isEnabled = false;
portraitStatsStaminaSlider.color = "green";
portrait.addControl(portraitBackground);
portrait.addControl(portraitAvatarContainer);
portraitAvatarContainer.addControl(portraitAvatar);
portrait.addControl(portraitStats);
portraitStats.addControl(portraitName);
portraitStatsHealthContainer.addControl(portraitStatsHealthSlider);
portraitStatsHealthContainer.addControl(portraitStatsHealthText);
portraitStats.addControl(portraitStatsHealthContainer);
portraitStatsStaminaContainer.addControl(portraitStatsStaminaSlider);
portraitStatsStaminaContainer.addControl(portraitStatsStaminaText);
portraitStats.addControl(portraitStatsStaminaContainer);
portrait.zIndex = 10;
PlayerPortraitGameGUI.playerName = portraitName;
PlayerPortraitGameGUI.playerIcon = portraitAvatar;
PlayerPortraitGameGUI.playerHealthBar = portraitStatsHealthSlider;
PlayerPortraitGameGUI.playerHealthText = portraitStatsHealthText;
PlayerPortraitGameGUI.playerStaminaBar = portraitStatsStaminaSlider;
PlayerPortraitGameGUI.playerStaminaText = portraitStatsStaminaText;
return portrait;
}
static resize() {
return 0;
}
static show() {
PlayerPortraitGameGUI.controller.isVisible = true;
PlayerPortraitGameGUI.isVisible = true;
return 0;
}
static hide() {
PlayerPortraitGameGUI.controller.isVisible = false;
PlayerPortraitGameGUI.isVisible = false;
return 0;
}
static clear() {
PlayerPortraitGameGUI.entityID = null;
PlayerPortraitGameGUI.cachedEntity = null;
PlayerPortraitGameGUI.targetIsCreature = false;
}
static set(entityController = Game.player) {
if (!(entityController instanceof EntityController)) {
PlayerPortraitGameGUI.clear();
return 2;
}
if (!Game.hasCachedEntity(entityController.entityID)) {
Game.getEntity(entityController.entityID);
PlayerPortraitGameGUI.clear();
return 1;
}
PlayerPortraitGameGUI.entityID = entityController.entityID;
PlayerPortraitGameGUI.cachedEntity = Game.getCachedEntity(entityController.entityID);
PlayerPortraitGameGUI.update();
PlayerPortraitGameGUI.setImage(PlayerPortraitGameGUI.cachedEntity.iconID);
PlayerPortraitGameGUI.setName(PlayerPortraitGameGUI.cachedEntity.name);
return 0;
}
static update() {
PlayerPortraitGameGUI.setHealthSlider(PlayerPortraitGameGUI.cachedEntity.health/PlayerPortraitGameGUI.cachedEntity.maxHealth*100);
PlayerPortraitGameGUI.setHealthText(PlayerPortraitGameGUI.cachedEntity.health + "/" + PlayerPortraitGameGUI.cachedEntity.maxHealth);
PlayerPortraitGameGUI.setStaminaSlider((PlayerPortraitGameGUI.cachedEntity.health-PlayerPortraitGameGUI.cachedEntity.stamina)/PlayerPortraitGameGUI.cachedEntity.health*100);
let number = PlayerPortraitGameGUI.cachedEntity.health - PlayerPortraitGameGUI.cachedEntity.stamina;
if (number < 0) {
number = 0;
}
PlayerPortraitGameGUI.setStaminaText(number + "/" + PlayerPortraitGameGUI.cachedEntity.health);
}
static setImage(iconID = "genericCharacter") {
PlayerPortraitGameGUI.playerIcon.domImage.setAttribute("src", Game.getIcon(iconID));
}
static setName(string) {
PlayerPortraitGameGUI.playerName.text = string;
}
static setHealthSlider(int = 100) {
PlayerPortraitGameGUI.playerHealthBar.value = int;
}
static setHealthText(text = "") {
PlayerPortraitGameGUI.playerHealthText.text = text;
}
static setStaminaSlider(int = 100) {
PlayerPortraitGameGUI.playerStaminaBar.value = int;
}
static setStaminaText(text = "") {
PlayerPortraitGameGUI.playerStaminaText.text = text;
}
static getController() {
return PlayerPortraitGameGUI.controller;
}
}
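// ---------------------------------------------------------------
// Illustrative usage (a minimal sketch; assumes the surrounding game
// engine has already created `Game`, `GameGUI`, and a player
// EntityController — the attach step below is hypothetical):
//
//   PlayerPortraitGameGUI.initialize();
//   GameGUI.addControl(PlayerPortraitGameGUI.getController()); // hypothetical
//   PlayerPortraitGameGUI.set(Game.player);
//   PlayerPortraitGameGUI.show();
// ---------------------------------------------------------------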
|
/*
* WARNING: do not edit!
* Generated by makefile from include\openssl\x509_vfy.h.in
*
* Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#ifndef OPENSSL_X509_VFY_H
# define OPENSSL_X509_VFY_H
# pragma once
# include <openssl/macros.h>
# ifndef OPENSSL_NO_DEPRECATED_3_0
# define HEADER_X509_VFY_H
# endif
/*
* Protect against recursion, x509.h and x509_vfy.h each include the other.
*/
# ifndef OPENSSL_X509_H
# include <openssl/x509.h>
# endif
# include <openssl/opensslconf.h>
# include <openssl/lhash.h>
# include <openssl/bio.h>
# include <openssl/crypto.h>
# include <openssl/symhacks.h>
#ifdef __cplusplus
extern "C" {
#endif
/*-
SSL_CTX -> X509_STORE
-> X509_LOOKUP
->X509_LOOKUP_METHOD
-> X509_LOOKUP
->X509_LOOKUP_METHOD
SSL -> X509_STORE_CTX
->X509_STORE
The X509_STORE holds the tables etc for verification stuff.
A X509_STORE_CTX is used while validating a single certificate.
The X509_STORE has X509_LOOKUPs for looking up certs.
The X509_STORE then calls a function to actually verify the
certificate chain.
*/
typedef enum {
X509_LU_NONE = 0,
X509_LU_X509, X509_LU_CRL
} X509_LOOKUP_TYPE;
#ifndef OPENSSL_NO_DEPRECATED_1_1_0
#define X509_LU_RETRY -1
#define X509_LU_FAIL 0
#endif
SKM_DEFINE_STACK_OF_INTERNAL(X509_LOOKUP, X509_LOOKUP, X509_LOOKUP)
#define sk_X509_LOOKUP_num(sk) OPENSSL_sk_num(ossl_check_const_X509_LOOKUP_sk_type(sk))
#define sk_X509_LOOKUP_value(sk, idx) ((X509_LOOKUP *)OPENSSL_sk_value(ossl_check_const_X509_LOOKUP_sk_type(sk), (idx)))
#define sk_X509_LOOKUP_new(cmp) ((STACK_OF(X509_LOOKUP) *)OPENSSL_sk_new(ossl_check_X509_LOOKUP_compfunc_type(cmp)))
#define sk_X509_LOOKUP_new_null() ((STACK_OF(X509_LOOKUP) *)OPENSSL_sk_new_null())
#define sk_X509_LOOKUP_new_reserve(cmp, n) ((STACK_OF(X509_LOOKUP) *)OPENSSL_sk_new_reserve(ossl_check_X509_LOOKUP_compfunc_type(cmp), (n)))
#define sk_X509_LOOKUP_reserve(sk, n) OPENSSL_sk_reserve(ossl_check_X509_LOOKUP_sk_type(sk), (n))
#define sk_X509_LOOKUP_free(sk) OPENSSL_sk_free(ossl_check_X509_LOOKUP_sk_type(sk))
#define sk_X509_LOOKUP_zero(sk) OPENSSL_sk_zero(ossl_check_X509_LOOKUP_sk_type(sk))
#define sk_X509_LOOKUP_delete(sk, i) ((X509_LOOKUP *)OPENSSL_sk_delete(ossl_check_X509_LOOKUP_sk_type(sk), (i)))
#define sk_X509_LOOKUP_delete_ptr(sk, ptr) ((X509_LOOKUP *)OPENSSL_sk_delete_ptr(ossl_check_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_type(ptr)))
#define sk_X509_LOOKUP_push(sk, ptr) OPENSSL_sk_push(ossl_check_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_type(ptr))
#define sk_X509_LOOKUP_unshift(sk, ptr) OPENSSL_sk_unshift(ossl_check_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_type(ptr))
#define sk_X509_LOOKUP_pop(sk) ((X509_LOOKUP *)OPENSSL_sk_pop(ossl_check_X509_LOOKUP_sk_type(sk)))
#define sk_X509_LOOKUP_shift(sk) ((X509_LOOKUP *)OPENSSL_sk_shift(ossl_check_X509_LOOKUP_sk_type(sk)))
#define sk_X509_LOOKUP_pop_free(sk, freefunc) OPENSSL_sk_pop_free(ossl_check_X509_LOOKUP_sk_type(sk),ossl_check_X509_LOOKUP_freefunc_type(freefunc))
#define sk_X509_LOOKUP_insert(sk, ptr, idx) OPENSSL_sk_insert(ossl_check_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_type(ptr), (idx))
#define sk_X509_LOOKUP_set(sk, idx, ptr) ((X509_LOOKUP *)OPENSSL_sk_set(ossl_check_X509_LOOKUP_sk_type(sk), (idx), ossl_check_X509_LOOKUP_type(ptr)))
#define sk_X509_LOOKUP_find(sk, ptr) OPENSSL_sk_find(ossl_check_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_type(ptr))
#define sk_X509_LOOKUP_find_ex(sk, ptr) OPENSSL_sk_find_ex(ossl_check_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_type(ptr))
#define sk_X509_LOOKUP_sort(sk) OPENSSL_sk_sort(ossl_check_X509_LOOKUP_sk_type(sk))
#define sk_X509_LOOKUP_is_sorted(sk) OPENSSL_sk_is_sorted(ossl_check_const_X509_LOOKUP_sk_type(sk))
#define sk_X509_LOOKUP_dup(sk) ((STACK_OF(X509_LOOKUP) *)OPENSSL_sk_dup(ossl_check_const_X509_LOOKUP_sk_type(sk)))
#define sk_X509_LOOKUP_deep_copy(sk, copyfunc, freefunc) ((STACK_OF(X509_LOOKUP) *)OPENSSL_sk_deep_copy(ossl_check_const_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_copyfunc_type(copyfunc), ossl_check_X509_LOOKUP_freefunc_type(freefunc)))
#define sk_X509_LOOKUP_set_cmp_func(sk, cmp) ((sk_X509_LOOKUP_compfunc)OPENSSL_sk_set_cmp_func(ossl_check_X509_LOOKUP_sk_type(sk), ossl_check_X509_LOOKUP_compfunc_type(cmp)))
SKM_DEFINE_STACK_OF_INTERNAL(X509_OBJECT, X509_OBJECT, X509_OBJECT)
#define sk_X509_OBJECT_num(sk) OPENSSL_sk_num(ossl_check_const_X509_OBJECT_sk_type(sk))
#define sk_X509_OBJECT_value(sk, idx) ((X509_OBJECT *)OPENSSL_sk_value(ossl_check_const_X509_OBJECT_sk_type(sk), (idx)))
#define sk_X509_OBJECT_new(cmp) ((STACK_OF(X509_OBJECT) *)OPENSSL_sk_new(ossl_check_X509_OBJECT_compfunc_type(cmp)))
#define sk_X509_OBJECT_new_null() ((STACK_OF(X509_OBJECT) *)OPENSSL_sk_new_null())
#define sk_X509_OBJECT_new_reserve(cmp, n) ((STACK_OF(X509_OBJECT) *)OPENSSL_sk_new_reserve(ossl_check_X509_OBJECT_compfunc_type(cmp), (n)))
#define sk_X509_OBJECT_reserve(sk, n) OPENSSL_sk_reserve(ossl_check_X509_OBJECT_sk_type(sk), (n))
#define sk_X509_OBJECT_free(sk) OPENSSL_sk_free(ossl_check_X509_OBJECT_sk_type(sk))
#define sk_X509_OBJECT_zero(sk) OPENSSL_sk_zero(ossl_check_X509_OBJECT_sk_type(sk))
#define sk_X509_OBJECT_delete(sk, i) ((X509_OBJECT *)OPENSSL_sk_delete(ossl_check_X509_OBJECT_sk_type(sk), (i)))
#define sk_X509_OBJECT_delete_ptr(sk, ptr) ((X509_OBJECT *)OPENSSL_sk_delete_ptr(ossl_check_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_type(ptr)))
#define sk_X509_OBJECT_push(sk, ptr) OPENSSL_sk_push(ossl_check_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_type(ptr))
#define sk_X509_OBJECT_unshift(sk, ptr) OPENSSL_sk_unshift(ossl_check_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_type(ptr))
#define sk_X509_OBJECT_pop(sk) ((X509_OBJECT *)OPENSSL_sk_pop(ossl_check_X509_OBJECT_sk_type(sk)))
#define sk_X509_OBJECT_shift(sk) ((X509_OBJECT *)OPENSSL_sk_shift(ossl_check_X509_OBJECT_sk_type(sk)))
#define sk_X509_OBJECT_pop_free(sk, freefunc) OPENSSL_sk_pop_free(ossl_check_X509_OBJECT_sk_type(sk),ossl_check_X509_OBJECT_freefunc_type(freefunc))
#define sk_X509_OBJECT_insert(sk, ptr, idx) OPENSSL_sk_insert(ossl_check_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_type(ptr), (idx))
#define sk_X509_OBJECT_set(sk, idx, ptr) ((X509_OBJECT *)OPENSSL_sk_set(ossl_check_X509_OBJECT_sk_type(sk), (idx), ossl_check_X509_OBJECT_type(ptr)))
#define sk_X509_OBJECT_find(sk, ptr) OPENSSL_sk_find(ossl_check_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_type(ptr))
#define sk_X509_OBJECT_find_ex(sk, ptr) OPENSSL_sk_find_ex(ossl_check_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_type(ptr))
#define sk_X509_OBJECT_sort(sk) OPENSSL_sk_sort(ossl_check_X509_OBJECT_sk_type(sk))
#define sk_X509_OBJECT_is_sorted(sk) OPENSSL_sk_is_sorted(ossl_check_const_X509_OBJECT_sk_type(sk))
#define sk_X509_OBJECT_dup(sk) ((STACK_OF(X509_OBJECT) *)OPENSSL_sk_dup(ossl_check_const_X509_OBJECT_sk_type(sk)))
#define sk_X509_OBJECT_deep_copy(sk, copyfunc, freefunc) ((STACK_OF(X509_OBJECT) *)OPENSSL_sk_deep_copy(ossl_check_const_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_copyfunc_type(copyfunc), ossl_check_X509_OBJECT_freefunc_type(freefunc)))
#define sk_X509_OBJECT_set_cmp_func(sk, cmp) ((sk_X509_OBJECT_compfunc)OPENSSL_sk_set_cmp_func(ossl_check_X509_OBJECT_sk_type(sk), ossl_check_X509_OBJECT_compfunc_type(cmp)))
SKM_DEFINE_STACK_OF_INTERNAL(X509_VERIFY_PARAM, X509_VERIFY_PARAM, X509_VERIFY_PARAM)
#define sk_X509_VERIFY_PARAM_num(sk) OPENSSL_sk_num(ossl_check_const_X509_VERIFY_PARAM_sk_type(sk))
#define sk_X509_VERIFY_PARAM_value(sk, idx) ((X509_VERIFY_PARAM *)OPENSSL_sk_value(ossl_check_const_X509_VERIFY_PARAM_sk_type(sk), (idx)))
#define sk_X509_VERIFY_PARAM_new(cmp) ((STACK_OF(X509_VERIFY_PARAM) *)OPENSSL_sk_new(ossl_check_X509_VERIFY_PARAM_compfunc_type(cmp)))
#define sk_X509_VERIFY_PARAM_new_null() ((STACK_OF(X509_VERIFY_PARAM) *)OPENSSL_sk_new_null())
#define sk_X509_VERIFY_PARAM_new_reserve(cmp, n) ((STACK_OF(X509_VERIFY_PARAM) *)OPENSSL_sk_new_reserve(ossl_check_X509_VERIFY_PARAM_compfunc_type(cmp), (n)))
#define sk_X509_VERIFY_PARAM_reserve(sk, n) OPENSSL_sk_reserve(ossl_check_X509_VERIFY_PARAM_sk_type(sk), (n))
#define sk_X509_VERIFY_PARAM_free(sk) OPENSSL_sk_free(ossl_check_X509_VERIFY_PARAM_sk_type(sk))
#define sk_X509_VERIFY_PARAM_zero(sk) OPENSSL_sk_zero(ossl_check_X509_VERIFY_PARAM_sk_type(sk))
#define sk_X509_VERIFY_PARAM_delete(sk, i) ((X509_VERIFY_PARAM *)OPENSSL_sk_delete(ossl_check_X509_VERIFY_PARAM_sk_type(sk), (i)))
#define sk_X509_VERIFY_PARAM_delete_ptr(sk, ptr) ((X509_VERIFY_PARAM *)OPENSSL_sk_delete_ptr(ossl_check_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_type(ptr)))
#define sk_X509_VERIFY_PARAM_push(sk, ptr) OPENSSL_sk_push(ossl_check_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_type(ptr))
#define sk_X509_VERIFY_PARAM_unshift(sk, ptr) OPENSSL_sk_unshift(ossl_check_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_type(ptr))
#define sk_X509_VERIFY_PARAM_pop(sk) ((X509_VERIFY_PARAM *)OPENSSL_sk_pop(ossl_check_X509_VERIFY_PARAM_sk_type(sk)))
#define sk_X509_VERIFY_PARAM_shift(sk) ((X509_VERIFY_PARAM *)OPENSSL_sk_shift(ossl_check_X509_VERIFY_PARAM_sk_type(sk)))
#define sk_X509_VERIFY_PARAM_pop_free(sk, freefunc) OPENSSL_sk_pop_free(ossl_check_X509_VERIFY_PARAM_sk_type(sk),ossl_check_X509_VERIFY_PARAM_freefunc_type(freefunc))
#define sk_X509_VERIFY_PARAM_insert(sk, ptr, idx) OPENSSL_sk_insert(ossl_check_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_type(ptr), (idx))
#define sk_X509_VERIFY_PARAM_set(sk, idx, ptr) ((X509_VERIFY_PARAM *)OPENSSL_sk_set(ossl_check_X509_VERIFY_PARAM_sk_type(sk), (idx), ossl_check_X509_VERIFY_PARAM_type(ptr)))
#define sk_X509_VERIFY_PARAM_find(sk, ptr) OPENSSL_sk_find(ossl_check_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_type(ptr))
#define sk_X509_VERIFY_PARAM_find_ex(sk, ptr) OPENSSL_sk_find_ex(ossl_check_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_type(ptr))
#define sk_X509_VERIFY_PARAM_sort(sk) OPENSSL_sk_sort(ossl_check_X509_VERIFY_PARAM_sk_type(sk))
#define sk_X509_VERIFY_PARAM_is_sorted(sk) OPENSSL_sk_is_sorted(ossl_check_const_X509_VERIFY_PARAM_sk_type(sk))
#define sk_X509_VERIFY_PARAM_dup(sk) ((STACK_OF(X509_VERIFY_PARAM) *)OPENSSL_sk_dup(ossl_check_const_X509_VERIFY_PARAM_sk_type(sk)))
#define sk_X509_VERIFY_PARAM_deep_copy(sk, copyfunc, freefunc) ((STACK_OF(X509_VERIFY_PARAM) *)OPENSSL_sk_deep_copy(ossl_check_const_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_copyfunc_type(copyfunc), ossl_check_X509_VERIFY_PARAM_freefunc_type(freefunc)))
#define sk_X509_VERIFY_PARAM_set_cmp_func(sk, cmp) ((sk_X509_VERIFY_PARAM_compfunc)OPENSSL_sk_set_cmp_func(ossl_check_X509_VERIFY_PARAM_sk_type(sk), ossl_check_X509_VERIFY_PARAM_compfunc_type(cmp)))
int X509_STORE_set_depth(X509_STORE *store, int depth);
typedef int (*X509_STORE_CTX_verify_cb)(int, X509_STORE_CTX *);
int X509_STORE_CTX_print_verify_cb(int ok, X509_STORE_CTX *ctx);
typedef int (*X509_STORE_CTX_verify_fn)(X509_STORE_CTX *);
typedef int (*X509_STORE_CTX_get_issuer_fn)(X509 **issuer,
X509_STORE_CTX *ctx, X509 *x);
typedef int (*X509_STORE_CTX_check_issued_fn)(X509_STORE_CTX *ctx,
X509 *x, X509 *issuer);
typedef int (*X509_STORE_CTX_check_revocation_fn)(X509_STORE_CTX *ctx);
typedef int (*X509_STORE_CTX_get_crl_fn)(X509_STORE_CTX *ctx,
X509_CRL **crl, X509 *x);
typedef int (*X509_STORE_CTX_check_crl_fn)(X509_STORE_CTX *ctx, X509_CRL *crl);
typedef int (*X509_STORE_CTX_cert_crl_fn)(X509_STORE_CTX *ctx,
X509_CRL *crl, X509 *x);
typedef int (*X509_STORE_CTX_check_policy_fn)(X509_STORE_CTX *ctx);
typedef STACK_OF(X509)
*(*X509_STORE_CTX_lookup_certs_fn)(X509_STORE_CTX *ctx,
const X509_NAME *nm);
typedef STACK_OF(X509_CRL)
*(*X509_STORE_CTX_lookup_crls_fn)(const X509_STORE_CTX *ctx,
const X509_NAME *nm);
typedef int (*X509_STORE_CTX_cleanup_fn)(X509_STORE_CTX *ctx);
void X509_STORE_CTX_set_depth(X509_STORE_CTX *ctx, int depth);
# define X509_STORE_CTX_set_app_data(ctx,data) \
X509_STORE_CTX_set_ex_data(ctx,0,data)
# define X509_STORE_CTX_get_app_data(ctx) \
X509_STORE_CTX_get_ex_data(ctx,0)
# define X509_L_FILE_LOAD 1
# define X509_L_ADD_DIR 2
# define X509_L_ADD_STORE 3
# define X509_L_LOAD_STORE 4
# define X509_LOOKUP_load_file(x,name,type) \
X509_LOOKUP_ctrl((x),X509_L_FILE_LOAD,(name),(long)(type),NULL)
# define X509_LOOKUP_add_dir(x,name,type) \
X509_LOOKUP_ctrl((x),X509_L_ADD_DIR,(name),(long)(type),NULL)
# define X509_LOOKUP_add_store(x,name) \
X509_LOOKUP_ctrl((x),X509_L_ADD_STORE,(name),0,NULL)
# define X509_LOOKUP_load_store(x,name) \
X509_LOOKUP_ctrl((x),X509_L_LOAD_STORE,(name),0,NULL)
# define X509_LOOKUP_load_file_with_libctx(x, name, type, libctx, propq) \
X509_LOOKUP_ctrl_with_libctx((x), X509_L_FILE_LOAD, (name), (long)(type), NULL,\
(libctx), (propq))
# define X509_LOOKUP_load_store_with_libctx(x, name, libctx, propq) \
X509_LOOKUP_ctrl_with_libctx((x), X509_L_LOAD_STORE, (name), 0, NULL, \
(libctx), (propq))
# define X509_LOOKUP_add_store_with_libctx(x, name, libctx, propq) \
X509_LOOKUP_ctrl_with_libctx((x), X509_L_ADD_STORE, (name), 0, NULL, \
(libctx), (propq))
# define X509_V_OK 0
# define X509_V_ERR_UNSPECIFIED 1
# define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT 2
# define X509_V_ERR_UNABLE_TO_GET_CRL 3
# define X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE 4
# define X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE 5
# define X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY 6
# define X509_V_ERR_CERT_SIGNATURE_FAILURE 7
# define X509_V_ERR_CRL_SIGNATURE_FAILURE 8
# define X509_V_ERR_CERT_NOT_YET_VALID 9
# define X509_V_ERR_CERT_HAS_EXPIRED 10
# define X509_V_ERR_CRL_NOT_YET_VALID 11
# define X509_V_ERR_CRL_HAS_EXPIRED 12
# define X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD 13
# define X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD 14
# define X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD 15
# define X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD 16
# define X509_V_ERR_OUT_OF_MEM 17
# define X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT 18
# define X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN 19
# define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY 20
# define X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE 21
# define X509_V_ERR_CERT_CHAIN_TOO_LONG 22
# define X509_V_ERR_CERT_REVOKED 23
# define X509_V_ERR_NO_ISSUER_PUBLIC_KEY 24
# define X509_V_ERR_PATH_LENGTH_EXCEEDED 25
# define X509_V_ERR_INVALID_PURPOSE 26
# define X509_V_ERR_CERT_UNTRUSTED 27
# define X509_V_ERR_CERT_REJECTED 28
/* These are 'informational' when looking for issuer cert */
# define X509_V_ERR_SUBJECT_ISSUER_MISMATCH 29
# define X509_V_ERR_AKID_SKID_MISMATCH 30
# define X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH 31
# define X509_V_ERR_KEYUSAGE_NO_CERTSIGN 32
# define X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER 33
# define X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION 34
# define X509_V_ERR_KEYUSAGE_NO_CRL_SIGN 35
# define X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION 36
# define X509_V_ERR_INVALID_NON_CA 37
# define X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED 38
# define X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE 39
# define X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED 40
# define X509_V_ERR_INVALID_EXTENSION 41
# define X509_V_ERR_INVALID_POLICY_EXTENSION 42
# define X509_V_ERR_NO_EXPLICIT_POLICY 43
# define X509_V_ERR_DIFFERENT_CRL_SCOPE 44
# define X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE 45
# define X509_V_ERR_UNNESTED_RESOURCE 46
# define X509_V_ERR_PERMITTED_VIOLATION 47
# define X509_V_ERR_EXCLUDED_VIOLATION 48
# define X509_V_ERR_SUBTREE_MINMAX 49
/* The application is not happy */
# define X509_V_ERR_APPLICATION_VERIFICATION 50
# define X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE 51
# define X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX 52
# define X509_V_ERR_UNSUPPORTED_NAME_SYNTAX 53
# define X509_V_ERR_CRL_PATH_VALIDATION_ERROR 54
/* Another issuer check debug option */
# define X509_V_ERR_PATH_LOOP 55
/* Suite B mode algorithm violation */
# define X509_V_ERR_SUITE_B_INVALID_VERSION 56
# define X509_V_ERR_SUITE_B_INVALID_ALGORITHM 57
# define X509_V_ERR_SUITE_B_INVALID_CURVE 58
# define X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM 59
# define X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED 60
# define X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 61
/* Host, email and IP check errors */
# define X509_V_ERR_HOSTNAME_MISMATCH 62
# define X509_V_ERR_EMAIL_MISMATCH 63
# define X509_V_ERR_IP_ADDRESS_MISMATCH 64
/* DANE TLSA errors */
# define X509_V_ERR_DANE_NO_MATCH 65
/* security level errors */
# define X509_V_ERR_EE_KEY_TOO_SMALL 66
# define X509_V_ERR_CA_KEY_TOO_SMALL 67
# define X509_V_ERR_CA_MD_TOO_WEAK 68
/* Caller error */
# define X509_V_ERR_INVALID_CALL 69
/* Issuer lookup error */
# define X509_V_ERR_STORE_LOOKUP 70
/* Certificate transparency */
# define X509_V_ERR_NO_VALID_SCTS 71
# define X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION 72
/* OCSP status errors */
# define X509_V_ERR_OCSP_VERIFY_NEEDED 73 /* Need OCSP verification */
# define X509_V_ERR_OCSP_VERIFY_FAILED 74 /* Couldn't verify cert through OCSP */
# define X509_V_ERR_OCSP_CERT_UNKNOWN 75 /* Certificate wasn't recognized by the OCSP responder */
# define X509_V_ERR_UNSUPPORTED_SIGNATURE_ALGORITHM 76
# define X509_V_ERR_SIGNATURE_ALGORITHM_MISMATCH 77
/* Errors in case a check in X509_V_FLAG_X509_STRICT mode fails */
# define X509_V_ERR_SIGNATURE_ALGORITHM_INCONSISTENCY 78
# define X509_V_ERR_INVALID_CA 79
# define X509_V_ERR_PATHLEN_INVALID_FOR_NON_CA 80
# define X509_V_ERR_PATHLEN_WITHOUT_KU_KEY_CERT_SIGN 81
# define X509_V_ERR_KU_KEY_CERT_SIGN_INVALID_FOR_NON_CA 82
# define X509_V_ERR_ISSUER_NAME_EMPTY 83
# define X509_V_ERR_SUBJECT_NAME_EMPTY 84
# define X509_V_ERR_MISSING_AUTHORITY_KEY_IDENTIFIER 85
# define X509_V_ERR_MISSING_SUBJECT_KEY_IDENTIFIER 86
# define X509_V_ERR_EMPTY_SUBJECT_ALT_NAME 87
# define X509_V_ERR_EMPTY_SUBJECT_SAN_NOT_CRITICAL 88
# define X509_V_ERR_CA_BCONS_NOT_CRITICAL 89
# define X509_V_ERR_AUTHORITY_KEY_IDENTIFIER_CRITICAL 90
# define X509_V_ERR_SUBJECT_KEY_IDENTIFIER_CRITICAL 91
# define X509_V_ERR_CA_CERT_MISSING_KEY_USAGE 92
# define X509_V_ERR_EXTENSIONS_REQUIRE_VERSION_3 93
# define X509_V_ERR_EC_KEY_EXPLICIT_PARAMS 94
/* Certificate verify flags */
# ifndef OPENSSL_NO_DEPRECATED_1_1_0
# define X509_V_FLAG_CB_ISSUER_CHECK 0x0 /* Deprecated */
# endif
/* Use check time instead of current time */
# define X509_V_FLAG_USE_CHECK_TIME 0x2
/* Lookup CRLs */
# define X509_V_FLAG_CRL_CHECK 0x4
/* Lookup CRLs for whole chain */
# define X509_V_FLAG_CRL_CHECK_ALL 0x8
/* Ignore unhandled critical extensions */
# define X509_V_FLAG_IGNORE_CRITICAL 0x10
/* Disable workarounds for broken certificates */
# define X509_V_FLAG_X509_STRICT 0x20
/* Enable proxy certificate validation */
# define X509_V_FLAG_ALLOW_PROXY_CERTS 0x40
/* Enable policy checking */
# define X509_V_FLAG_POLICY_CHECK 0x80
/* Policy variable require-explicit-policy */
# define X509_V_FLAG_EXPLICIT_POLICY 0x100
/* Policy variable inhibit-any-policy */
# define X509_V_FLAG_INHIBIT_ANY 0x200
/* Policy variable inhibit-policy-mapping */
# define X509_V_FLAG_INHIBIT_MAP 0x400
/* Notify callback that policy is OK */
# define X509_V_FLAG_NOTIFY_POLICY 0x800
/* Extended CRL features such as indirect CRLs, alternate CRL signing keys */
# define X509_V_FLAG_EXTENDED_CRL_SUPPORT 0x1000
/* Delta CRL support */
# define X509_V_FLAG_USE_DELTAS 0x2000
/* Check self-signed CA signature */
# define X509_V_FLAG_CHECK_SS_SIGNATURE 0x4000
/* Use trusted store first */
# define X509_V_FLAG_TRUSTED_FIRST 0x8000
/* Suite B 128 bit only mode: not normally used */
# define X509_V_FLAG_SUITEB_128_LOS_ONLY 0x10000
/* Suite B 192 bit only mode */
# define X509_V_FLAG_SUITEB_192_LOS 0x20000
/* Suite B 128 bit mode allowing 192 bit algorithms */
# define X509_V_FLAG_SUITEB_128_LOS 0x30000
/* Allow partial chains if at least one certificate is in trusted store */
# define X509_V_FLAG_PARTIAL_CHAIN 0x80000
/*
* If the initial chain is not trusted, do not attempt to build an alternative
* chain. Alternate chain checking was introduced in 1.1.0. Setting this flag
* will force the behaviour to match that of previous versions.
*/
# define X509_V_FLAG_NO_ALT_CHAINS 0x100000
/* Do not check certificate/CRL validity against current time */
# define X509_V_FLAG_NO_CHECK_TIME 0x200000
# define X509_VP_FLAG_DEFAULT 0x1
# define X509_VP_FLAG_OVERWRITE 0x2
# define X509_VP_FLAG_RESET_FLAGS 0x4
# define X509_VP_FLAG_LOCKED 0x8
# define X509_VP_FLAG_ONCE 0x10
/* Internal use: mask of policy related options */
# define X509_V_FLAG_POLICY_MASK (X509_V_FLAG_POLICY_CHECK \
| X509_V_FLAG_EXPLICIT_POLICY \
| X509_V_FLAG_INHIBIT_ANY \
| X509_V_FLAG_INHIBIT_MAP)
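/*
 * Illustrative sketch (not part of the original header): typical use of the
 * verify flags above together with the store/ctx API declared below. Error
 * handling is trimmed; X509_verify_cert() and X509_verify_cert_error_string()
 * are declared elsewhere in the library headers.
 *
 *     int verify_with_crl_check(X509 *cert, const char *ca_file)
 *     {
 *         X509_STORE *store = X509_STORE_new();
 *         X509_STORE_CTX *ctx = X509_STORE_CTX_new();
 *         int err = X509_V_OK;
 *
 *         X509_STORE_load_file(store, ca_file);
 *         X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK
 *                                     | X509_V_FLAG_CRL_CHECK_ALL);
 *         X509_STORE_CTX_init(ctx, store, cert, NULL);
 *         if (X509_verify_cert(ctx) != 1) {
 *             err = X509_STORE_CTX_get_error(ctx);
 *             fprintf(stderr, "%s\n", X509_verify_cert_error_string(err));
 *         }
 *         X509_STORE_CTX_free(ctx);
 *         X509_STORE_free(store);
 *         return err == X509_V_OK;
 *     }
 */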
int X509_OBJECT_idx_by_subject(STACK_OF(X509_OBJECT) *h, X509_LOOKUP_TYPE type,
const X509_NAME *name);
X509_OBJECT *X509_OBJECT_retrieve_by_subject(STACK_OF(X509_OBJECT) *h,
X509_LOOKUP_TYPE type,
const X509_NAME *name);
X509_OBJECT *X509_OBJECT_retrieve_match(STACK_OF(X509_OBJECT) *h,
X509_OBJECT *x);
int X509_OBJECT_up_ref_count(X509_OBJECT *a);
X509_OBJECT *X509_OBJECT_new(void);
void X509_OBJECT_free(X509_OBJECT *a);
X509_LOOKUP_TYPE X509_OBJECT_get_type(const X509_OBJECT *a);
X509 *X509_OBJECT_get0_X509(const X509_OBJECT *a);
int X509_OBJECT_set1_X509(X509_OBJECT *a, X509 *obj);
X509_CRL *X509_OBJECT_get0_X509_CRL(const X509_OBJECT *a);
int X509_OBJECT_set1_X509_CRL(X509_OBJECT *a, X509_CRL *obj);
X509_STORE *X509_STORE_new(void);
void X509_STORE_free(X509_STORE *v);
int X509_STORE_lock(X509_STORE *ctx);
int X509_STORE_unlock(X509_STORE *ctx);
int X509_STORE_up_ref(X509_STORE *v);
STACK_OF(X509_OBJECT) *X509_STORE_get0_objects(const X509_STORE *v);
STACK_OF(X509) *X509_STORE_get1_all_certs(X509_STORE *st);
STACK_OF(X509) *X509_STORE_CTX_get1_certs(X509_STORE_CTX *st,
const X509_NAME *nm);
STACK_OF(X509_CRL) *X509_STORE_CTX_get1_crls(const X509_STORE_CTX *st,
const X509_NAME *nm);
int X509_STORE_set_flags(X509_STORE *ctx, unsigned long flags);
int X509_STORE_set_purpose(X509_STORE *ctx, int purpose);
int X509_STORE_set_trust(X509_STORE *ctx, int trust);
int X509_STORE_set1_param(X509_STORE *ctx, const X509_VERIFY_PARAM *pm);
X509_VERIFY_PARAM *X509_STORE_get0_param(const X509_STORE *ctx);
void X509_STORE_set_verify(X509_STORE *ctx, X509_STORE_CTX_verify_fn verify);
#define X509_STORE_set_verify_func(ctx, func) \
X509_STORE_set_verify((ctx),(func))
void X509_STORE_CTX_set_verify(X509_STORE_CTX *ctx,
X509_STORE_CTX_verify_fn verify);
X509_STORE_CTX_verify_fn X509_STORE_get_verify(const X509_STORE *ctx);
void X509_STORE_set_verify_cb(X509_STORE *ctx,
X509_STORE_CTX_verify_cb verify_cb);
# define X509_STORE_set_verify_cb_func(ctx,func) \
X509_STORE_set_verify_cb((ctx),(func))
X509_STORE_CTX_verify_cb X509_STORE_get_verify_cb(const X509_STORE *ctx);
void X509_STORE_set_get_issuer(X509_STORE *ctx,
X509_STORE_CTX_get_issuer_fn get_issuer);
X509_STORE_CTX_get_issuer_fn X509_STORE_get_get_issuer(const X509_STORE *ctx);
void X509_STORE_set_check_issued(X509_STORE *ctx,
X509_STORE_CTX_check_issued_fn check_issued);
X509_STORE_CTX_check_issued_fn X509_STORE_get_check_issued(const X509_STORE *ctx);
void X509_STORE_set_check_revocation(X509_STORE *ctx,
X509_STORE_CTX_check_revocation_fn check_revocation);
X509_STORE_CTX_check_revocation_fn
X509_STORE_get_check_revocation(const X509_STORE *ctx);
void X509_STORE_set_get_crl(X509_STORE *ctx,
X509_STORE_CTX_get_crl_fn get_crl);
X509_STORE_CTX_get_crl_fn X509_STORE_get_get_crl(const X509_STORE *ctx);
void X509_STORE_set_check_crl(X509_STORE *ctx,
X509_STORE_CTX_check_crl_fn check_crl);
X509_STORE_CTX_check_crl_fn X509_STORE_get_check_crl(const X509_STORE *ctx);
void X509_STORE_set_cert_crl(X509_STORE *ctx,
X509_STORE_CTX_cert_crl_fn cert_crl);
X509_STORE_CTX_cert_crl_fn X509_STORE_get_cert_crl(const X509_STORE *ctx);
void X509_STORE_set_check_policy(X509_STORE *ctx,
X509_STORE_CTX_check_policy_fn check_policy);
X509_STORE_CTX_check_policy_fn X509_STORE_get_check_policy(const X509_STORE *ctx);
void X509_STORE_set_lookup_certs(X509_STORE *ctx,
X509_STORE_CTX_lookup_certs_fn lookup_certs);
X509_STORE_CTX_lookup_certs_fn X509_STORE_get_lookup_certs(const X509_STORE *ctx);
void X509_STORE_set_lookup_crls(X509_STORE *ctx,
X509_STORE_CTX_lookup_crls_fn lookup_crls);
#define X509_STORE_set_lookup_crls_cb(ctx, func) \
X509_STORE_set_lookup_crls((ctx), (func))
X509_STORE_CTX_lookup_crls_fn X509_STORE_get_lookup_crls(const X509_STORE *ctx);
void X509_STORE_set_cleanup(X509_STORE *ctx,
X509_STORE_CTX_cleanup_fn cleanup);
X509_STORE_CTX_cleanup_fn X509_STORE_get_cleanup(const X509_STORE *ctx);
#define X509_STORE_get_ex_new_index(l, p, newf, dupf, freef) \
CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_X509_STORE, l, p, newf, dupf, freef)
int X509_STORE_set_ex_data(X509_STORE *ctx, int idx, void *data);
void *X509_STORE_get_ex_data(const X509_STORE *ctx, int idx);
X509_STORE_CTX *X509_STORE_CTX_new_with_libctx(OPENSSL_CTX *libctx,
const char *propq);
X509_STORE_CTX *X509_STORE_CTX_new(void);
int X509_STORE_CTX_get1_issuer(X509 **issuer, X509_STORE_CTX *ctx, X509 *x);
void X509_STORE_CTX_free(X509_STORE_CTX *ctx);
int X509_STORE_CTX_init(X509_STORE_CTX *ctx, X509_STORE *store,
X509 *x509, STACK_OF(X509) *chain);
void X509_STORE_CTX_set0_trusted_stack(X509_STORE_CTX *ctx, STACK_OF(X509) *sk);
void X509_STORE_CTX_cleanup(X509_STORE_CTX *ctx);
X509_STORE *X509_STORE_CTX_get0_store(const X509_STORE_CTX *ctx);
X509 *X509_STORE_CTX_get0_cert(const X509_STORE_CTX *ctx);
STACK_OF(X509)* X509_STORE_CTX_get0_untrusted(const X509_STORE_CTX *ctx);
void X509_STORE_CTX_set0_untrusted(X509_STORE_CTX *ctx, STACK_OF(X509) *sk);
void X509_STORE_CTX_set_verify_cb(X509_STORE_CTX *ctx,
X509_STORE_CTX_verify_cb verify);
X509_STORE_CTX_verify_cb X509_STORE_CTX_get_verify_cb(const X509_STORE_CTX *ctx);
X509_STORE_CTX_verify_fn X509_STORE_CTX_get_verify(const X509_STORE_CTX *ctx);
X509_STORE_CTX_get_issuer_fn X509_STORE_CTX_get_get_issuer(const X509_STORE_CTX *ctx);
X509_STORE_CTX_check_issued_fn X509_STORE_CTX_get_check_issued(const X509_STORE_CTX *ctx);
X509_STORE_CTX_check_revocation_fn X509_STORE_CTX_get_check_revocation(const X509_STORE_CTX *ctx);
X509_STORE_CTX_get_crl_fn X509_STORE_CTX_get_get_crl(const X509_STORE_CTX *ctx);
X509_STORE_CTX_check_crl_fn X509_STORE_CTX_get_check_crl(const X509_STORE_CTX *ctx);
X509_STORE_CTX_cert_crl_fn X509_STORE_CTX_get_cert_crl(const X509_STORE_CTX *ctx);
X509_STORE_CTX_check_policy_fn X509_STORE_CTX_get_check_policy(const X509_STORE_CTX *ctx);
X509_STORE_CTX_lookup_certs_fn X509_STORE_CTX_get_lookup_certs(const X509_STORE_CTX *ctx);
X509_STORE_CTX_lookup_crls_fn X509_STORE_CTX_get_lookup_crls(const X509_STORE_CTX *ctx);
X509_STORE_CTX_cleanup_fn X509_STORE_CTX_get_cleanup(const X509_STORE_CTX *ctx);
#ifndef OPENSSL_NO_DEPRECATED_1_1_0
# define X509_STORE_CTX_get_chain X509_STORE_CTX_get0_chain
# define X509_STORE_CTX_set_chain X509_STORE_CTX_set0_untrusted
# define X509_STORE_CTX_trusted_stack X509_STORE_CTX_set0_trusted_stack
# define X509_STORE_get_by_subject X509_STORE_CTX_get_by_subject
# define X509_STORE_get1_certs X509_STORE_CTX_get1_certs
# define X509_STORE_get1_crls X509_STORE_CTX_get1_crls
/* the following macro is misspelled; use X509_STORE_get1_certs instead */
# define X509_STORE_get1_cert X509_STORE_CTX_get1_certs
/* the following macro is misspelled; use X509_STORE_get1_crls instead */
# define X509_STORE_get1_crl X509_STORE_CTX_get1_crls
#endif
X509_LOOKUP *X509_STORE_add_lookup(X509_STORE *v, X509_LOOKUP_METHOD *m);
X509_LOOKUP_METHOD *X509_LOOKUP_hash_dir(void);
X509_LOOKUP_METHOD *X509_LOOKUP_file(void);
X509_LOOKUP_METHOD *X509_LOOKUP_store(void);
typedef int (*X509_LOOKUP_ctrl_fn)(X509_LOOKUP *ctx, int cmd, const char *argc,
long argl, char **ret);
typedef int (*X509_LOOKUP_ctrl_with_libctx_fn)(
X509_LOOKUP *ctx, int cmd, const char *argc, long argl, char **ret,
OPENSSL_CTX *libctx, const char *propq);
typedef int (*X509_LOOKUP_get_by_subject_fn)(X509_LOOKUP *ctx,
X509_LOOKUP_TYPE type,
const X509_NAME *name,
X509_OBJECT *ret);
typedef int (*X509_LOOKUP_get_by_subject_with_libctx_fn)(X509_LOOKUP *ctx,
X509_LOOKUP_TYPE type,
const X509_NAME *name,
X509_OBJECT *ret,
OPENSSL_CTX *libctx,
const char *propq);
typedef int (*X509_LOOKUP_get_by_issuer_serial_fn)(X509_LOOKUP *ctx,
X509_LOOKUP_TYPE type,
const X509_NAME *name,
const ASN1_INTEGER *serial,
X509_OBJECT *ret);
typedef int (*X509_LOOKUP_get_by_fingerprint_fn)(X509_LOOKUP *ctx,
X509_LOOKUP_TYPE type,
const unsigned char* bytes,
int len,
X509_OBJECT *ret);
typedef int (*X509_LOOKUP_get_by_alias_fn)(X509_LOOKUP *ctx,
X509_LOOKUP_TYPE type,
const char *str,
int len,
X509_OBJECT *ret);
X509_LOOKUP_METHOD *X509_LOOKUP_meth_new(const char *name);
void X509_LOOKUP_meth_free(X509_LOOKUP_METHOD *method);
int X509_LOOKUP_meth_set_new_item(X509_LOOKUP_METHOD *method,
int (*new_item) (X509_LOOKUP *ctx));
int (*X509_LOOKUP_meth_get_new_item(const X509_LOOKUP_METHOD* method))
(X509_LOOKUP *ctx);
int X509_LOOKUP_meth_set_free(X509_LOOKUP_METHOD *method,
void (*free_fn) (X509_LOOKUP *ctx));
void (*X509_LOOKUP_meth_get_free(const X509_LOOKUP_METHOD* method))
(X509_LOOKUP *ctx);
int X509_LOOKUP_meth_set_init(X509_LOOKUP_METHOD *method,
int (*init) (X509_LOOKUP *ctx));
int (*X509_LOOKUP_meth_get_init(const X509_LOOKUP_METHOD* method))
(X509_LOOKUP *ctx);
int X509_LOOKUP_meth_set_shutdown(X509_LOOKUP_METHOD *method,
int (*shutdown) (X509_LOOKUP *ctx));
int (*X509_LOOKUP_meth_get_shutdown(const X509_LOOKUP_METHOD* method))
(X509_LOOKUP *ctx);
int X509_LOOKUP_meth_set_ctrl(X509_LOOKUP_METHOD *method,
X509_LOOKUP_ctrl_fn ctrl_fn);
X509_LOOKUP_ctrl_fn X509_LOOKUP_meth_get_ctrl(const X509_LOOKUP_METHOD *method);
int X509_LOOKUP_meth_set_get_by_subject(X509_LOOKUP_METHOD *method,
X509_LOOKUP_get_by_subject_fn fn);
X509_LOOKUP_get_by_subject_fn X509_LOOKUP_meth_get_get_by_subject(
const X509_LOOKUP_METHOD *method);
int X509_LOOKUP_meth_set_get_by_issuer_serial(X509_LOOKUP_METHOD *method,
X509_LOOKUP_get_by_issuer_serial_fn fn);
X509_LOOKUP_get_by_issuer_serial_fn X509_LOOKUP_meth_get_get_by_issuer_serial(
const X509_LOOKUP_METHOD *method);
int X509_LOOKUP_meth_set_get_by_fingerprint(X509_LOOKUP_METHOD *method,
X509_LOOKUP_get_by_fingerprint_fn fn);
X509_LOOKUP_get_by_fingerprint_fn X509_LOOKUP_meth_get_get_by_fingerprint(
const X509_LOOKUP_METHOD *method);
int X509_LOOKUP_meth_set_get_by_alias(X509_LOOKUP_METHOD *method,
X509_LOOKUP_get_by_alias_fn fn);
X509_LOOKUP_get_by_alias_fn X509_LOOKUP_meth_get_get_by_alias(
const X509_LOOKUP_METHOD *method);
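/*
 * Illustrative sketch (not part of the original header): registering a
 * minimal custom lookup method with the setters above. The callback body is
 * a placeholder; a real method would locate a matching object, add it to the
 * store and fill in 'ret' (returning 1 on success, 0 when nothing is found).
 *
 *     static int my_get_by_subject(X509_LOOKUP *lu, X509_LOOKUP_TYPE type,
 *                                  const X509_NAME *name, X509_OBJECT *ret)
 *     {
 *         return 0;               // 0 = not found, 1 = found
 *     }
 *
 *     X509_LOOKUP_METHOD *meth = X509_LOOKUP_meth_new("my_lookup");
 *     X509_LOOKUP_meth_set_get_by_subject(meth, my_get_by_subject);
 *     X509_LOOKUP *lu = X509_STORE_add_lookup(store, meth);
 */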
int X509_STORE_add_cert(X509_STORE *ctx, X509 *x);
int X509_STORE_add_crl(X509_STORE *ctx, X509_CRL *x);
int X509_STORE_CTX_get_by_subject(const X509_STORE_CTX *vs,
X509_LOOKUP_TYPE type,
const X509_NAME *name, X509_OBJECT *ret);
X509_OBJECT *X509_STORE_CTX_get_obj_by_subject(X509_STORE_CTX *vs,
X509_LOOKUP_TYPE type,
const X509_NAME *name);
int X509_LOOKUP_ctrl(X509_LOOKUP *ctx, int cmd, const char *argc,
long argl, char **ret);
int X509_LOOKUP_ctrl_with_libctx(X509_LOOKUP *ctx, int cmd, const char *argc,
long argl, char **ret,
OPENSSL_CTX *libctx, const char *propq);
int X509_load_cert_file(X509_LOOKUP *ctx, const char *file, int type);
int X509_load_cert_file_with_libctx(X509_LOOKUP *ctx, const char *file, int type,
OPENSSL_CTX *libctx, const char *propq);
int X509_load_crl_file(X509_LOOKUP *ctx, const char *file, int type);
int X509_load_cert_crl_file(X509_LOOKUP *ctx, const char *file, int type);
int X509_load_cert_crl_file_with_libctx(X509_LOOKUP *ctx, const char *file,
int type, OPENSSL_CTX *libctx,
const char *propq);
X509_LOOKUP *X509_LOOKUP_new(X509_LOOKUP_METHOD *method);
void X509_LOOKUP_free(X509_LOOKUP *ctx);
int X509_LOOKUP_init(X509_LOOKUP *ctx);
int X509_LOOKUP_by_subject(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type,
const X509_NAME *name, X509_OBJECT *ret);
int X509_LOOKUP_by_subject_with_libctx(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type,
const X509_NAME *name, X509_OBJECT *ret,
OPENSSL_CTX *libctx, const char *propq);
int X509_LOOKUP_by_issuer_serial(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type,
const X509_NAME *name,
const ASN1_INTEGER *serial,
X509_OBJECT *ret);
int X509_LOOKUP_by_fingerprint(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type,
const unsigned char *bytes, int len,
X509_OBJECT *ret);
int X509_LOOKUP_by_alias(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type,
const char *str, int len, X509_OBJECT *ret);
int X509_LOOKUP_set_method_data(X509_LOOKUP *ctx, void *data);
void *X509_LOOKUP_get_method_data(const X509_LOOKUP *ctx);
X509_STORE *X509_LOOKUP_get_store(const X509_LOOKUP *ctx);
int X509_LOOKUP_shutdown(X509_LOOKUP *ctx);
int X509_STORE_load_file(X509_STORE *ctx, const char *file);
int X509_STORE_load_path(X509_STORE *ctx, const char *path);
int X509_STORE_load_store(X509_STORE *ctx, const char *store);
int X509_STORE_load_locations(X509_STORE *ctx,
const char *file,
const char *dir);
int X509_STORE_set_default_paths(X509_STORE *ctx);
int X509_STORE_load_file_with_libctx(X509_STORE *ctx, const char *file,
OPENSSL_CTX *libctx, const char *propq);
int X509_STORE_load_store_with_libctx(X509_STORE *ctx, const char *store,
OPENSSL_CTX *libctx, const char *propq);
int X509_STORE_load_locations_with_libctx(X509_STORE *ctx,
const char *file, const char *dir,
OPENSSL_CTX *libctx, const char *propq);
int X509_STORE_set_default_paths_with_libctx(X509_STORE *ctx,
OPENSSL_CTX *libctx,
const char *propq);
#define X509_STORE_CTX_get_ex_new_index(l, p, newf, dupf, freef) \
CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_X509_STORE_CTX, l, p, newf, dupf, freef)
int X509_STORE_CTX_set_ex_data(X509_STORE_CTX *ctx, int idx, void *data);
void *X509_STORE_CTX_get_ex_data(const X509_STORE_CTX *ctx, int idx);
int X509_STORE_CTX_get_error(const X509_STORE_CTX *ctx);
void X509_STORE_CTX_set_error(X509_STORE_CTX *ctx, int s);
int X509_STORE_CTX_get_error_depth(const X509_STORE_CTX *ctx);
void X509_STORE_CTX_set_error_depth(X509_STORE_CTX *ctx, int depth);
X509 *X509_STORE_CTX_get_current_cert(const X509_STORE_CTX *ctx);
void X509_STORE_CTX_set_current_cert(X509_STORE_CTX *ctx, X509 *x);
X509 *X509_STORE_CTX_get0_current_issuer(const X509_STORE_CTX *ctx);
X509_CRL *X509_STORE_CTX_get0_current_crl(const X509_STORE_CTX *ctx);
X509_STORE_CTX *X509_STORE_CTX_get0_parent_ctx(const X509_STORE_CTX *ctx);
STACK_OF(X509) *X509_STORE_CTX_get0_chain(const X509_STORE_CTX *ctx);
STACK_OF(X509) *X509_STORE_CTX_get1_chain(const X509_STORE_CTX *ctx);
void X509_STORE_CTX_set_cert(X509_STORE_CTX *c, X509 *x);
void X509_STORE_CTX_set0_verified_chain(X509_STORE_CTX *c, STACK_OF(X509) *sk);
void X509_STORE_CTX_set0_crls(X509_STORE_CTX *c, STACK_OF(X509_CRL) *sk);
int X509_STORE_CTX_set_purpose(X509_STORE_CTX *ctx, int purpose);
int X509_STORE_CTX_set_trust(X509_STORE_CTX *ctx, int trust);
int X509_STORE_CTX_purpose_inherit(X509_STORE_CTX *ctx, int def_purpose,
int purpose, int trust);
void X509_STORE_CTX_set_flags(X509_STORE_CTX *ctx, unsigned long flags);
void X509_STORE_CTX_set_time(X509_STORE_CTX *ctx, unsigned long flags,
time_t t);
X509_POLICY_TREE *X509_STORE_CTX_get0_policy_tree(const X509_STORE_CTX *ctx);
int X509_STORE_CTX_get_explicit_policy(const X509_STORE_CTX *ctx);
int X509_STORE_CTX_get_num_untrusted(const X509_STORE_CTX *ctx);
X509_VERIFY_PARAM *X509_STORE_CTX_get0_param(const X509_STORE_CTX *ctx);
void X509_STORE_CTX_set0_param(X509_STORE_CTX *ctx, X509_VERIFY_PARAM *param);
int X509_STORE_CTX_set_default(X509_STORE_CTX *ctx, const char *name);
/*
 * Bridge opacity barrier between libcrypto and libssl, also needed to support

* offline testing in test/danetest.c
*/
void X509_STORE_CTX_set0_dane(X509_STORE_CTX *ctx, SSL_DANE *dane);
#define DANE_FLAG_NO_DANE_EE_NAMECHECKS (1L << 0)
/* X509_VERIFY_PARAM functions */
X509_VERIFY_PARAM *X509_VERIFY_PARAM_new(void);
void X509_VERIFY_PARAM_free(X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_inherit(X509_VERIFY_PARAM *to,
const X509_VERIFY_PARAM *from);
int X509_VERIFY_PARAM_set1(X509_VERIFY_PARAM *to,
const X509_VERIFY_PARAM *from);
int X509_VERIFY_PARAM_set1_name(X509_VERIFY_PARAM *param, const char *name);
int X509_VERIFY_PARAM_set_flags(X509_VERIFY_PARAM *param,
unsigned long flags);
int X509_VERIFY_PARAM_clear_flags(X509_VERIFY_PARAM *param,
unsigned long flags);
unsigned long X509_VERIFY_PARAM_get_flags(const X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_set_purpose(X509_VERIFY_PARAM *param, int purpose);
int X509_VERIFY_PARAM_set_trust(X509_VERIFY_PARAM *param, int trust);
void X509_VERIFY_PARAM_set_depth(X509_VERIFY_PARAM *param, int depth);
void X509_VERIFY_PARAM_set_auth_level(X509_VERIFY_PARAM *param, int auth_level);
time_t X509_VERIFY_PARAM_get_time(const X509_VERIFY_PARAM *param);
void X509_VERIFY_PARAM_set_time(X509_VERIFY_PARAM *param, time_t t);
int X509_VERIFY_PARAM_add0_policy(X509_VERIFY_PARAM *param,
ASN1_OBJECT *policy);
int X509_VERIFY_PARAM_set1_policies(X509_VERIFY_PARAM *param,
STACK_OF(ASN1_OBJECT) *policies);
int X509_VERIFY_PARAM_set_inh_flags(X509_VERIFY_PARAM *param,
uint32_t flags);
uint32_t X509_VERIFY_PARAM_get_inh_flags(const X509_VERIFY_PARAM *param);
char *X509_VERIFY_PARAM_get0_host(X509_VERIFY_PARAM *param, int idx);
int X509_VERIFY_PARAM_set1_host(X509_VERIFY_PARAM *param,
const char *name, size_t namelen);
int X509_VERIFY_PARAM_add1_host(X509_VERIFY_PARAM *param,
const char *name, size_t namelen);
void X509_VERIFY_PARAM_set_hostflags(X509_VERIFY_PARAM *param,
unsigned int flags);
unsigned int X509_VERIFY_PARAM_get_hostflags(const X509_VERIFY_PARAM *param);
char *X509_VERIFY_PARAM_get0_peername(const X509_VERIFY_PARAM *param);
void X509_VERIFY_PARAM_move_peername(X509_VERIFY_PARAM *, X509_VERIFY_PARAM *);
char *X509_VERIFY_PARAM_get0_email(X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_set1_email(X509_VERIFY_PARAM *param,
const char *email, size_t emaillen);
char *X509_VERIFY_PARAM_get1_ip_asc(X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_set1_ip(X509_VERIFY_PARAM *param,
const unsigned char *ip, size_t iplen);
int X509_VERIFY_PARAM_set1_ip_asc(X509_VERIFY_PARAM *param,
const char *ipasc);
int X509_VERIFY_PARAM_get_depth(const X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_get_auth_level(const X509_VERIFY_PARAM *param);
const char *X509_VERIFY_PARAM_get0_name(const X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_add0_table(X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_get_count(void);
const X509_VERIFY_PARAM *X509_VERIFY_PARAM_get0(int id);
const X509_VERIFY_PARAM *X509_VERIFY_PARAM_lookup(const char *name);
void X509_VERIFY_PARAM_table_cleanup(void);
/* Non positive return values are errors */
#define X509_PCY_TREE_FAILURE -2 /* Failure to satisfy explicit policy */
#define X509_PCY_TREE_INVALID -1 /* Inconsistent or invalid extensions */
#define X509_PCY_TREE_INTERNAL 0 /* Internal error, most likely malloc */
/*
* Positive return values form a bit mask, all but the first are internal to
* the library and don't appear in results from X509_policy_check().
*/
#define X509_PCY_TREE_VALID 1 /* The policy tree is valid */
#define X509_PCY_TREE_EMPTY 2 /* The policy tree is empty */
#define X509_PCY_TREE_EXPLICIT 4 /* Explicit policy required */
int X509_policy_check(X509_POLICY_TREE **ptree, int *pexplicit_policy,
STACK_OF(X509) *certs,
STACK_OF(ASN1_OBJECT) *policy_oids, unsigned int flags);
void X509_policy_tree_free(X509_POLICY_TREE *tree);
int X509_policy_tree_level_count(const X509_POLICY_TREE *tree);
X509_POLICY_LEVEL *X509_policy_tree_get0_level(const X509_POLICY_TREE *tree,
int i);
STACK_OF(X509_POLICY_NODE)
*X509_policy_tree_get0_policies(const X509_POLICY_TREE *tree);
STACK_OF(X509_POLICY_NODE)
*X509_policy_tree_get0_user_policies(const X509_POLICY_TREE *tree);
int X509_policy_level_node_count(X509_POLICY_LEVEL *level);
X509_POLICY_NODE *X509_policy_level_get0_node(const X509_POLICY_LEVEL *level,
int i);
const ASN1_OBJECT *X509_policy_node_get0_policy(const X509_POLICY_NODE *node);
STACK_OF(POLICYQUALINFO)
*X509_policy_node_get0_qualifiers(const X509_POLICY_NODE *node);
const X509_POLICY_NODE
*X509_policy_node_get0_parent(const X509_POLICY_NODE *node);
#ifdef __cplusplus
}
#endif
#endif
|
import torch.nn as nn
class FCN(nn.Module):
def __init__(self, d_model, n_commands, n_args, args_dim=256):
super().__init__()
self.n_args = n_args
self.args_dim = args_dim
self.command_fcn = nn.Linear(d_model, n_commands)
self.args_fcn = nn.Linear(d_model, n_args * args_dim)
def forward(self, out):
S, N, _ = out.shape
command_logits = self.command_fcn(out) # Shape [S, N, n_commands]
args_logits = self.args_fcn(out) # Shape [S, N, n_args * args_dim]
args_logits = args_logits.reshape(S, N, self.n_args, self.args_dim) # Shape [S, N, n_args, args_dim]
return command_logits, args_logits
class HierarchFCN(nn.Module):
def __init__(self, d_model, dim_z):
super().__init__()
self.visibility_fcn = nn.Linear(d_model, 2)
self.z_fcn = nn.Linear(d_model, dim_z)
def forward(self, out):
G, N, _ = out.shape
visibility_logits = self.visibility_fcn(out) # Shape [G, N, 2]
z = self.z_fcn(out) # Shape [G, N, dim_z]
return visibility_logits.unsqueeze(0), z.unsqueeze(0)
class ResNet(nn.Module):
def __init__(self, d_model):
super().__init__()
self.linear1 = nn.Sequential(
nn.Linear(d_model, d_model), nn.ReLU()
)
self.linear2 = nn.Sequential(
nn.Linear(d_model, d_model), nn.ReLU()
)
self.linear3 = nn.Sequential(
nn.Linear(d_model, d_model), nn.ReLU()
)
self.linear4 = nn.Sequential(
nn.Linear(d_model, d_model), nn.ReLU()
)
def forward(self, z):
z = z + self.linear1(z)
z = z + self.linear2(z)
z = z + self.linear3(z)
z = z + self.linear4(z)
return z
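# Illustrative usage sketch (not part of the original module): the dimensions
# below are arbitrary assumptions chosen to exercise the documented shapes.
if __name__ == "__main__":
    import torch

    fcn = FCN(d_model=256, n_commands=6, n_args=16)
    out = torch.randn(64, 8, 256)                    # [S, N, d_model]
    command_logits, args_logits = fcn(out)
    assert command_logits.shape == (64, 8, 6)        # [S, N, n_commands]
    assert args_logits.shape == (64, 8, 16, 256)     # [S, N, n_args, args_dim]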
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sparse_tools.py."""
from __future__ import absolute_import
import numpy
from scipy.sparse import csc_matrix
import unittest
from fermilib.ops import FermionOperator
from fermilib.transforms import jordan_wigner, get_sparse_operator
from fermilib.utils._sparse_tools import *
class SparseOperatorTest(unittest.TestCase):
def test_kronecker_operators(self):
self.assertAlmostEqual(
0, numpy.amax(numpy.absolute(
kronecker_operators(3 * [identity_csc]) -
kronecker_operators(3 * [pauli_x_csc]) ** 2)))
def test_qubit_jw_fermion_integration(self):
# Initialize a random fermionic operator.
fermion_operator = FermionOperator(((3, 1), (2, 1), (1, 0), (0, 0)),
-4.3)
fermion_operator += FermionOperator(((3, 1), (1, 0)), 8.17)
fermion_operator += 3.2 * FermionOperator()
# Map to qubits and compare matrix versions.
qubit_operator = jordan_wigner(fermion_operator)
qubit_sparse = get_sparse_operator(qubit_operator)
qubit_spectrum = sparse_eigenspectrum(qubit_sparse)
fermion_sparse = jordan_wigner_sparse(fermion_operator)
fermion_spectrum = sparse_eigenspectrum(fermion_sparse)
self.assertAlmostEqual(0., numpy.amax(
numpy.absolute(fermion_spectrum - qubit_spectrum)))
class JordanWignerSparseTest(unittest.TestCase):
def test_jw_sparse_0create(self):
expected = csc_matrix(([1], ([1], [0])), shape=(2, 2))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^')).A,
expected.A))
def test_jw_sparse_1annihilate(self):
expected = csc_matrix(([1, 1], ([0, 2], [1, 3])), shape=(4, 4))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('1')).A,
expected.A))
def test_jw_sparse_0create_2annihilate(self):
expected = csc_matrix(([-1j, 1j],
([4, 6], [1, 3])),
shape=(8, 8))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^ 2', -1j)).A,
expected.A))
def test_jw_sparse_0create_3annihilate(self):
expected = csc_matrix(([-1j, 1j, 1j, -1j],
([8, 10, 12, 14], [1, 3, 5, 7])),
shape=(16, 16))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^ 3', -1j)).A,
expected.A))
def test_jw_sparse_twobody(self):
expected = csc_matrix(([1, 1], ([6, 14], [5, 13])), shape=(16, 16))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('2^ 1^ 1 3')).A,
expected.A))
if __name__ == '__main__':
unittest.main()
|
// https://leetcode.com/problems/two-sum/description/
/**
* @param {number[]} nums
* @param {number} target
* @return {number[]}
*/
var twoSum = function(nums, target) {
var a = [];
for (var i = 0, len = nums.length; i < len; i++) {
var tmp = target - nums[i];
if (a[tmp] !== undefined) return [a[tmp], i];
a[nums[i]] = i;
}
};
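// Illustrative check (not part of the original solution):
// twoSum([2, 7, 11, 15], 9) returns [0, 1] because nums[0] + nums[1] === 9.
// The lookup object `a` maps each value already seen to its index, so the
// scan is a single O(n) pass instead of the O(n^2) nested-loop approach.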
|
"""
Texture atlas for SpriteList
The long term goal is to rely on pyglet's texture atlas, but
it's still unclear what features we need supported in arcade
so need to prototype something to get started.
We're still building on pyglet's allocator.
Pyglet atlases are located here:
https://github.com/einarf/pyglet/blob/master/pyglet/image/atlas.py
"""
import math
import time
import logging
from typing import Dict, List, Tuple, Sequence, TYPE_CHECKING
from array import array
import PIL
from arcade.gl.framebuffer import Framebuffer
from collections import deque
from contextlib import contextmanager
from PIL import Image
import arcade
from pyglet.image.atlas import (
Allocator,
AllocatorException,
)
if TYPE_CHECKING:
from arcade import ArcadeContext, Texture
from arcade.gl import Texture as GLTexture
# How many texture coordinates to store
TEXCOORD_BUFFER_SIZE = 8192
# The number of pixels by which we grow the atlas when scanning for a reasonable size.
# It must divide the atlas size evenly. Must be a power-of-two number like 64, 128, 256, 512 etc.
RESIZE_STEP = 128
LOG = logging.getLogger(__name__)
class AtlasRegion:
"""
Stores information about where a texture is located
    :param TextureAtlas atlas: The atlas this region belongs to
    :param arcade.Texture texture: The arcade texture
:param int x: The x position of the texture
:param int y: The y position of the texture
:param int width: The width of the texture in pixels
:param int height: The height of the texture in pixels
"""
__slots__ = (
"atlas",
"texture",
"x",
"y",
"width",
"height",
"texture_coordinates",
"texture_coordinates_buffer",
"texture_id",
)
def __init__(
self,
atlas: "TextureAtlas",
texture: "Texture",
x: int,
y: int,
width: int,
height: int,
):
self.atlas = atlas
self.texture = texture
self.x = x
self.y = y
self.width = width
self.height = height
# start_x, start_y, normalized_width, normalized_height
self.texture_coordinates = (
self.x / self.atlas.width,
(self.atlas.height - self.y - self.height) / self.atlas.height,
self.width / self.atlas.width,
self.height / self.atlas.height,
)
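        # Worked example (illustrative): a 64x64 texture at (x=0, y=0) in a
        # 256x256 atlas yields (0.0, 0.75, 0.25, 0.25). The y component is
        # flipped because the allocator measures y from the top of the atlas
        # while OpenGL texture coordinates have a lower-left origin.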
def verify_image_size(self):
"""
Verify the image has the right size.
The internal image of a texture can be tampered with
at any point causing an atlas update to fail.
"""
if self.texture.image.size != (self.width, self.height):
raise ValueError((
f"Texture '{self.texture.name}' change their internal image "
f"size from {self.width}x{self.height} to "
f"{self.texture.image.size[0]}x{self.texture.image.size[1]}. "
"It's not possible to fit this into the old allocated area in the atlas. "
))
class TextureAtlas:
"""
A texture atlas with a size in a context.
A texture atlas is a large texture containing several textures
so OpenGL can easily batch draw thousands or hundreds of thousands
of sprites on one draw operation.
    This is a fairly simple atlas that stores horizontal strips where
    the height of each strip is that of the tallest texture/image in it.
    Adding a texture to this atlas generates a texture id.
    This id is used in the sprite list vertex data to reference what
    texture each sprite is using. The actual texture coordinates
are located in a float32 texture this atlas is responsible for
keeping up to date.
:param Tuple[int, int] size: The width and height of the atlas in pixels
    :param int border: Currently no effect; should always be 1 to avoid textures bleeding
    :param Sequence[arcade.Texture] textures: The textures for this atlas
:param bool auto_resize: Automatically resize the atlas when full
:param Context ctx: The context for this atlas (will use window context if left empty)
"""
def __init__(
self,
size: Tuple[int, int],
*,
border: int = 1,
textures: Sequence["Texture"] = None,
auto_resize: bool = True,
ctx: "ArcadeContext" = None,
):
self._ctx = ctx or arcade.get_window().ctx
self._max_size = self._ctx.limits.MAX_VIEWPORT_DIMS
self._size: Tuple[int, int] = size
self._border: int = 1
self._allocator = Allocator(*self._size)
self._auto_resize = auto_resize
self._check_size(self._size)
self._texture = self._ctx.texture(size, components=4)
# Creating an fbo makes us able to clear the texture
self._fbo = self._ctx.framebuffer(color_attachments=[self._texture])
# A dictionary of all the allocated regions
# The key is the cache name for a texture
self._atlas_regions: Dict[str, AtlasRegion] = dict()
# A set of textures this atlas contains for fast lookups + set operations
self._textures: List["Texture"] = []
# Texture containing texture coordinates
self._uv_texture = self._ctx.texture(
(TEXCOORD_BUFFER_SIZE, 1), components=4, dtype="f4"
)
self._uv_texture.filter = self._ctx.NEAREST, self._ctx.NEAREST
self._uv_data = array("f", [0] * TEXCOORD_BUFFER_SIZE * 4)
# Free slots in the texture coordinate texture
self._uv_slots_free = deque(i for i in range(0, TEXCOORD_BUFFER_SIZE))
# Map texture names to slots
self._uv_slots: Dict[str, int] = dict()
self._uv_data_changed = True
# Add all the textures
for tex in textures or []:
self.add(tex)
@property
def width(self) -> int:
"""
The width of the texture atlas in pixels
:rtype: int
"""
return self._size[0]
@property
def height(self) -> int:
"""
The height of the texture atlas in pixels
:rtype: int
"""
return self._size[1]
@property
def size(self) -> Tuple[int, int]:
"""
The width and height of the texture atlas in pixels
:rtype: Tuple[int,int]
"""
return self._size
@property
def max_width(self) -> int:
"""
The maximum width of the atlas in pixels
:rtype: int
"""
return self._max_size[0]
@property
def max_height(self) -> int:
"""
The maximum height of the atlas in pixels
:rtype: int
"""
return self._max_size[1]
@property
def max_size(self) -> Tuple[int, int]:
"""
The maximum size of the atlas in pixels (x, y)
:rtype: Tuple[int,int]
"""
return self._max_size
@property
def auto_resize(self) -> bool:
"""
Get or set the auto resize flag for the atlas.
If enabled the atlas will resize itself when full.
:rtype: bool
"""
return self._auto_resize
@auto_resize.setter
def auto_resize(self, value: bool):
self._auto_resize = value
@property
def border(self) -> int:
"""
The texture border in pixels
:rtype: int
"""
return self._border
@property
def texture(self) -> "GLTexture":
"""
The atlas texture
:rtype: Texture
"""
return self._texture
@property
def uv_texture(self) -> "GLTexture":
"""
Texture coordinate texture.
:rtype: Texture
"""
return self._uv_texture
@property
def fbo(self) -> Framebuffer:
"""The framebuffer object for this atlas"""
return self._fbo
def add(self, texture: "Texture") -> Tuple[int, AtlasRegion]:
"""
Add a texture to the atlas.
:param Texture texture: The texture to add
:return: texture_id, AtlasRegion tuple
"""
if self.has_texture(texture):
slot = self.get_texture_id(texture.name)
region = self.get_region_info(texture.name)
return slot, region
LOG.info("Attempting to add texture: %s", texture.name)
try:
x, y, slot, region = self.allocate(texture)
except AllocatorException:
LOG.info("[%s] No room for %s size %s", id(self), texture.name, texture.image.size)
if self._auto_resize:
width = min(self.width * 2, self.max_width)
height = min(self.height * 2, self.max_height)
if self._size == (width, height):
raise
self.resize((width, height))
return self.add(texture)
else:
raise
self.write_texture(texture, x, y)
return slot, region
def allocate(self, texture: "Texture") -> Tuple[int, int, int, AtlasRegion]:
"""
Attempts to allocate space for a texture in the atlas.
This doesn't write the texture to the atlas texture itself.
It only allocates space.
        :return: The x, y, texture_id, AtlasRegion
"""
# Allocate space for texture
try:
x, y = self._allocator.alloc(
texture.image.width + self.border * 2,
texture.image.height + self.border * 2,
)
except AllocatorException:
raise AllocatorException(
f"No more space for texture {texture.name} size={texture.image.size}"
)
LOG.debug("Allocated new space for texture %s : %s %s", texture.name, x, y)
# Store a texture region for this allocation
region = AtlasRegion(
self,
texture,
x + self._border,
y + self._border,
texture.image.width,
texture.image.height,
)
self._atlas_regions[texture.name] = region
# Get the existing slot for this texture or grab a new one.
        # Existing slots for textures will only happen when re-building
        # the atlas since we want to keep the same slots to avoid
        # re-building the sprite list
existing_slot = self._uv_slots.get(texture.name)
slot = existing_slot if existing_slot is not None else self._uv_slots_free.popleft()
self._uv_slots[texture.name] = slot
self._uv_data[slot * 4] = region.texture_coordinates[0]
self._uv_data[slot * 4 + 1] = region.texture_coordinates[1]
self._uv_data[slot * 4 + 2] = region.texture_coordinates[2]
self._uv_data[slot * 4 + 3] = region.texture_coordinates[3]
self._uv_data_changed = True
self._textures.append(texture)
return x, y, slot, region
def write_texture(self, texture: "Texture", x: int, y: int):
"""
Writes an arcade texture to a subsection of the texture atlas
"""
# NOTE: We convert to RGBA when padding the image data
# if texture.image.mode != "RGBA":
# LOG.warning(f"TextureAtlas: Converting texture '{texture.name}' to RGBA")
# texture.image = texture.image.convert("RGBA")
self.write_image(texture.image, x, y)
def write_image(self, image: PIL.Image.Image, x: int, y: int) -> None:
"""
Write a PIL image to the atlas in a specific region.
:param PIL.Image.Image image: The pillow image
:param int x: The x position to write the texture
:param int y: The y position to write the texture
"""
# NOTE: We assume border is at least 1 here
# Write into atlas at the allocated location + a 1 pixel border
viewport = (
x + self._border - 1,
y + self._border - 1,
image.width + 2,
image.height + 2,
)
# print(image.size, viewport,"|", x, y, self._border)
# Pad the 1-pixel border with repeating data
tmp = Image.new('RGBA', (image.width + 2, image.height + 2))
tmp.paste(image, (1, 1))
tmp.paste(tmp.crop((1 , 1 , image.width+1, 2 )), (1 , 0 )) # noqa
tmp.paste(tmp.crop((1 , image.height, image.width+1, image.height+1)), (1 , image.height+1)) # noqa
tmp.paste(tmp.crop((1 , 0 , 2, image.height+2)), (0 , 0 )) # noqa
tmp.paste(tmp.crop((image.width, 0 , image.width+1, image.height+2)), (image.width+1, 0 )) # noqa
# Write the image directly to graphics memory in the allocated space
self._texture.write(tmp.tobytes(), 0, viewport=viewport)
def remove(self, texture: "Texture") -> None:
"""
Remove a texture from the atlas.
This doesn't remove the image from the underlying texture.
To physically remove the data you need to ``rebuild()``.
:param Texture texture: The texture to remove
"""
self._textures.remove(texture)
del self._atlas_regions[texture.name]
# Reclaim the uv slot
slot = self._uv_slots[texture.name]
del self._uv_slots[texture.name]
self._uv_slots_free.appendleft(slot)
def update_texture_image(self, texture: "Texture"):
"""
Updates the internal image of a texture in the atlas texture.
        The new image needs to be the exact same size as the original
        one, meaning the texture already needs to exist in the atlas.
        This can be used in cases where the image is manipulated in some way
and we need a quick way to sync these changes to graphics memory.
This operation is fairly expensive, but still orders of magnitude
faster than removing the old texture, adding the new one and
re-building the entire atlas.
:param Texture texture: The texture to update
"""
region = self._atlas_regions[texture.name]
region.verify_image_size()
viewport = (
region.x,
region.y,
region.width,
region.height,
)
self._texture.write(texture.image.tobytes(), 0, viewport=viewport)
def get_region_info(self, name: str) -> AtlasRegion:
"""
Get the region info for a texture
:return: The AtlasRegion for the given texture name
"""
return self._atlas_regions[name]
def get_texture_id(self, name: str) -> int:
"""
Get the uv slot for a texture name
:return: The texture id for the given texture name
"""
return self._uv_slots[name]
def has_texture(self, texture: "Texture") -> bool:
"""Check if a texture is already in the atlas"""
return texture.name in self._atlas_regions
# TODO: Possibly let user decide the resize function
# def resize(self, size: Tuple[int, int]) -> None:
# """
# Resize the texture atlas.
# This will cause a full rebuild.
# :param Tuple[int,int]: The new size
# """
# # if size == self._size:
# # return
# self._check_size(size)
# self._size = size
# self._texture = None
# self._fbo = None
# gc.collect() # Try to force garbage collection of the gl resource asap
# self._texture = self._ctx.texture(size, components=4)
# self._fbo = self._ctx.framebuffer(color_attachments=[self._texture])
# self.rebuild()
def resize(self, size: Tuple[int, int]) -> None:
"""
Resize the atlas on the gpu.
This will copy the pixel data from the old to the
new atlas retaining the exact same data.
        This is useful if the atlas was rendered into directly,
        since we then don't have to transfer each texture individually
        from system memory to graphics memory.
:param Tuple[int,int] size: The new size
"""
LOG.info("[%s] Resizing atlas from %s to %s", id(self), self._size, size)
if size == self._size:
return
resize_start = time.perf_counter()
self._check_size(size)
self._size = size
# Keep the old atlas texture and uv texture
uv_texture_old = self._uv_texture
texture_old = self._texture
self._uv_texture.write(self._uv_data, 0)
# Create new atlas texture and uv texture + fbo
self._uv_texture = self._ctx.texture(
(TEXCOORD_BUFFER_SIZE, 1), components=4, dtype="f4"
)
self._texture = self._ctx.texture(size, components=4)
self._fbo = self._ctx.framebuffer(color_attachments=[self._texture])
textures = self._textures
self.clear(texture_ids=False, texture=False)
for texture in sorted(textures, key=lambda x: x.image.size[1]):
self.allocate(texture)
# Write the new UV data
self._uv_texture.write(self._uv_data, 0)
self._uv_data_changed = False
# Bind textures for atlas copy shader
texture_old.use(0)
self._texture.use(1)
uv_texture_old.use(2)
self._uv_texture.use(3)
self._ctx.atlas_resize_program["projection"] = arcade.create_orthogonal_projection(
0, self.width, self.height, 0,
)
with self._fbo.activate():
self._ctx.disable(self._ctx.BLEND)
self._ctx.atlas_geometry.render(
self._ctx.atlas_resize_program,
mode=self._ctx.POINTS,
vertices=TEXCOORD_BUFFER_SIZE,
)
LOG.info("[%s] Atlas resize took %s seconds", id(self), time.perf_counter() - resize_start)
def rebuild(self) -> None:
"""Rebuild the underlying atlas texture.
This method also tries to organize the textures
more efficiently ordering them by size.
        The texture ids will persist so the sprite list
        doesn't need to be rebuilt.
"""
# Hold a reference to the old textures
textures = self._textures
# Clear the atlas but keep the uv slot mapping
self.clear(texture_ids=False)
# Add textures back sorted by height to potentially make more room
for texture in sorted(textures, key=lambda x: x.image.size[1]):
self.add(texture)
def clear(self, texture_ids: bool = True, texture: bool = True) -> None:
"""
Clear and reset the texture atlas.
Note that also clearing "texture_ids" makes the atlas
lose track of the old texture ids. This
        means the sprite list must be rebuilt from scratch.
:param bool texture_ids: Clear the assigned texture ids
:param bool texture: Clear the contents of the atlas texture itself
"""
if texture:
self._fbo.clear()
self._textures = []
self._atlas_regions = dict()
self._allocator = Allocator(*self._size)
if texture_ids:
self._uv_slots_free = deque(i for i in range(TEXCOORD_BUFFER_SIZE))
self._uv_slots = dict()
def use_uv_texture(self, unit: int = 0) -> None:
"""
Bind the texture coordinate texture to a channel.
In addition this method writes the texture
coordinate to the texture if the data is stale.
This is to avoid a full update every time a texture
is added to the atlas.
:param int unit: The texture unit to bind the uv texture
"""
if self._uv_data_changed:
self._uv_texture.write(self._uv_data, 0)
self._uv_data_changed = False
self._uv_texture.use(unit)
@contextmanager
def render_into(
self, texture: "Texture",
projection: Tuple[float, float, float, float] = None,
):
"""
Render directly into a sub-section of the atlas.
The sub-section is defined by the already allocated space
of the texture supplied in this method.
        By default the projection will be set to match the texture area size
        where `0, 0` is the lower left corner and `width, height` (of texture)
        is the upper right corner.
        This method should be used with the ``with`` statement::
with atlas.render_into(texture):
# Draw commands here
# Specify projection
            with atlas.render_into(texture, projection=(0, 100, 0, 100)):
# Draw geometry
:param Texture texture: The texture area to render into
:param Tuple[float,float,float,float] projection: The ortho projection to render with.
This parameter can be left blank if no projection changes are needed.
            The tuple values are: (left, right, bottom, top)
"""
region = self._atlas_regions[texture.name]
proj_prev = self._ctx.projection_2d
# Use provided projection or default
projection = projection or (0, region.width, 0, region.height)
# Flip the top and bottom because we need to render things upside down
projection = projection[0], projection[1], projection[3], projection[2]
self._ctx.projection_2d = projection
with self._fbo.activate() as fbo:
fbo.viewport = region.x, region.y, region.width, region.height
try:
yield fbo
finally:
fbo.viewport = 0, 0, *self._fbo.size
self._ctx.projection_2d = proj_prev
@classmethod
def create_from_texture_sequence(cls, textures: Sequence["Texture"], border: int = 1) -> "TextureAtlas":
"""
Create a texture atlas of a reasonable size from a sequence of textures.
:param Sequence[Texture] textures: A sequence of textures (list, set, tuple, generator etc.)
:param int border: The border for the atlas in pixels (space between each texture)
"""
textures = sorted(set(textures), key=lambda x: x.image.size[1])
size = TextureAtlas.calculate_minimum_size(textures)
return TextureAtlas(size, textures=textures, border=border)
@classmethod
def calculate_minimum_size(cls, textures: Sequence["Texture"], border: int = 1):
"""
        Calculate the minimum atlas size needed to store
        the provided sequence of textures
:param Sequence[Texture] textures: Sequence of textures
:param border:
:return: An estimated minimum size as a (width, height) tuple
"""
# Try to guess some sane minimum size to reduce the brute force iterations
total_area = sum(t.image.size[0] * t.image.size[1] for t in textures)
sqrt_size = int(math.sqrt(total_area))
start_size = sqrt_size or RESIZE_STEP
if start_size % RESIZE_STEP:
            start_size = sqrt_size + (RESIZE_STEP - sqrt_size % RESIZE_STEP)
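            # Worked example (illustrative): sqrt_size = 173 with
            # RESIZE_STEP = 128 rounds up to 173 + (128 - 45) = 256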
# For now we just brute force a solution by gradually
# increasing the atlas size using the allocator as a guide.
for size in range(start_size, 16385, RESIZE_STEP):
allocator = Allocator(size, size)
try:
for texture in textures:
allocator.alloc(
texture.image.width + border * 2,
texture.image.height + border * 2,
)
except AllocatorException:
continue
break
else:
raise ValueError("Too many textures to fit into one atlas")
return size, size
def to_image(self) -> Image.Image:
"""
Convert the atlas to a Pillow image
:return: A pillow image containing the atlas texture
"""
return Image.frombytes("RGBA", self._texture.size, bytes(self._texture.read()))
def show(self) -> None:
"""Show the texture atlas using Pillow"""
self.to_image().show()
def save(self, path: str) -> None:
"""
Save the texture atlas to a png.
:param str path: The path to save the atlas on disk
"""
self.to_image().save(path, format="png")
def _check_size(self, size: Tuple[int, int]) -> None:
"""Check it the atlas exceeds the hardware limitations"""
if size[0] > self._max_size[0] or size[1] > self._max_size[1]:
raise ValueError(
"Attempting to create or resize an atlas to "
f"{size} past its maximum size of {self._max_size}"
)
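# Illustrative usage sketch (not part of the original module). It assumes an
# open arcade window (so a default context exists) and a texture file on
# disk; both names below are placeholders.
#
#     import arcade
#     window = arcade.Window(800, 600, "atlas demo")
#     atlas = TextureAtlas((512, 512))
#     texture = arcade.load_texture("image.png")
#     texture_id, region = atlas.add(texture)
#     atlas.save("atlas_debug.png")  # inspect the packed result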
|
'use strict';
angular.module('simple-form', [
'ngRoute',
'angular-repeat-n',
'review-service'
]).
config(['$routeProvider', function($routeProvider) {
$routeProvider.when('/', {
templateUrl: 'templates/home.html',
controller: 'HomeCtrl'
})
.when('/form', {
templateUrl: 'templates/form.html',
controller: 'FormCtrl'
})
.otherwise({
redirectTo: '/'
});
}]);
|
// Copyright (c) 2015 Geometry Factory
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
// You can redistribute it and/or modify it under the terms of the GNU
// General Public License as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// Licensees holding a valid commercial license may use this file in
// accordance with the commercial license agreement provided with the software.
//
// This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
// WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
//
// $URL$
// $Id$
//
// Author(s) : Simon Giraudot
#ifndef CGAL_READ_PLY_POINTS_H
#define CGAL_READ_PLY_POINTS_H
#include <CGAL/license/Point_set_processing_3.h>
#include <CGAL/config.h>
#if defined(CGAL_CFG_NO_CPP0X_RVALUE_REFERENCE) || defined(CGAL_CFG_NO_CPP0X_VARIADIC_TEMPLATES)
#error CGAL PLY reader requires a C++11 compiler
#endif
#include <tuple>
#include <CGAL/property_map.h>
#include <CGAL/value_type_traits.h>
#include <CGAL/point_set_processing_assertions.h>
#include <CGAL/Kernel_traits.h>
#include <boost/version.hpp>
#include <boost/cstdint.hpp>
#include <iostream>
#include <sstream>
#include <string>
namespace CGAL {
// PLY types:
// name type number of bytes
// ---------------------------------------
// char character 1
// uchar unsigned character 1
// short short integer 2
// ushort unsigned short integer 2
// int integer 4
// uint unsigned integer 4
// float single-precision float 4
// double double-precision float 8
/**
\ingroup PkgPointSetProcessingIOPly
Class used to identify a %PLY property as a type and a name.
\sa `read_ply_points_with_properties()`
*/
template <typename T>
struct PLY_property
{
typedef T type;
const char* name;
PLY_property (const char* name) : name (name) { }
};
/**
\ingroup PkgPointSetProcessingIOPly
Generates a %PLY property handler to read 3D points. Points are
constructed from the input using 3 %PLY properties of type
`double` and named `x`, `y` and `z`.
\sa `read_ply_points_with_properties()`
\tparam PointMap the property map used to store points.
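A minimal usage sketch (the kernel choice, `points.ply` and the
identity property map are illustrative assumptions):
\code
typedef CGAL::Exact_predicates_inexact_constructions_kernel::Point_3 Point_3;
std::vector<Point_3> points;
std::ifstream in ("points.ply");
CGAL::read_ply_points_with_properties
  (in, std::back_inserter (points),
   CGAL::make_ply_point_reader (CGAL::Identity_property_map<Point_3>()));
\endcode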
*/
template <typename PointMap>
std::tuple<PointMap,
typename Kernel_traits<typename PointMap::value_type>::Kernel::Construct_point_3,
PLY_property<double>, PLY_property<double>, PLY_property<double> >
make_ply_point_reader(PointMap point_map)
{
return std::make_tuple (point_map, typename Kernel_traits<typename PointMap::value_type>::Kernel::Construct_point_3(),
PLY_property<double>("x"), PLY_property<double>("y"), PLY_property<double>("z"));
}
/**
\ingroup PkgPointSetProcessingIOPly
Generates a %PLY property handler to read 3D normal
vectors. Vectors are constructed from the input using 3 %PLY
properties of type `double` and named `nx`, `ny` and `nz`.
\sa `read_ply_points_with_properties()`
\tparam VectorMap the property map used to store vectors.
*/
template <typename VectorMap>
std::tuple<VectorMap,
typename Kernel_traits<typename VectorMap::value_type>::Kernel::Construct_vector_3,
PLY_property<double>, PLY_property<double>, PLY_property<double> >
make_ply_normal_reader(VectorMap normal_map)
{
return std::make_tuple (normal_map, typename Kernel_traits<typename VectorMap::value_type>::Kernel::Construct_vector_3(),
PLY_property<double>("nx"), PLY_property<double>("ny"), PLY_property<double>("nz"));
}
/// \cond SKIP_IN_MANUAL
namespace internal {
namespace PLY {
class PLY_read_number
{
protected:
std::string m_name;
std::size_t m_format;
public:
PLY_read_number (std::string name, std::size_t format)
: m_name (name), m_format (format) { }
virtual ~PLY_read_number() { }
const std::string& name () const { return m_name; }
virtual void get (std::istream& stream) const = 0;
// The two following functions prevent the stream from extracting
// only ONE character (= what the char types imply) by explicitly
// requiring an integer object when reading the stream
void read_ascii (std::istream& stream, boost::int8_t& c) const
{
short s;
stream >> s;
c = static_cast<char>(s);
}
void read_ascii (std::istream& stream, boost::uint8_t& c) const
{
unsigned short s;
stream >> s;
c = static_cast<unsigned char>(s);
}
// Default template when Type is not a char type
template <typename Type>
void read_ascii (std::istream& stream, Type& t) const
{
stream >> t;
}
template <typename Type>
Type read (std::istream& stream) const
{
if (m_format == 0) // Ascii
{
Type t;
read_ascii (stream, t);
return t;
}
else // Binary (1 = little endian, 2 = big endian)
{
union
{
char uChar[sizeof (Type)];
Type type;
} buffer;
std::size_t size = sizeof (Type);
stream.read(buffer.uChar, size);
if (m_format == 2) // Big endian
{
for (std::size_t i = 0; i < size / 2; ++ i)
{
unsigned char tmp = buffer.uChar[i];
buffer.uChar[i] = buffer.uChar[size - 1 - i];
buffer.uChar[size - 1 - i] = tmp;
}
}
return buffer.type;
}
return Type();
}
};
template <typename Type>
class PLY_read_typed_number : public PLY_read_number
{
mutable Type m_buffer;
public:
PLY_read_typed_number (std::string name, std::size_t format)
: PLY_read_number (name, format)
{
}
void get (std::istream& stream) const
{
m_buffer = (this->read<Type> (stream));
}
const Type& buffer() const
{
return m_buffer;
}
};
class PLY_reader
{
std::vector<PLY_read_number*> m_readers;
public:
std::size_t m_nb_points;
PLY_reader () : m_nb_points (0) { }
const std::vector<PLY_read_number*>& readers() const { return m_readers; }
template <typename Stream>
bool init (Stream& stream)
{
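// Parse the PLY header line by line: check the "ply" signature,
// record the encoding (ascii / binary little- or big-endian), then
// build one typed reader per vertex property until "end_header".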
std::size_t lineNumber = 0; // current line number
enum Format { ASCII = 0, BINARY_LITTLE_ENDIAN = 1, BINARY_BIG_ENDIAN = 2};
Format format = ASCII;
std::string line;
std::istringstream iss;
// Check the order of the properties of the point set
bool reading_properties = false;
while (getline (stream,line))
{
iss.clear();
iss.str (line);
++ lineNumber;
// Reads file signature on first line
if (lineNumber == 1)
{
std::string signature;
if (!(iss >> signature) || (signature != "ply"))
{
// if wrong file format
std::cerr << "Error: incorrect file format line " << lineNumber << " of file" << std::endl;
return false;
}
}
// Reads format on 2nd line
else if (lineNumber == 2)
{
std::string tag, format_string, version;
if ( !(iss >> tag >> format_string >> version) )
{
std::cerr << "Error line " << lineNumber << " of file" << std::endl;
return false;
}
if (format_string == "ascii") format = ASCII;
else if (format_string == "binary_little_endian") format = BINARY_LITTLE_ENDIAN;
else if (format_string == "binary_big_endian") format = BINARY_BIG_ENDIAN;
else
{
std::cerr << "Error: unknown file format \"" << format_string << "\" line " << lineNumber << std::endl;
return false;
}
}
// Comments and vertex properties
else
{
std::string keyword;
if (!(iss >> keyword))
{
std::cerr << "Error line " << lineNumber << " of file" << std::endl;
return false;
}
if (keyword == "property")
{
if (!reading_properties)
continue;
std::string type, name;
if (!(iss >> type >> name))
{
std::cerr << "Error line " << lineNumber << " of file" << std::endl;
return false;
}
if ( type == "char" || type == "int8")
m_readers.push_back (new PLY_read_typed_number<boost::int8_t> (name, format));
else if (type == "uchar" || type == "uint8")
m_readers.push_back (new PLY_read_typed_number<boost::uint8_t> (name, format));
else if (type == "short" || type == "int16")
m_readers.push_back (new PLY_read_typed_number<boost::int16_t> (name, format));
else if (type == "ushort" || type == "uint16")
m_readers.push_back (new PLY_read_typed_number<boost::uint16_t> (name, format));
else if (type == "int" || type == "int32")
m_readers.push_back (new PLY_read_typed_number<boost::int32_t> (name, format));
else if (type == "uint" || type == "uint32")
m_readers.push_back (new PLY_read_typed_number<boost::uint32_t> (name, format));
else if (type == "float" || type == "float32")
m_readers.push_back (new PLY_read_typed_number<float> (name, format));
else if (type == "double" || type == "float64")
m_readers.push_back (new PLY_read_typed_number<double> (name, format));
continue;
}
else
reading_properties = false;
// ignore comments and properties (if not in element
// vertex - cf below - properties are useless in our case)
if (keyword == "comment" || keyword == "property")
continue;
// When end_header is reached, stop loop and begin reading points
if (keyword == "end_header")
break;
if (keyword == "element")
{
std::string type;
std::size_t number;
if (!(iss >> type >> number))
{
std::cerr << "Error line " << lineNumber << " of file" << std::endl;
return false;
}
if (type == "vertex")
{
m_nb_points = number;
reading_properties = true;
}
else
{
reading_properties = false;
continue;
}
}
}
}
return true;
}
~PLY_reader ()
{
for (std::size_t i = 0; i < m_readers.size (); ++ i)
delete m_readers[i];
m_readers.clear();
}
template <typename Type>
bool does_tag_exist (const char* tag)
{
return does_tag_exist (tag, Type());
}
template <typename Type>
void assign (Type& t, const char* tag)
{
for (std::size_t i = 0; i < m_readers.size (); ++ i)
if (m_readers[i]->name () == tag)
{
PLY_read_typed_number<Type>*
reader = dynamic_cast<PLY_read_typed_number<Type>*>(m_readers[i]);
CGAL_assertion (reader != NULL);
t = reader->buffer();
return;
}
}
template <typename Type>
bool does_tag_exist (const char* tag, Type)
{
for (std::size_t i = 0; i < m_readers.size (); ++ i)
if (m_readers[i]->name () == tag)
return (dynamic_cast<PLY_read_typed_number<Type>*>(m_readers[i]) != NULL);
return false;
}
bool does_tag_exist (const char* tag, double)
{
for (std::size_t i = 0; i < m_readers.size (); ++ i)
if (m_readers[i]->name () == tag)
return (dynamic_cast<PLY_read_typed_number<double>*>(m_readers[i]) != NULL
|| dynamic_cast<PLY_read_typed_number<float>*>(m_readers[i]) != NULL);
return false;
}
void assign (double& t, const char* tag)
{
for (std::size_t i = 0; i < m_readers.size (); ++ i)
if (m_readers[i]->name () == tag)
{
PLY_read_typed_number<double>*
reader_double = dynamic_cast<PLY_read_typed_number<double>*>(m_readers[i]);
if (reader_double == NULL)
{
PLY_read_typed_number<float>*
reader_float = dynamic_cast<PLY_read_typed_number<float>*>(m_readers[i]);
CGAL_assertion (reader_float != NULL);
t = reader_float->buffer();
}
else
t = reader_double->buffer();
return;
}
}
};
template <class Reader, class T>
void get_value(Reader& r, T& v, PLY_property<T>& wrapper)
{
return r.assign(v, wrapper.name);
}
template <std::size_t N>
struct Filler
{
template <class Reader, class Value_tuple, class PLY_property_tuple>
static void fill(Reader& r, Value_tuple& values, PLY_property_tuple wrappers)
{
get_value(r, std::get<N>(values), std::get<N+2>(wrappers));
Filler<N-1>::fill(r, values, wrappers);
}
};
template<int ...>
struct seq { };
template<int N, int ...S>
struct gens : gens<N-1, N-1, S...> { };
template<int ...S>
struct gens<0, S...> {
typedef seq<S...> type;
};
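// gens<N> recursively builds seq<0, 1, ..., N-1>, the pre-C++14
// equivalent of std::index_sequence; call_functor uses it to expand
// a tuple of read values into individual functor arguments.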
template<class ValueType, class Functor, class Tuple, int ...S>
ValueType call_functor(Functor f, Tuple t, seq<S...>) {
return f(std::get<S>(t) ...);
}
template <class ValueType, class Functor, typename ... T>
ValueType call_functor(Functor f, std::tuple<T...>& t)
{
return call_functor<ValueType>(f, t, typename gens<sizeof...(T)>::type());
}
template<>
struct Filler<0>
{
template <class Reader, class Value_tuple, class PLY_property_tuple>
static void fill(Reader& r, Value_tuple& values, PLY_property_tuple wrappers)
{
get_value(r, std::get<0>(values), std::get<2>(wrappers));
}
};
template <typename OutputValueType,
typename PropertyMap,
typename Constructor,
typename ... T>
void process_properties (PLY_reader& reader, OutputValueType& new_element,
std::tuple<PropertyMap, Constructor, PLY_property<T>...>& current)
{
typedef typename PropertyMap::value_type PmapValueType;
std::tuple<T...> values;
Filler<sizeof...(T)-1>::fill(reader, values, current);
PmapValueType new_value = call_functor<PmapValueType>(std::get<1>(current), values);
put (std::get<0>(current), new_element, new_value);
}
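// Recursive case: consume the current tuple handler, then forward
// the remaining property binders one by one.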
template <typename OutputValueType,
typename PropertyMap,
typename Constructor,
typename ... T,
typename NextPropertyBinder,
typename ... PropertyMapBinders>
void process_properties (PLY_reader& reader, OutputValueType& new_element,
std::tuple<PropertyMap, Constructor, PLY_property<T>...>& current,
NextPropertyBinder& next,
PropertyMapBinders&& ... properties)
{
typedef typename PropertyMap::value_type PmapValueType;
std::tuple<T...> values;
Filler<sizeof...(T)-1>::fill(reader, values, current);
PmapValueType new_value = call_functor<PmapValueType>(std::get<1>(current), values);
put (std::get<0>(current), new_element, new_value);
process_properties (reader, new_element, next, properties...);
}
template <typename OutputValueType, typename PropertyMap, typename T>
void process_properties (PLY_reader& reader, OutputValueType& new_element,
std::pair<PropertyMap, PLY_property<T> >& current)
{
T new_value = T();
reader.assign (new_value, current.second.name);
put (current.first, new_element, new_value);
}
template <typename OutputValueType, typename PropertyMap, typename T,
typename NextPropertyBinder, typename ... PropertyMapBinders>
void process_properties (PLY_reader& reader, OutputValueType& new_element,
std::pair<PropertyMap, PLY_property<T> >& current,
NextPropertyBinder& next,
PropertyMapBinders&& ... properties)
{
T new_value = T();
reader.assign (new_value, current.second.name);
put (current.first, new_element, new_value);
process_properties (reader, new_element, next, properties...);
}
} // namespace PLY
} // namespace internal
/// \endcond
//===================================================================================
/// \ingroup PkgPointSetProcessingIOPly
/// Reads user-selected points properties from a .ply stream (ASCII or
/// binary).
/// Potential additional point properties and faces are ignored.
///
/// Properties are handled through a variadic list of property
/// handlers. A `PropertyHandler` can either be:
///
/// - A `std::pair<PropertyMap, PLY_property<T> >` if the user wants
/// to read a %PLY property as a scalar value T (for example, storing
/// an `int` %PLY property into an `int` variable).
///
/// - A `std::tuple<PropertyMap, Constructor,
/// PLY_property<T>...>` if the user wants to use one or several %PLY
/// properties to construct a complex object (for example, storing 3
/// `uchar` %PLY properties into a %Color object that can, for example,
/// be a `CGAL::cpp11::array<unsigned char, 3>`). In that case, the
/// second element of the tuple should be a functor that constructs
/// the value type of `PropertyMap` from N objects of types `T`.
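///
/// A minimal sketch of such a tuple handler (the `Color` type, the
/// `Make_color` functor and `color_map` are illustrative assumptions,
/// not part of this API):
/// \code
/// typedef CGAL::cpp11::array<unsigned char, 3> Color;
/// struct Make_color {
///   Color operator() (unsigned char r, unsigned char g, unsigned char b) const
///   { Color c = {{ r, g, b }}; return c; }
/// };
/// // passed to read_ply_points_with_properties() as:
/// // std::make_tuple (color_map, Make_color(),
/// //                  PLY_property<boost::uint8_t>("red"),
/// //                  PLY_property<boost::uint8_t>("green"),
/// //                  PLY_property<boost::uint8_t>("blue"))
/// \endcode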
///
/// @sa `make_ply_point_reader()`
/// @sa `make_ply_normal_reader()`
///
/// @cgalRequiresCPP11
///
/// @tparam OutputIteratorValueType type of objects that can be put in `OutputIterator`.
/// It defaults to `value_type_traits<OutputIterator>::%type` and can be omitted when the default is fine.
/// @tparam OutputIterator iterator over output points.
/// @tparam PropertyHandler handlers to recover properties.
///
/// @return `true` on success.
// This variant requires all parameters.
//-----------------------------------------------------------------------------------
template <typename OutputIteratorValueType,
typename OutputIterator,
typename ... PropertyHandler>
bool read_ply_points_with_properties (std::istream& stream,
OutputIterator output,
PropertyHandler&& ... properties)
{
typedef typename value_type_traits<OutputIterator>::type OutputValueType;
if(!stream)
{
std::cerr << "Error: cannot open file" << std::endl;
return false;
}
internal::PLY::PLY_reader reader;
if (!(reader.init (stream)))
return false;
std::size_t points_read = 0;
while (!(stream.eof()) && points_read < reader.m_nb_points)
{
for (std::size_t i = 0; i < reader.readers().size (); ++ i)
reader.readers()[i]->get (stream);
OutputValueType new_element;
internal::PLY::process_properties (reader, new_element, properties...);
*(output ++) = new_element;
++ points_read;
}
// Skip remaining lines
return (points_read == reader.m_nb_points);
}
/// \cond SKIP_IN_MANUAL
template <typename OutputIterator,
typename ... PropertyHandler>
bool read_ply_points_with_properties (std::istream& stream,
OutputIterator output,
PropertyHandler&& ... properties)
{
typedef typename value_type_traits<OutputIterator>::type OutputValueType;
return read_ply_points_with_properties<OutputValueType>
(stream, output, properties...);
}
/// \endcond
//===================================================================================
/// \ingroup PkgPointSetProcessingIOPly
/// Reads points (positions + normals, if available) from a .ply
/// stream (ASCII or binary).
/// Potential additional point properties and faces are ignored.
///
/// @tparam OutputIteratorValueType type of objects that can be put in `OutputIterator`.
/// It defaults to `value_type_traits<OutputIterator>::%type` and can be omitted when the default is fine.
/// @tparam OutputIterator iterator over output points.
/// @tparam PointPMap is a model of `WritablePropertyMap` with value type `CGAL::Point_3`.
/// @tparam NormalPMap is a model of `WritablePropertyMap` with value type `CGAL::Vector_3`.
///
/// @return `true` on success.
///
/// @cgalRequiresCPP11
// This variant requires all parameters.
//-----------------------------------------------------------------------------------
template < typename OutputIteratorValueType,
typename OutputIterator,
typename PointPMap,
typename NormalPMap >
bool read_ply_points_and_normals(std::istream& stream, ///< input stream.
OutputIterator output, ///< output iterator over points.
PointPMap point_pmap, ///< property map: value_type of OutputIterator -> Point_3.
NormalPMap normal_pmap) ///< property map: value_type of OutputIterator -> Vector_3.
{
return read_ply_points_with_properties (stream, output,
make_ply_point_reader (point_pmap),
make_ply_normal_reader (normal_pmap));
}
/// @cond SKIP_IN_MANUAL
template < typename OutputIterator,
typename PointPMap,
typename NormalPMap >
bool read_ply_points_and_normals(std::istream& stream, ///< input stream.
OutputIterator output, ///< output iterator over points.
PointPMap point_pmap, ///< property map: value_type of OutputIterator -> Point_3.
NormalPMap normal_pmap) ///< property map: value_type of OutputIterator -> Vector_3.
{
// just deduce value_type of OutputIterator
return read_ply_points_and_normals
<typename value_type_traits<OutputIterator>::type>(stream,
output,
point_pmap,
normal_pmap);
}
//-----------------------------------------------------------------------------------
/// @endcond
/// @cond SKIP_IN_MANUAL
// This variant creates a default point property map = Identity_property_map.
//-----------------------------------------------------------------------------------
template < typename OutputIteratorValueType,
typename OutputIterator,
typename NormalPMap >
bool read_ply_points_and_normals(std::istream& stream, ///< input stream.
OutputIterator output, ///< output iterator over points.
NormalPMap normal_pmap) ///< property map: value_type of OutputIterator -> Vector_3.
{
return read_ply_points_and_normals
<OutputIteratorValueType>(stream,
output,
make_identity_property_map(OutputIteratorValueType()),
normal_pmap);
}
template < typename OutputIterator,
typename NormalPMap >
bool read_ply_points_and_normals(std::istream& stream, ///< input stream.
OutputIterator output, ///< output iterator over points.
NormalPMap normal_pmap) ///< property map: value_type of OutputIterator -> Vector_3.
{
// just deduce value_type of OutputIterator
return read_ply_points_and_normals
<typename value_type_traits<OutputIterator>::type>(stream,
output,
normal_pmap);
}
//-----------------------------------------------------------------------------------
/// @endcond
//===================================================================================
/// \ingroup PkgPointSetProcessingIOPly
/// Reads points (position only) from a .ply stream (ASCII or binary).
/// Potential additional point properties (including normals) and faces are ignored.
///
/// @tparam OutputIteratorValueType type of objects that can be put in `OutputIterator`.
/// It defaults to `value_type_traits<OutputIterator>::%type` and can be omitted when the default is fine.
/// @tparam OutputIterator iterator over output points.
/// @tparam PointPMap is a model of `WritablePropertyMap` with value_type `CGAL::Point_3`.
/// It can be omitted if the value type of `OutputIterator` is convertible to `CGAL::Point_3`.
///
/// @return `true` on success.
///
/// @cgalRequiresCPP11
// This variant requires all parameters.
//-----------------------------------------------------------------------------------
template < typename OutputIteratorValueType,
typename OutputIterator,
typename PointPMap >
bool read_ply_points(std::istream& stream, ///< input stream.
OutputIterator output, ///< output iterator over points.
PointPMap point_pmap) ///< property map: value_type of OutputIterator -> Point_3.
{
return read_ply_points_with_properties (stream, output,
make_ply_point_reader (point_pmap));
}
/// @cond SKIP_IN_MANUAL
template < typename OutputIterator,
typename PointPMap >
bool read_ply_points(std::istream& stream, ///< input stream.
OutputIterator output, ///< output iterator over points.
PointPMap point_pmap) ///< property map: value_type of OutputIterator -> Point_3.
{
// just deduce value_type of OutputIterator
return read_ply_points
<typename value_type_traits<OutputIterator>::type>(stream,
output,
point_pmap);
}
//-----------------------------------------------------------------------------------
/// @endcond
/// @cond SKIP_IN_MANUAL
// This variant creates a default point property map = Identity_property_map.
//-----------------------------------------------------------------------------------
template < typename OutputIteratorValueType,
typename OutputIterator >
bool read_ply_points(std::istream& stream, ///< input stream.
OutputIterator output) ///< output iterator over points.
{
return read_ply_points
<OutputIteratorValueType>(stream,
output,
make_identity_property_map(OutputIteratorValueType())
);
}
template < typename OutputIterator>
bool read_ply_points(std::istream& stream, ///< input stream.
OutputIterator output) ///< output iterator over points.
{
// just deduce value_type of OutputIterator
return read_ply_points
<typename value_type_traits<OutputIterator>::type>(stream,
output);
}
//-----------------------------------------------------------------------------------
/// @endcond
} //namespace CGAL
#endif // CGAL_READ_PLY_POINTS_H
|
#include <Kernel.h>
#include <Shell.h>
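// Dispatch a window-load event: log it, reject null arguments, then
// invoke the window's registered callback if one is set.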
bool OnLoad(CWindow *win, CEvent *evt) {
Logger(" OnLoad(%X, %X)\n", win, evt);
if (!win || !evt) return false;
bool status = true;
if (win->CallBack) status = win->CallBack(win, evt);
return status;
}
|
import authReducer from './authReducer';
import projectReducer from './projectReducer';
import { combineReducers } from 'redux';
import { firestoreReducer } from 'redux-firestore';
import { firebaseReducer } from 'react-redux-firebase';
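// Note: react-redux-firebase and redux-firestore look up their state
// slices under the "firebase" and "firestore" keys by default, so the
// key names below should not be renamed.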
const rootReducer = combineReducers({
auth: authReducer,
project: projectReducer,
firestore: firestoreReducer,
firebase: firebaseReducer,
});
export default rootReducer;
|
/*
This file is part of Ext JS 4.2
Copyright (c) 2011-2013 Sencha Inc
Contact: http://www.sencha.com/contact
GNU General Public License Usage
This file may be used under the terms of the GNU General Public License version 3.0 as
published by the Free Software Foundation and appearing in the file LICENSE included in the
packaging of this file.
Please review the following information to ensure the GNU General Public License version 3.0
requirements will be met: http://www.gnu.org/copyleft/gpl.html.
If you are unsure which license is appropriate for your use, please contact the sales department
at http://www.sencha.com/contact.
Build date: 2013-05-16 14:36:50 (f9be68accb407158ba2b1be2c226a6ce1f649314)
*/
/**
* @private
* A cache of View elements keyed using the index of the associated record in the store.
*
* This implements the methods of {Ext.dom.CompositeElement} which are used by {@link Ext.view.AbstractView}
 * to provide a map of record nodes and methods to manipulate the nodes.
*/
Ext.define('Ext.view.NodeCache', {
constructor: function(view) {
this.view = view;
this.clear();
this.el = new Ext.dom.AbstractElement.Fly();
},
/**
* Removes all elements from this NodeCache.
* @param {Boolean} [removeDom] True to also remove the elements from the document.
*/
clear: function(removeDom) {
var me = this,
elements = this.elements,
i, el;
if (removeDom) {
for (i in elements) {
el = elements[i];
el.parentNode.removeChild(el);
}
}
me.elements = {};
me.count = me.startIndex = 0;
me.endIndex = -1;
},
/**
* Clears this NodeCache and adds the elements passed.
* @param {HTMLElement[]} els An array of DOM elements from which to fill this NodeCache.
* @return {Ext.view.NodeCache} this
*/
fill: function(newElements, startIndex) {
var me = this,
elements = me.elements = {},
i,
len = newElements.length;
if (!startIndex) {
startIndex = 0;
}
for (i = 0; i < len; i++) {
elements[startIndex + i] = newElements[i];
}
me.startIndex = startIndex;
me.endIndex = startIndex + len - 1;
me.count = len;
return this;
},
insert: function(insertPoint, nodes) {
var me = this,
elements = me.elements,
i,
nodeCount = nodes.length;
// If not inserting into empty cache, validate, and possibly shuffle.
if (me.count) {
//<debug>
if (insertPoint > me.endIndex + 1 || insertPoint + nodes.length - 1 < me.startIndex) {
Ext.Error.raise('Discontiguous range would result from inserting ' + nodes.length + ' nodes at ' + insertPoint);
}
//</debug>
// Move following nodes forwards by <nodeCount> positions
if (insertPoint < me.count) {
for (i = me.endIndex + nodeCount; i >= insertPoint + nodeCount; i--) {
elements[i] = elements[i - nodeCount];
elements[i].setAttribute('data-recordIndex', i);
}
}
me.endIndex = me.endIndex + nodeCount;
}
// Empty cache. set up counters
else {
me.startIndex = insertPoint;
me.endIndex = insertPoint + nodeCount - 1;
}
// Insert new nodes into place
for (i = 0; i < nodeCount; i++, insertPoint++) {
elements[insertPoint] = nodes[i];
elements[insertPoint].setAttribute('data-recordIndex', insertPoint);
}
me.count += nodeCount;
},
item: function(index, asDom) {
var el = this.elements[index],
result = null;
if (el) {
result = asDom ? this.elements[index] : this.el.attach(this.elements[index]);
}
return result;
},
first: function(asDom) {
return this.item(this.startIndex, asDom);
},
last: function(asDom) {
return this.item(this.endIndex, asDom);
},
getCount : function() {
return this.count;
},
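/**
 * Returns an array of the cached elements in the half-open range [start, end),
 * mirroring Array.prototype.slice. Omitting end slices through endIndex;
 * omitting start (or passing 0) begins at startIndex.
 */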
slice: function(start, end) {
var elements = this.elements,
result = [],
i;
if (arguments.length < 2) {
end = this.endIndex;
} else {
end = Math.min(this.endIndex, end - 1);
}
for (i = start || this.startIndex; i <= end; i++) {
result.push(elements[i]);
}
return result;
},
/**
* Replaces the specified element with the passed element.
* @param {String/HTMLElement/Ext.Element/Number} el The id of an element, the Element itself, the index of the
* element in this composite to replace.
* @param {String/Ext.Element} replacement The id of an element or the Element itself.
* @param {Boolean} [domReplace] True to remove and replace the element in the document too.
*/
replaceElement: function(el, replacement, domReplace) {
var elements = this.elements,
index = (typeof el === 'number') ? el : this.indexOf(el);
if (index > -1) {
replacement = Ext.getDom(replacement);
if (domReplace) {
el = elements[index];
el.parentNode.insertBefore(replacement, el);
Ext.removeNode(el);
replacement.setAttribute('data-recordIndex', index);
}
this.elements[index] = replacement;
}
return this;
},
/**
* Find the index of the passed element within the composite collection.
* @param {String/HTMLElement/Ext.Element/Number} el The id of an element, or an Ext.dom.Element, or an HtmlElement
* to find within the composite collection.
* @return {Number} The index of the passed Ext.dom.Element in the composite collection, or -1 if not found.
*/
indexOf: function(el) {
var elements = this.elements,
index;
el = Ext.getDom(el);
for (index = this.startIndex; index <= this.endIndex; index++) {
if (elements[index] === el) {
return index;
}
}
return -1;
},
removeRange: function(start, end, removeDom) {
var me = this,
elements = me.elements,
el,
i, removeCount, fromPos;
if (end === undefined) {
end = me.count;
} else {
end = Math.min(me.endIndex + 1, end + 1);
}
if (!start) {
start = 0;
}
removeCount = end - start;
for (i = start, fromPos = end; i < me.endIndex; i++, fromPos++) {
// Within removal range and we are removing from DOM
if (removeDom && i < end) {
Ext.removeNode(elements[i]);
}
// If the from position is occupied, shuffle that entry back into reference "i"
if (fromPos <= me.endIndex) {
el = elements[i] = elements[fromPos];
el.setAttribute('data-recordIndex', i);
}
// The from position has walked off the end, so delete reference "i"
else {
delete elements[i];
}
}
me.count -= removeCount;
me.endIndex -= removeCount;
},
/**
* Removes the specified element(s).
* @param {String/HTMLElement/Ext.Element/Number} el The id of an element, the Element itself, the index of the
* element in this composite or an array of any of those.
* @param {Boolean} [removeDom] True to also remove the element from the document
*/
removeElement: function(keys, removeDom) {
var me = this,
inKeys,
key,
elements = me.elements,
el,
deleteCount,
keyIndex = 0, index,
fromIndex;
// Sort the keys into ascending order so that we can iterate through the elements
// collection, and delete items encountered in the keys array as we encounter them.
if (Ext.isArray(keys)) {
inKeys = keys;
keys = [];
deleteCount = inKeys.length;
for (keyIndex = 0; keyIndex < deleteCount; keyIndex++) {
key = inKeys[keyIndex];
if (typeof key !== 'number') {
key = me.indexOf(key);
}
// Could be asked to remove data above the start, or below the end of rendered zone in a buffer rendered view
// So only collect keys which are within our range
if (key >= me.startIndex && key <= me.endIndex) {
keys[keys.length] = key;
}
}
Ext.Array.sort(keys);
deleteCount = keys.length;
} else {
// Could be asked to remove data above the start, or below the end of rendered zone in a buffer rendered view
if (keys < me.startIndex || keys > me.endIndex) {
return;
}
deleteCount = 1;
keys = [keys];
}
// Iterate through elements starting at the element referenced by the first deletion key.
// We also start off and index zero in the keys to delete array.
for (index = fromIndex = keys[0], keyIndex = 0; index <= me.endIndex; index++, fromIndex++) {
// If the current index matches the next key in the delete keys array, this
// entry is being deleted, so increment the fromIndex to skip it.
// Advance to next entry in keys array.
if (keyIndex < deleteCount && index === keys[keyIndex]) {
fromIndex++;
keyIndex++;
if (removeDom) {
Ext.removeNode(elements[index]);
}
}
// Shuffle entries forward of the delete range back into contiguity.
if (fromIndex <= me.endIndex && fromIndex >= me.startIndex) {
el = elements[index] = elements[fromIndex];
el.setAttribute('data-recordIndex', index);
} else {
delete elements[index];
}
}
me.endIndex -= deleteCount;
me.count -= deleteCount;
},
/**
* Appends/prepends records depending on direction flag
* @param {Ext.data.Model[]} newRecords Items to append/prepend
 * @param {Number} direction `-1` = scroll up, `0` = scroll down.
 * @param {Number} removeCount The number of records to remove from the end. If scrolling
* down, rows are removed from the top and the new rows are added at the bottom.
*/
scroll: function(newRecords, direction, removeCount) {
var me = this,
elements = me.elements,
recCount = newRecords.length,
i, el, removeEnd,
newNodes,
nodeContainer = me.view.getNodeContainer(),
frag = document.createDocumentFragment();
// Scrolling up (content moved down - new content needed at top, remove from bottom)
if (direction == -1) {
for (i = (me.endIndex - removeCount) + 1; i <= me.endIndex; i++) {
el = elements[i];
delete elements[i];
el.parentNode.removeChild(el);
}
me.endIndex -= removeCount;
// grab all nodes rendered, not just the data rows
newNodes = me.view.bufferRender(newRecords, me.startIndex -= recCount);
for (i = 0; i < recCount; i++) {
elements[me.startIndex + i] = newNodes[i];
frag.appendChild(newNodes[i]);
}
nodeContainer.insertBefore(frag, nodeContainer.firstChild);
}
// Scrolling down (content moved up - new content needed at bottom, remove from top)
else {
removeEnd = me.startIndex + removeCount;
for (i = me.startIndex; i < removeEnd; i++) {
el = elements[i];
delete elements[i];
el.parentNode.removeChild(el);
}
me.startIndex = i;
// grab all nodes rendered, not just the data rows
newNodes = me.view.bufferRender(newRecords, me.endIndex + 1);
for (i = 0; i < recCount; i++) {
elements[me.endIndex += 1] = newNodes[i];
frag.appendChild(newNodes[i]);
}
nodeContainer.appendChild(frag);
}
// Keep count consistent.
me.count = me.endIndex - me.startIndex + 1;
}
});
|
const ruleUpdateUtil = require('scripts/ruleUpdateUtil')
const updateUtil = require('scripts/updateUtil')
const loadingHint = "检查规则/脚本更新..." // "Checking for rule/script updates..."
const sw = $device.info.screen.width
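// Promisify a JSBox callback-style API (one that takes {handler: fn})
// so the two update checks below can run in parallel via Promise.all.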
let pm = function (method) {
return new Promise((resolve, reject) => {
method({
handler: res => {
resolve(res)
}
})
})
}
function renderTodayUI(bid) {
let isLauncher = bid === 'app.cyan.jsbox.ghost'
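// Run both update checks in parallel; toggle the "new" badges and,
// when the rules changed, fetch the latest commit message to display.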
let checks = [pm(ruleUpdateUtil.getGitHubFilesSha), pm(updateUtil.getLatestVersion)]
Promise.all(checks).then(res => {
let canUpdate = ruleUpdateUtil.checkUpdate(ruleUpdateUtil.getFilesSha(), res[0])
let newVersion = updateUtil.needUpdate(res[1], updateUtil.getCurVersion())
$("newTag").hidden = !canUpdate
$("newVersionTag").hidden = !newVersion
return canUpdate ? pm(ruleUpdateUtil.getLatestCommitMessage) : Promise.resolve()
}).then(res => {
$("updateStatus").text = res? res.commit.message : ""
})
$ui.render({
props: {
id: "todayMainView",
title: "Surge3规则生成",
frame: $rect(0, 0, sw, 110),
hideNavbar: true,
navBarHidden: true,
bgcolor: $color("clear"),
},
views: [{
type: "blur",
props: {
id: "close",
style: 1,
radius: 0,
hidden: !isLauncher
},
layout: (make, view) => {
make.width.height.equalTo(view.super).offset(10)
make.top.equalTo(view.super.top).offset(-10)
},
events: {
tapped: sender => {
$app.close(0.3)
}
}
}, {
type: "view",
props: {
id: "",
},
layout: (make, view) => {
make.height.equalTo(110)
make.width.equalTo(view.super).offset(0)
make.center.equalTo(view.super)
},
views: [{
type: "label",
props: {
id: "updateStatus",
text: "Rules-lhie1 by Fndroid",
font: $font(12),
textColor: $rgba(50, 50, 50, .3)
},
layout: (make, view) => {
make.top.equalTo(view.super.top).offset(5)
make.centerX.equalTo(view.super)
}
}, {
type: "label",
props: {
id: "updateStatus",
text: loadingHint,
font: $font(12),
textColor: $rgba(50, 50, 50, .5)
},
layout: (make, view) => {
make.bottom.equalTo(view.super.bottom).offset(-5)
make.centerX.equalTo(view.super)
}
}, {
type: "image",
props: {
id: "pullBtn",
data: $file.read("assets/today_pull.png"),
radius: 25,
bgcolor: $rgba(255, 255, 255, 0)
},
layout: (make, view) => {
make.width.height.equalTo(55)
make.centerY.equalTo(view.super).offset(-10)
make.centerX.equalTo(view.super)
},
events: {
tapped: sender => {
$app.openURL("jsbox://run?name=Rules-lhie1&auto=1")
}
},
}, {
type: "image",
props: {
id: "surgeBtn",
data: $file.read("assets/today_surge.png"),
radius: 25,
bgcolor: $rgba(255, 255, 255, 0)
},
layout: (make, view) => {
make.width.height.equalTo(55)
make.centerY.equalTo(view.super).offset(-10)
make.right.equalTo(view.prev.left).offset(- (sw / 9))
},
events: {
tapped: sender => {
$app.openURL("surge3:///toggle?autoclose=true")
}
}
}, {
type: "image",
props: {
id: "jsboxBtn",
data: $file.read("assets/today_jsbox.png"),
radius: 25,
bgcolor: $rgba(255, 255, 255, 0)
},
layout: (make, view) => {
make.width.height.equalTo(50)
make.centerY.equalTo(view.super).offset(-10)
make.left.equalTo(view.prev.prev.right).offset((sw / 9))
},
events: {
tapped: sender => {
$app.openURL("jsbox://run?name=Rules-lhie1")
}
}
}, {
type: "label",
props: {
text: "更新规则",
font: $font(12),
textColor: $rgba(50, 50, 50, .8),
align: $align.center
},
layout: (make, view) => {
make.height.equalTo(10)
make.top.equalTo($("pullBtn").bottom)
make.width.equalTo($("pullBtn").width)
make.centerX.equalTo($("pullBtn"))
}
}, {
type: "label",
props: {
text: "Surge开关",
font: $font(12),
textColor: $rgba(50, 50, 50, .8),
align: $align.center
},
layout: (make, view) => {
make.height.equalTo(10)
make.top.equalTo(view.prev.top)
make.width.equalTo($("pullBtn").width)
make.centerX.equalTo($("surgeBtn"))
}
}, {
type: "label",
props: {
text: "脚本设置",
font: $font(12),
textColor: $rgba(50, 50, 50, .8),
align: $align.center
},
layout: (make, view) => {
make.height.equalTo(10)
make.top.equalTo($("pullBtn").bottom)
make.width.equalTo($("pullBtn").width)
make.centerX.equalTo($("jsboxBtn"))
}
}, {
type: "image",
props: {
id: "newTag",
data: $file.read("assets/new_rules_tag.png"),
bgcolor: $rgba(255, 255, 255, 0),
hidden: true
},
layout: (make, view) => {
make.width.height.equalTo(15)
make.centerY.equalTo(view.super).offset(-20)
make.left.equalTo($("pullBtn").right).offset(-10)
}
}, {
type: "image",
props: {
id: "newVersionTag",
data: $file.read("assets/new_version_tag.png"),
bgcolor: $rgba(255, 255, 255, 0),
hidden: true
},
layout: (make, view) => {
make.width.height.equalTo(15)
make.centerY.equalTo(view.super).offset(-20)
make.left.equalTo($("jsboxBtn").right).offset(-10)
}
}, {
type: "image",
props: {
id: "closeBtn",
data: $file.read("assets/close_icon.png"),
bgcolor: $rgba(255, 255, 255, 0),
hidden: !isLauncher,
alpha: 0.7
},
layout: (make, view) => {
make.width.height.equalTo(20)
make.top.equalTo(view.super.top).offset(10)
make.right.equalTo(view.super.right).offset(-10)
},
events: {
tapped: sender => {
$app.close(.2)
}
}
}]
}]
})
}
module.exports = {
renderTodayUI: renderTodayUI
}
|
from setuptools import setup, find_packages
setup(
name='CovidVoting',
version='1.0',
    description='Setup for the CovidVoting module',
    author='Chiaoya Chang, Yanyan Guan, Lindsey Ulmer',
author_email='chiaoya@uw.edu',
LICENSE = "MIT",
url='https://github.com/lindseyulmer/Voting-COVID',
packages=find_packages(),
package_dir={'CovidVoting': 'CovidVoting'},
package_data={'CovidVoting': ['data/*']},
requires = ["pandas", "NumPy", "geopandas", "bokeh"]
)
|