hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
debc1fbbc2265a759c75b70c32551eb82dc91688 | 449 | py | Python | setup.py | ZenMX/bert4keras | 8ffb46a16a79f87aa8cdf045df7994036b4be47d | [
"Apache-2.0"
] | 1 | 2020-08-24T13:42:49.000Z | 2020-08-24T13:42:49.000Z | setup.py | ZenMX/bert4keras | 8ffb46a16a79f87aa8cdf045df7994036b4be47d | [
"Apache-2.0"
] | null | null | null | setup.py | ZenMX/bert4keras | 8ffb46a16a79f87aa8cdf045df7994036b4be47d | [
"Apache-2.0"
] | 1 | 2021-01-07T12:00:39.000Z | 2021-01-07T12:00:39.000Z | #! -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
    name='bert4keras',
    version='0.8.5',
    description='an elegant bert4keras',
    long_description='bert4keras: https://github.com/bojone/bert4keras',
    license='Apache License 2.0',
    url='https://github.com/bojone/bert4keras',
    author='bojone',
    author_email='bojone@spaces.ac.cn',
    # Pinned upper bound: the package targets the pre-2.4 Keras API
    install_requires=['keras<=2.3.1'],
    # Auto-discover all packages under the project root
    packages=find_packages()
)
| 26.411765 | 72 | 0.674833 |
561922a62fe84f59f2163817321c2ffe8e0e93a9 | 119 | py | Python | producto/admin.py | JohanVasquez/crud-venta-libre | 557f82b5d88c42480020a65cc6034348ff20efce | [
"MIT"
] | null | null | null | producto/admin.py | JohanVasquez/crud-venta-libre | 557f82b5d88c42480020a65cc6034348ff20efce | [
"MIT"
] | null | null | null | producto/admin.py | JohanVasquez/crud-venta-libre | 557f82b5d88c42480020a65cc6034348ff20efce | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Ventas
# Expose the Ventas model in the Django admin interface (default ModelAdmin)
admin.site.register(Ventas)
| 17 | 32 | 0.798319 |
c2582a5089aa76a4898cf61bc983b8eedce5f624 | 2,352 | py | Python | tests/test_site/settings.py | tredzko/python-django | 0e39eacc2740be5d9620482119c3e9d41bc77ec8 | [
"BSD-3-Clause"
] | 103 | 2016-07-21T10:11:43.000Z | 2021-11-15T09:52:49.000Z | tests/test_site/settings.py | tredzko/python-django | 0e39eacc2740be5d9620482119c3e9d41bc77ec8 | [
"BSD-3-Clause"
] | 51 | 2016-11-28T20:01:47.000Z | 2022-01-27T15:49:17.000Z | tests/test_site/settings.py | tredzko/python-django | 0e39eacc2740be5d9620482119c3e9d41bc77ec8 | [
"BSD-3-Clause"
] | 64 | 2016-11-28T14:08:06.000Z | 2021-12-11T11:16:50.000Z | """
Django settings for opentracing_test_site project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import sys
import django_opentracing
import opentracing
from opentracing.mocktracer import MockTracer
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# (Hard-coded key is acceptable here: this module configures a test-only site.)
SECRET_KEY = 'tracing_is_fun!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# OpenTracingMiddleware is listed first, ahead of the stock Django middleware.
MIDDLEWARE_CLASSES = [
    'django_opentracing.OpenTracingMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# NOTE(review): the same list is assigned to both setting names — presumably so
# the test site works on Django versions using MIDDLEWARE as well as the older
# MIDDLEWARE_CLASSES; confirm against the supported Django range.
MIDDLEWARE = MIDDLEWARE_CLASSES
ROOT_URLCONF = 'test_site.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'test_site.wsgi.application'
# OpenTracing settings
# A MockTracer is used so tests can inspect recorded spans without a real
# tracing backend.
OPENTRACING_TRACING = django_opentracing.DjangoTracing(MockTracer())
# Request attributes recorded as span tags; FAKE_ATTRIBUTE is presumably
# absent from requests, exercising the missing-attribute path — confirm.
OPENTRACING_TRACED_ATTRIBUTES = ['META', 'FAKE_ATTRIBUTE']
| 29.037037 | 71 | 0.728741 |
bb4aeb9eb590ac8c74c30568d3fd8b51b26d49f6 | 22,019 | py | Python | pyani/pyani_orm.py | widdowquinn/pyani | 14a96f30ccebef64d8945e9abf58dc2517ee478b | [
"MIT"
] | 144 | 2015-02-16T11:34:28.000Z | 2022-03-17T09:03:52.000Z | pyani/pyani_orm.py | HuttonICS/pyani | 14a96f30ccebef64d8945e9abf58dc2517ee478b | [
"MIT"
] | 331 | 2015-06-04T16:25:53.000Z | 2022-03-31T05:24:11.000Z | pyani/pyani_orm.py | HuttonICS/pyani | 14a96f30ccebef64d8945e9abf58dc2517ee478b | [
"MIT"
] | 61 | 2015-06-03T00:28:19.000Z | 2022-02-15T08:47:57.000Z | # -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2018-2019
# (c) The University of Strathclyde 2019-2020
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@strath.ac.uk
#
# Leighton Pritchard,
# Strathclyde Institute of Pharmacy and Biomedical Sciences
# University of Strathclyde
# 161 Cathedral Street
# Glasgow
# Scotland,
# G4 0RE
# UK
#
# The MIT License
#
# Copyright (c) 2018-2019 The James Hutton Institute
# Copyright (c) 2019-2020 The University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module providing useful functions for manipulating pyani's SQLite3 db.
This SQLAlchemy-based ORM replaces the previous SQL-based module
"""
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple
import numpy as np # type: ignore
import pandas as pd # type: ignore
from sqlalchemy import and_ # type: ignore
from sqlalchemy import UniqueConstraint, create_engine, Table
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Float, Boolean
from sqlalchemy.ext.declarative import declarative_base # type: ignore
from sqlalchemy.orm import relationship, sessionmaker # type: ignore
from pyani import PyaniException
from pyani.pyani_files import (
get_fasta_and_hash_paths,
load_classes_labels,
read_fasta_description,
read_hash_string,
)
from pyani.pyani_tools import get_genome_length
class PyaniORMException(PyaniException):
    """Exception raised when ORM or database interaction fails.

    Subclasses the package-wide PyaniException so callers may catch either.
    """
# Using the declarative system
# We follow Flask-like naming conventions, so override some of the pyline errors
# Mypy doesn't like dynamic base classes, see https://github.com/python/mypy/issues/2477
Base = declarative_base()  # type: Any  # shared declarative base for all tables
Session = sessionmaker()  # pylint: disable=C0103  # configured/bound in get_session()
# Linker table between genomes and runs tables
# (association table for the Genome <-> Run many-to-many relationship)
rungenome = Table(  # pylint: disable=C0103
    "runs_genomes",
    Base.metadata,
    Column("genome_id", Integer, ForeignKey("genomes.genome_id")),
    Column("run_id", Integer, ForeignKey("runs.run_id")),
)
# Linker table between comparisons and runs tables
# (association table for the Comparison <-> Run many-to-many relationship)
runcomparison = Table(  # pylint: disable=C0103
    "runs_comparisons",
    Base.metadata,
    Column("comparison_id", Integer, ForeignKey("comparisons.comparison_id")),
    Column("run_id", Integer, ForeignKey("runs.run_id")),
)
# Convenience struct for labels and classes
class LabelTuple(NamedTuple):
    """Label and Class for each file."""

    # free-text label string for the genome
    label: str
    # free-text class string for the genome
    class_label: str
class Label(Base):
    """Describes relationship between genome, run and genome label.

    Each genome and run combination can be assigned a single label
    """

    __tablename__ = "labels"

    label_id = Column(Integer, primary_key=True)
    genome_id = Column(Integer, ForeignKey("genomes.genome_id"))
    run_id = Column(Integer, ForeignKey("runs.run_id"))
    label = Column(String)
    class_label = Column(String)

    # Bidirectional links; paired with Genome.labels and Run.labels
    genome = relationship("Genome", back_populates="labels")
    run = relationship("Run", back_populates="labels")

    def __str__(self) -> str:
        """Return string representation of Label table row."""
        return str(
            "Genome ID: {}, Run ID: {}, Label ID: {}, Label: {}, Class: {}".format(
                self.genome_id, self.run_id, self.label_id, self.label, self.class_label
            )
        )

    def __repr__(self) -> str:
        """Return string representation of Label table object."""
        return "<Label(key=({}, {}, {}))>".format(
            self.label_id, self.run_id, self.genome_id
        )
class BlastDB(Base):
    """Describes relationship between genome, run, source BLAST database and query fragments.

    Each genome and run combination can be assigned a single BLAST database
    for the comparisons

    - fragpath      path to fragmented genome (query in ANIb)
    - dbpath        path to source genome database (subject in ANIb)
    - fragsizes     JSONified dict of fragment sizes
    - dbcmd         command used to generate database
    """

    __tablename__ = "blastdbs"

    blastdb_id = Column(Integer, primary_key=True)
    genome_id = Column(Integer, ForeignKey("genomes.genome_id"))
    run_id = Column(Integer, ForeignKey("runs.run_id"))
    fragpath = Column(String)
    dbpath = Column(String)
    fragsizes = Column(String)
    dbcmd = Column(String)

    # Bidirectional links; paired with Genome.blastdbs and Run.blastdbs
    genome = relationship("Genome", back_populates="blastdbs")
    run = relationship("Run", back_populates="blastdbs")

    def __str__(self) -> str:
        """Return string representation of BlastDB table row."""
        # BUGFIX: the previous implementation was copy-pasted from Label and
        # referenced label_id/label/class_label, which do not exist on this
        # class — printing any BlastDB row raised AttributeError. Report this
        # table's own columns instead.
        return "BlastDB {}: Genome ID: {}, Run ID: {}, db: {}, frags: {}".format(
            self.blastdb_id, self.genome_id, self.run_id, self.dbpath, self.fragpath
        )

    def __repr__(self) -> str:
        """Return string representation of BlastDB table object."""
        # BUGFIX: key on this table's own primary key, not Label's label_id
        return "<BlastDB(key=({}, {}, {}))>".format(
            self.blastdb_id, self.run_id, self.genome_id
        )
class Genome(Base):
    """Describes an input genome for a pyani run.

    - genome_id
        primary key
    - genome_hash
        MD5 hash of input genome file (in ``path``)
    - path
        path to FASTA genome file
    - length
        length of genome (total bases)
    - description
        genome description
    """

    __tablename__ = "genomes"
    # The hash uniquely identifies a genome's sequence content, so the same
    # file imported twice maps onto a single row
    __table_args__ = (UniqueConstraint("genome_hash"),)

    genome_id = Column(Integer, primary_key=True)
    genome_hash = Column(String)
    path = Column(String)
    length = Column(Integer)
    description = Column(String)

    # lazy="dynamic" returns query objects so callers can filter further
    labels = relationship("Label", back_populates="genome", lazy="dynamic")
    blastdbs = relationship("BlastDB", back_populates="genome", lazy="dynamic")
    runs = relationship(
        "Run", secondary=rungenome, back_populates="genomes", lazy="dynamic"
    )
    # A genome may appear on either side of a pairwise comparison; the two
    # relationships below are distinguished by the joining foreign key
    query_comparisons = relationship(
        "Comparison",
        back_populates="query",
        primaryjoin="Genome.genome_id == Comparison.query_id",
    )
    subject_comparisons = relationship(
        "Comparison",
        back_populates="subject",
        primaryjoin="Genome.genome_id == Comparison.subject_id",
    )

    def __str__(self) -> str:
        """Return string representation of Genome table row."""
        return str("Genome {}: {}".format(self.genome_id, self.description))

    def __repr__(self) -> str:
        """Return string representation of Genome table object."""
        return "<Genome(id='{}',desc='{}')>".format(self.genome_id, self.description)
class Run(Base):
    """Describes a single pyani run."""

    __tablename__ = "runs"

    run_id = Column(Integer, primary_key=True)
    method = Column(String)
    cmdline = Column(String)
    date = Column(DateTime)
    status = Column(String)
    name = Column(String)
    # Summary result matrices for the whole run, written by
    # update_comparison_matrices() below
    df_identity = Column(String)  # JSON-encoded Pandas dataframe
    df_coverage = Column(String)  # JSON-encoded Pandas dataframe
    df_alnlength = Column(String)  # JSON-encoded Pandas dataframe
    df_simerrors = Column(String)  # JSON-encoded Pandas dataframe
    df_hadamard = Column(String)  # JSON-encoded Pandas dataframe

    # Many-to-many links via the runs_genomes/runs_comparisons linker tables
    genomes = relationship(
        "Genome", secondary=rungenome, back_populates="runs", lazy="dynamic"
    )
    comparisons = relationship(
        "Comparison", secondary=runcomparison, back_populates="runs", lazy="dynamic"
    )
    labels = relationship("Label", back_populates="run", lazy="dynamic")
    blastdbs = relationship("BlastDB", back_populates="run", lazy="dynamic")

    def __str__(self) -> str:
        """Return string representation of Run table row."""
        return str("Run {}: {} ({})".format(self.run_id, self.name, self.date))

    def __repr__(self) -> str:
        """Return string representation of Run table object."""
        return "<Run(run_id={})>".format(self.run_id)
class Comparison(Base):
    """Describes a single pairwise comparison between two genomes."""

    __tablename__ = "comparisons"
    # A comparison is unique for a (query, subject) pair under a specific
    # program/version and parameter settings; reruns reuse the same row
    __table_args__ = (
        UniqueConstraint(
            "query_id", "subject_id", "program", "version", "fragsize", "maxmatch"
        ),
    )

    comparison_id = Column(Integer, primary_key=True)
    query_id = Column(Integer, ForeignKey("genomes.genome_id"), nullable=False)
    subject_id = Column(Integer, ForeignKey("genomes.genome_id"), nullable=False)
    aln_length = Column(Integer)
    sim_errs = Column(Integer)
    identity = Column(Float)
    cov_query = Column(Float)
    cov_subject = Column(Float)
    program = Column(String)
    version = Column(String)
    fragsize = Column(Integer)
    maxmatch = Column(Boolean)

    # Paired with Genome.query_comparisons/Genome.subject_comparisons
    query = relationship(
        "Genome", foreign_keys=[query_id], back_populates="query_comparisons"
    )
    subject = relationship(
        "Genome", foreign_keys=[subject_id], back_populates="subject_comparisons"
    )
    runs = relationship(
        "Run", secondary=runcomparison, back_populates="comparisons", lazy="dynamic"
    )

    def __str__(self) -> str:
        """Return string representation of Comparison table row."""
        # NOTE(review): str.format does not unescape '%%', so this renders a
        # literal double percent sign ("%%ID="); probably a single '%' was
        # intended — confirm before changing user-visible output.
        return str(
            "Query: {}, Subject: {}, %%ID={}, ({} {})".format(
                self.query_id,
                self.subject_id,
                self.identity,
                self.program,
                self.version,
            )
        )

    def __repr__(self) -> str:
        """Return string representation of Comparison table object."""
        return "<Comparison(comparison_id={})>".format(self.comparison_id)
def create_db(dbpath: Path) -> None:
    """Create an empty pyani SQLite3 database at the passed path.

    :param dbpath: path to pyani database
    """
    # Bind an engine to the target file and emit CREATE TABLE statements for
    # every table registered on the declarative Base.
    db_engine = create_engine("sqlite:///{}".format(dbpath), echo=False)
    Base.metadata.create_all(db_engine)
def get_session(dbpath: Path) -> Any:
    """Connect to an existing pyani SQLite3 database and return a session.

    :param dbpath: path to pyani database
    """
    # Configure the module-level session factory against this file, then
    # hand back a freshly-created session.
    Session.configure(bind=create_engine("sqlite:///{}".format(dbpath), echo=False))
    return Session()
def get_comparison_dict(session: Any) -> Dict[Tuple, Any]:
    """Return a dictionary of comparisons in the session database.

    :param session: live SQLAlchemy session of pyani database

    Returns Comparison objects, keyed by (_.query_id, _.subject_id,
    _.program, _.version, _.fragsize, _.maxmatch) tuple
    """
    keyed = {}  # type: Dict[Tuple, Any]
    for row in session.query(Comparison).all():
        lookup_key = (
            row.query_id,
            row.subject_id,
            row.program,
            row.version,
            row.fragsize,
            row.maxmatch,
        )
        keyed[lookup_key] = row
    return keyed
def get_matrix_labels_for_run(session: Any, run_id: int) -> Dict:
    """Return dictionary of genome labels, keyed by row/column ID.

    :param session: live SQLAlchemy session
    :param run_id: the Run.run_id value for matrices

    The labels should be valid for identity, coverage and other complete
    matrix results accessed via the .df_* attributes of a run.

    Labels are returned keyed by the string of the genome ID, for compatibility with
    matplotlib.
    """
    # Join genomes to their per-run labels, restricted to the requested run
    label_query = (
        session.query(Genome.genome_id, Label.label)
        .join(rungenome, Run)
        .join(
            Label, and_(Genome.genome_id == Label.genome_id, Run.run_id == Label.run_id)
        )
        .filter(Run.run_id == run_id)
    )
    labels = {}  # type: Dict
    for row in label_query.all():
        labels[str(row.genome_id)] = row.label
    return labels
def get_matrix_classes_for_run(session: Any, run_id: int) -> Dict[str, List]:
    """Return dictionary of genome classes, keyed by row/column ID.

    :param session: live SQLAlchemy session
    :param run_id: the Run.run_id value for matrices

    The class labels should be valid for identity, coverage and other complete
    matrix results accessed via the .df_* attributes of a run

    Labels are returned keyed by the string of the genome ID, for compatibility with
    matplotlib.
    """
    # Join genomes to their per-run class labels, restricted to this run
    class_query = (
        session.query(Genome.genome_id, Label.class_label)
        .join(rungenome, Run)
        .join(
            Label, and_(Genome.genome_id == Label.genome_id, Run.run_id == Label.run_id)
        )
        .filter(Run.run_id == run_id)
    )
    classes = {}  # type: Dict[str, List]
    for row in class_query.all():
        classes[str(row.genome_id)] = row.class_label
    return classes
def filter_existing_comparisons(
    session,
    run,
    comparisons,
    program,
    version,
    fragsize: Optional[int] = None,
    maxmatch: Optional[bool] = None,
) -> List:
    """Filter list of (Genome, Genome) comparisons for those not in the session db.

    :param session: live SQLAlchemy session of pyani database
    :param run: Run object describing parent pyani run
    :param comparisons: list of (Genome, Genome) query vs subject comparisons
    :param program: program used for comparison
    :param version: version of program for comparison
    :param fragsize: fragment size for BLAST databases
    :param maxmatch: maxmatch used with nucmer comparison

    Comparisons already present in the database are associated with the
    passed run; all others are collected and returned as the list of
    comparisons that still need to be executed.
    """
    existing = get_comparison_dict(session)
    still_to_run = []
    for query_genome, subject_genome in comparisons:
        lookup_key = (
            query_genome.genome_id,
            subject_genome.genome_id,
            program,
            version,
            fragsize,
            maxmatch,
        )
        if lookup_key in existing:
            # Known comparison: just attach it to the current run
            run.comparisons.append(existing[lookup_key])
            session.commit()
        else:
            still_to_run.append((query_genome, subject_genome))
    return still_to_run
def add_run(session, method, cmdline, date, status, name):
    """Create a new Run and add it to the session.

    :param session: live SQLAlchemy session of pyani database
    :param method: string describing analysis run type
    :param cmdline: string describing pyani command-line for run
    :param date: datetime object describing analysis start time
    :param status: string describing status of analysis
    :param name: string - name given to the analysis run

    Creates a new Run object with the passed parameters, and returns it.

    :raises PyaniORMException: if the run cannot be created or committed
    """
    try:
        run = Run(method=method, cmdline=cmdline, date=date, status=status, name=name)
    except Exception as err:
        # Chain the underlying exception so the root cause is preserved
        raise PyaniORMException(
            f"Could not create {method} run with command line: {cmdline}"
        ) from err
    try:
        session.add(run)
        session.commit()
    except Exception as err:
        raise PyaniORMException(f"Could not add run {run} to the database") from err
    return run
def add_run_genomes(
    session, run, indir: Path, classpath: Path, labelpath: Path
) -> List:
    """Add genomes for a run to the database.

    :param session: live SQLAlchemy session of pyani database
    :param run: Run object describing the parent pyani run
    :param indir: path to the directory containing genomes
    :param classpath: path to the file containing class information for each genome
    :param labelpath: path to the file containing label information for each genome

    This function expects a single directory (indir) containing all FASTA files
    for a run, and optional paths to plain text files that contain information
    on class and label strings for each genome.

    If the genome already exists in the database, then a Genome object is recovered
    from the database. Otherwise, a new Genome object is created. All Genome objects
    will be associated with the passed Run object.

    The session changes are committed once all genomes and labels are added to the
    database without error, as a single transaction.

    :raises PyaniORMException: if genome files cannot be read, or any database
        operation fails
    """
    # Get list of genome files and paths to class and labels files
    infiles = get_fasta_and_hash_paths(indir)  # paired FASTA/hash files
    class_data = {}  # type: Dict[str,str]
    label_data = {}  # type: Dict[str,str]
    all_keys = []  # type: List[str]
    if classpath:
        class_data = load_classes_labels(classpath)
        all_keys += list(class_data.keys())
    if labelpath:
        label_data = load_classes_labels(labelpath)
        all_keys += list(label_data.keys())
    # Make dictionary of labels and/or classes
    # BUGFIX: all_keys is the *union* of the two files' keys, so direct
    # indexing (label_data[key]) raised KeyError for any genome listed in only
    # one of the files; .get() fills the missing side with "" instead.
    label_dict = {}  # type: Dict
    for key in set(all_keys):
        label_dict[key] = LabelTuple(
            label_data.get(key) or "", class_data.get(key) or ""
        )
    # Get hash and sequence description for each FASTA/hash pair, and add
    # to current session database
    genome_ids = []
    for fastafile, hashfile in infiles:
        try:
            inhash, _ = read_hash_string(hashfile)
            indesc = read_fasta_description(fastafile)
        except Exception as err:
            raise PyaniORMException(
                "Could not read genome files for database import"
            ) from err
        abspath = fastafile.absolute()
        genome_len = get_genome_length(abspath)
        # If the genome is not already in the database, add it as a Genome object
        genome = session.query(Genome).filter(Genome.genome_hash == inhash).first()
        if not isinstance(genome, Genome):
            try:
                genome = Genome(
                    genome_hash=inhash,
                    path=str(abspath),
                    length=genome_len,
                    description=indesc,
                )
                session.add(genome)
            except Exception as err:
                raise PyaniORMException(
                    f"Could not add genome {genome} to database"
                ) from err
        # Associate this genome with the current run
        try:
            genome.runs.append(run)
        except Exception as err:
            raise PyaniORMException(
                f"Could not associate genome {genome} with run {run}"
            ) from err
        # If there's an associated class or label for the genome, add it
        if inhash in label_dict:
            try:
                session.add(
                    Label(
                        genome=genome,
                        run=run,
                        label=label_dict[inhash].label,
                        class_label=label_dict[inhash].class_label,
                    )
                )
            except Exception as err:
                raise PyaniORMException(
                    f"Could not add labels for {genome} to database."
                ) from err
        genome_ids.append(genome.genome_id)
    try:
        session.commit()
    except Exception as err:
        raise PyaniORMException("Could not commit new genomes in database.") from err
    return genome_ids
def update_comparison_matrices(session, run) -> None:
    """Update the Run table with summary matrices for the analysis.

    :param session: active pyanidb session via ORM
    :param run: Run ORM object for the current ANIm run
    """
    # Square matrices, rows and columns indexed by the (sorted) genome IDs
    ids = sorted(genome.genome_id for genome in run.genomes.all())
    identity = pd.DataFrame(index=ids, columns=ids, dtype=float)
    coverage = pd.DataFrame(index=ids, columns=ids, dtype=float)
    alnlength = pd.DataFrame(index=ids, columns=ids, dtype=float)
    simerrors = pd.DataFrame(index=ids, columns=ids, dtype=float)
    hadamard = pd.DataFrame(index=ids, columns=ids, dtype=float)

    # Self-comparison diagonals: 1.0 for identity/coverage/simerrors/hadamard,
    # genome length for alignment length
    for frame in (identity, coverage, simerrors, hadamard):
        np.fill_diagonal(frame.values, 1.0)
    for genome in run.genomes.all():
        alnlength.loc[genome.genome_id, genome.genome_id] = genome.length

    # Mirror each pairwise comparison result into the appropriate cells;
    # coverage and hadamard are directional (query vs subject)
    for comparison in run.comparisons.all():
        qry, sbj = comparison.query_id, comparison.subject_id
        identity.loc[qry, sbj] = identity.loc[sbj, qry] = comparison.identity
        coverage.loc[qry, sbj] = comparison.cov_query
        coverage.loc[sbj, qry] = comparison.cov_subject
        alnlength.loc[qry, sbj] = alnlength.loc[sbj, qry] = comparison.aln_length
        simerrors.loc[qry, sbj] = simerrors.loc[sbj, qry] = comparison.sim_errs
        hadamard.loc[qry, sbj] = comparison.identity * comparison.cov_query
        hadamard.loc[sbj, qry] = comparison.identity * comparison.cov_subject

    # Serialise the matrices onto the Run row and persist
    run.df_identity = identity.to_json()
    run.df_coverage = coverage.to_json()
    run.df_alnlength = alnlength.to_json()
    run.df_simerrors = simerrors.to_json()
    run.df_hadamard = hadamard.to_json()
    session.commit()
| 35.978758 | 93 | 0.660793 |
bc3846533f2b5a9312e69cffa6859ba819e0236f | 2,758 | py | Python | model_zoo/wide_and_deep/eval.py | wudenggang/mindspore | 95e75c3119909cc5c7c3098232851d1d7bc4ef8c | [
"Apache-2.0"
] | null | null | null | model_zoo/wide_and_deep/eval.py | wudenggang/mindspore | 95e75c3119909cc5c7c3098232851d1d7bc4ef8c | [
"Apache-2.0"
] | null | null | null | model_zoo/wide_and_deep/eval.py | wudenggang/mindspore | 95e75c3119909cc5c7c3098232851d1d7bc4ef8c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_training """
import os
from mindspore import Model, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel
from src.callbacks import LossCallBack, EvalCallBack
from src.datasets import create_dataset
from src.metrics import AUCMetric
from src.config import WideDeepConfig
def get_WideDeep_net(config):
    """
    Get network of wide&deep model.
    """
    # Build the backbone once, then wrap it for training (loss + step) and
    # for evaluation (sigmoid prediction head).
    backbone = WideDeepModel(config)
    train_net = TrainStepWrap(NetWithLossClass(backbone, config))
    eval_net = PredictWithSigmoid(backbone)
    return train_net, eval_net
class ModelBuilder():
    """
    Wide and deep model builder.

    Supplies the training callbacks and the (train, eval) network pair
    used by the evaluation entry point.
    """
    def __init__(self):
        pass

    def get_hook(self):
        """Placeholder for a generic hook; none is currently provided."""
        pass

    def get_train_hook(self):
        """Return the list of training callbacks (LossCallBack only).

        BUGFIX: the original code called int(os.getenv('DEVICE_ID'))
        unconditionally, which raised TypeError whenever DEVICE_ID was not
        set in the environment, even though the guarded branch was a no-op.
        The environment variable is now read defensively.
        """
        hooks = [LossCallBack()]
        device_id = os.getenv('DEVICE_ID')
        if device_id is not None and int(device_id) == 0:
            pass  # reserved for rank-0-only callbacks
        return hooks

    def get_net(self, config):
        """Return the (train_net, eval_net) pair built from config."""
        return get_WideDeep_net(config)
def test_eval(config):
    """
    test evaluate
    """
    # Build the evaluation dataset from the configured path/batch size
    ds_eval = create_dataset(config.data_path, train_mode=False, epochs=2,
                             batch_size=config.batch_size)
    print("ds_eval.size: {}".format(ds_eval.get_dataset_size()))
    # Construct the networks and restore the trained parameters
    train_net, eval_net = ModelBuilder().get_net(config)
    load_param_into_net(eval_net, load_checkpoint(config.ckpt_path))
    # Evaluate with AUC as the metric, reporting via the eval callback
    auc_metric = AUCMetric()
    model = Model(train_net, eval_network=eval_net, metrics={"auc": auc_metric})
    model.eval(ds_eval, callbacks=EvalCallBack(model, ds_eval, auc_metric, config))
if __name__ == "__main__":
    # Parse command-line overrides into the config, then run evaluation
    widedeep_config = WideDeepConfig()
    widedeep_config.argparse_init()
    # Graph mode on the configured device (e.g. Ascend/GPU per config)
    context.set_context(mode=context.GRAPH_MODE, device_target=widedeep_config.device_target)
    test_eval(widedeep_config)
| 28.729167 | 96 | 0.70087 |
4caed4f5599e3832a7b674f99427c51f03a36c31 | 73,714 | py | Python | django/db/models/fields/related.py | adambrenecki/django | 28a571348bca9c5a3c137e495e7d3c9349a5bd56 | [
"BSD-3-Clause"
] | 1 | 2020-02-08T11:04:08.000Z | 2020-02-08T11:04:08.000Z | django/db/models/fields/related.py | adambrenecki/django | 28a571348bca9c5a3c137e495e7d3c9349a5bd56 | [
"BSD-3-Clause"
] | null | null | null | django/db/models/fields/related.py | adambrenecki/django | 28a571348bca9c5a3c137e495e7d3c9349a5bd56 | [
"BSD-3-Clause"
] | null | null | null | from operator import attrgetter
from django.db import connection, connections, router
from django.db.backends import util
from django.db.models import signals
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject, PathInfo
from django.db.models.query import QuerySet
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry, cached_property
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
    """
    Adds a lookup on ``cls`` when a related field is defined using a string,
    i.e.::

        class MyModel(Model):
            fk = ForeignKey("AnotherModel")

    This string can be:

    * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
      relation.

    * The name of a model (i.e "AnotherModel") to indicate another model in
      the same app.

    * An app-label and model name (i.e. "someapp.AnotherModel") to indicate
      another model in a different app.

    If the other model hasn't yet been loaded -- almost a given if you're using
    lazy relationships -- then the relation won't be set up until the
    class_prepared signal fires at the end of model initialization.

    operation is the work that must be performed once the relation can be resolved.
    """
    # Check for recursive relations
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        # "self": the related model is the defining model itself
        app_label = cls._meta.app_label
        model_name = cls.__name__
    else:
        # Look for an "app.Model" relation
        if isinstance(relation, six.string_types):
            try:
                app_label, model_name = relation.split(".")
            except ValueError:
                # If we can't split, assume a model in current app
                app_label = cls._meta.app_label
                model_name = relation
        else:
            # it's actually a model class
            app_label = relation._meta.app_label
            model_name = relation._meta.object_name
    # Try to look up the related model, and if it's already loaded resolve the
    # string right away. If get_model returns None, it means that the related
    # model isn't loaded yet, so we need to pend the relation until the class
    # is prepared.
    model = cls._meta.app_cache.get_model(app_label, model_name,
                seed_cache=False, only_installed=False)
    if model:
        operation(field, model, cls)
    else:
        # Queue (cls, field, operation) until class_prepared fires for the
        # related model; see do_pending_lookups below.
        key = (app_label, model_name)
        value = (cls, field, operation)
        cls._meta.app_cache.pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
    """
    Handle any pending relations to the sending model. Sent from class_prepared.
    """
    # Pop (and thereby clear) every lookup queued against this model, then
    # resolve each one against the now-prepared sender class.
    lookup_key = (sender._meta.app_label, sender.__name__)
    pending = sender._meta.app_cache.pending_lookups.pop(lookup_key, [])
    for model_cls, rel_field, resolve in pending:
        resolve(rel_field, sender, model_cls)
# Resolve queued lazy relations as soon as each model class is prepared
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
    """Base class for fields that relate one model to another."""

    def db_type(self, connection):
        '''By default related field will not have a column
        as it relates columns to another table'''
        return None

    def contribute_to_class(self, cls, name, virtual_only=False):
        """Attach this field to model class ``cls`` under ``name``."""
        sup = super(RelatedField, self)
        # Store the opts for related_query_name()
        self.opts = cls._meta
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name, virtual_only=virtual_only)
        if not cls._meta.abstract and self.rel.related_name:
            # Interpolate %(class)s / %(app_label)s placeholders in the
            # user-supplied related_name for concrete models
            related_name = self.rel.related_name % {
                'class': cls.__name__.lower(),
                'app_label': cls._meta.app_label.lower()
            }
            self.rel.related_name = related_name
        other = self.rel.to
        if isinstance(other, six.string_types) or other._meta.pk is None:
            # Target is a lazy string reference (or not fully prepared yet):
            # defer resolution via the class_prepared machinery above
            def resolve_related_class(field, model, cls):
                field.rel.to = model
                field.do_related_class(model, cls)
            add_lazy_relation(cls, self, other, resolve_related_class)
        else:
            self.do_related_class(other, cls)

    def set_attributes_from_rel(self):
        """Fill in name/verbose_name defaults from the related model."""
        self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.rel.to._meta.verbose_name
        self.rel.set_field_name()

    def do_related_class(self, other, cls):
        """Finish field setup once the related model ``other`` is known."""
        self.set_attributes_from_rel()
        self.related = RelatedObject(other, cls, self)
        if not cls._meta.abstract:
            self.contribute_to_related_class(other, self.related)

    def related_query_name(self):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
class RenameRelatedObjectDescriptorMethods(RenameMethodsBase):
    # Metaclass (used via six.with_metaclass below) that keeps the
    # deprecated *_query_set spellings working while steering callers to
    # the new *_queryset names with a DeprecationWarning.
    renamed_methods = (
        ('get_query_set', 'get_queryset', DeprecationWarning),
        ('get_prefetch_query_set', 'get_prefetch_queryset', DeprecationWarning),
    )
class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        self.cache_name = related.get_cache_name()
    def is_cached(self, instance):
        # True when the related object has already been fetched and cached
        # on `instance`.
        return hasattr(instance, self.cache_name)
    def get_queryset(self, **db_hints):
        db = router.db_for_read(self.related.model, **db_hints)
        return self.related.model._base_manager.using(db)
    def get_prefetch_queryset(self, instances):
        # Fetch the related objects for all `instances` in one query and
        # return the 5-tuple expected by the prefetch machinery.
        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        query = {'%s__in' % self.related.field.name: instances}
        qs = self.get_queryset(instance=instances[0]).filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in qs:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        try:
            # Cached lookup first; AttributeError means "not fetched yet".
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                rel_obj = None
            else:
                params = {}
                for lh_field, rh_field in self.related.field.related_fields:
                    params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**params)
                except self.related.model.DoesNotExist:
                    rel_obj = None
                else:
                    # Seed the reverse cache on the fetched object too.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            # Cache even a miss (None) so the query isn't repeated.
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None:
            raise self.related.model.DoesNotExist("%s has no %s." % (
                instance.__class__.__name__,
                self.related.get_accessor_name()))
        else:
            return rel_obj
    def __set__(self, instance, value):
        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null is False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                             (instance._meta.object_name, self.related.get_accessor_name()))
        elif value is not None and not isinstance(value, self.related.model):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                             (value, instance._meta.object_name,
                              self.related.get_accessor_name(), self.related.opts.object_name))
        elif value is not None:
            # Make sure both objects end up on (or are routed to) databases
            # that are allowed to relate to each other.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
        related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
        if None in related_pk:
            raise ValueError('Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                             (value, instance._meta.object_name))
        # Set the value of the related field to the value of the related object's related field
        for index, field in enumerate(self.related.field.local_related_fields):
            setattr(value, field.attname, related_pk[index])
        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        self.field = field_with_rel
        self.cache_name = self.field.get_cache_name()
    def is_cached(self, instance):
        # True when the related object has already been fetched and cached
        # on `instance`.
        return hasattr(instance, self.cache_name)
    def get_queryset(self, **db_hints):
        db = router.db_for_read(self.field.rel.to, **db_hints)
        rel_mgr = self.field.rel.to._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if getattr(rel_mgr, 'use_for_related_fields', False):
            return rel_mgr.using(db)
        else:
            return QuerySet(self.field.rel.to).using(db)
    def get_prefetch_queryset(self, instances):
        # Fetch the related objects for all `instances` in one query and
        # return the 5-tuple expected by the prefetch machinery.
        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        query = {'%s__in' % self.field.related_query_name(): instances}
        qs = self.get_queryset(instance=instances[0]).filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.rel.multiple:
            # One-to-one: also seed the reverse cache on each fetched object.
            rel_obj_cache_name = self.field.related.get_cache_name()
            for rel_obj in qs:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        try:
            # Cached lookup first; AttributeError means "not fetched yet".
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                rel_obj = None
            else:
                params = dict(
                    (rh_field.attname, getattr(instance, lh_field.attname))
                    for lh_field, rh_field in self.field.related_fields)
                qs = self.get_queryset(instance=instance)
                extra_filter = self.field.get_extra_descriptor_filter(instance)
                if isinstance(extra_filter, dict):
                    params.update(extra_filter)
                    qs = qs.filter(**params)
                else:
                    qs = qs.filter(extra_filter, **params)
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                if not self.field.rel.multiple:
                    setattr(rel_obj, self.field.related.get_cache_name(), instance)
            # Cache even a miss (None) so the query isn't repeated.
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None and not self.field.null:
            raise self.field.rel.to.DoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name))
        else:
            return rel_obj
    def __set__(self, instance, value):
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null is False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                             (instance._meta.object_name, self.field.name))
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                             (value, instance._meta.object_name,
                              self.field.name, self.field.rel.to._meta.object_name))
        elif value is not None:
            # Make sure both objects end up on (or are routed to) databases
            # that are allowed to relate to each other.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)
            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.related.get_cache_name(), None)
        # Set the value of the related field
        for lh_field, rh_field in self.field.related_fields:
            try:
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
            except AttributeError:
                # value is None (getattr on None raises): clear the column.
                setattr(instance, lh_field.attname, None)
        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        if value is not None and not self.field.rel.multiple:
            setattr(value, self.field.related.get_cache_name(), instance)
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related  # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        return self.related_manager_cls(instance)
    def __set__(self, instance, value):
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)
    @cached_property
    def related_manager_cls(self):
        # Dynamically create a class that subclasses the related model's default
        # manager.
        superclass = self.related.model._default_manager.__class__
        rel_field = self.related.field
        rel_model = self.related.model
        class RelatedManager(superclass):
            def __init__(self, instance):
                super(RelatedManager, self).__init__()
                self.instance = instance
                self.core_filters = {'%s__exact' % rel_field.name: instance}
                self.model = rel_model
            def get_queryset(self):
                try:
                    # Use prefetched results when they are available.
                    return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
                except (AttributeError, KeyError):
                    db = self._db or router.db_for_read(self.model, instance=self.instance)
                    qs = super(RelatedManager, self).get_queryset().using(db).filter(**self.core_filters)
                    empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
                    for field in rel_field.foreign_related_fields:
                        val = getattr(self.instance, field.attname)
                        if val is None or (val == '' and empty_strings_as_null):
                            # A NULL (or backend-NULL empty string) key
                            # can't match any rows.
                            return qs.none()
                    qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
                    return qs
            def get_prefetch_queryset(self, instances):
                rel_obj_attr = rel_field.get_local_related_value
                instance_attr = rel_field.get_foreign_related_value
                instances_dict = dict((instance_attr(inst), inst) for inst in instances)
                db = self._db or router.db_for_read(self.model, instance=instances[0])
                query = {'%s__in' % rel_field.name: instances}
                qs = super(RelatedManager, self).get_queryset().using(db).filter(**query)
                # Since we just bypassed this class' get_queryset(), we must manage
                # the reverse relation manually.
                for rel_obj in qs:
                    instance = instances_dict[rel_obj_attr(rel_obj)]
                    setattr(rel_obj, rel_field.name, instance)
                cache_name = rel_field.related_query_name()
                return qs, rel_obj_attr, instance_attr, False, cache_name
            def add(self, *objs):
                # Attach each object by pointing its FK at our instance and
                # saving it (one UPDATE per object).
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
                    setattr(obj, rel_field.name, self.instance)
                    obj.save()
            add.alters_data = True
            def create(self, **kwargs):
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).create(**kwargs)
            create.alters_data = True
            def get_or_create(self, **kwargs):
                # Update kwargs with the related object that this
                # ForeignRelatedObjectsDescriptor knows about.
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            get_or_create.alters_data = True
            # remove() and clear() are only provided if the ForeignKey can have a value of null.
            if rel_field.null:
                def remove(self, *objs):
                    val = rel_field.get_foreign_related_value(self.instance)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if rel_field.get_local_related_value(obj) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
                remove.alters_data = True
                def clear(self):
                    # Detach every member with a single bulk UPDATE.
                    self.update(**{rel_field.name: None})
                clear.alters_data = True
        return RelatedManager
def create_many_related_manager(superclass, rel):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects."""
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
                     source_field_name=None, target_field_name=None, reverse=False,
                     through=None, prefetch_cache_name=None):
            super(ManyRelatedManager, self).__init__()
            self.model = model
            self.query_field_name = query_field_name
            source_field = through._meta.get_field(source_field_name)
            source_related_fields = source_field.related_fields
            # Filters restricting the target queryset to rows joined to
            # `instance` through the m2m table.
            self.core_filters = {}
            for lh_field, rh_field in source_related_fields:
                self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
            self.instance = instance
            self.symmetrical = symmetrical
            self.source_field = source_field
            self.source_field_name = source_field_name
            self.target_field_name = target_field_name
            self.reverse = reverse
            self.through = through
            self.prefetch_cache_name = prefetch_cache_name
            self.related_val = source_field.get_foreign_related_value(instance)
            if None in self.related_val:
                raise ValueError('"%r" needs to have a value for field "%s" before '
                                 'this many-to-many relationship can be used.' %
                                 (instance, source_field_name))
            # Even if this relation is not to pk, we require still pk value.
            # The wish is that the instance has been already saved to DB,
            # although having a pk value isn't a guarantee of that.
            if instance.pk is None:
                raise ValueError("%r instance needs to have a primary key value before "
                                 "a many-to-many relationship can be used." %
                                 instance.__class__.__name__)
        def get_queryset(self):
            try:
                # Use prefetched results when they are available.
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
                return super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**self.core_filters)
        def get_prefetch_queryset(self, instances):
            instance = instances[0]
            db = self._db or router.db_for_read(instance.__class__, instance=instance)
            query = {'%s__in' % self.query_field_name: instances}
            qs = super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**query)
            # M2M: need to annotate the query in order to get the primary model
            # that the secondary model was actually related to. We know that
            # there will already be a join on the join table, so we can just add
            # the select.
            # For non-autocreated 'through' models, can't assume we are
            # dealing with PK values.
            fk = self.through._meta.get_field(self.source_field_name)
            join_table = self.through._meta.db_table
            connection = connections[db]
            qn = connection.ops.quote_name
            qs = qs.extra(select=dict(
                ('_prefetch_related_val_%s' % f.attname,
                 '%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields))
            return (qs,
                    lambda result: tuple(getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields),
                    lambda inst: tuple(getattr(inst, f.attname) for f in fk.foreign_related_fields),
                    False,
                    self.prefetch_cache_name)
        # If the ManyToMany relation has an intermediary model,
        # the add and remove methods do not exist.
        if rel.through._meta.auto_created:
            def add(self, *objs):
                self._add_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
            add.alters_data = True
            def remove(self, *objs):
                self._remove_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
                if self.symmetrical:
                    self._remove_items(self.target_field_name, self.source_field_name, *objs)
            remove.alters_data = True
        def clear(self):
            self._clear_items(self.source_field_name)
            # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
            if self.symmetrical:
                self._clear_items(self.target_field_name)
        clear.alters_data = True
        def create(self, **kwargs):
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not self.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = \
                super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True
        def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in join table for the source object
            # target_field_name: the PK fieldname in join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                             (obj, self.instance._state.db, obj._state.db))
                        fk_val = self.through._meta.get_field(
                            target_field_name).get_foreign_related_value(obj)[0]
                        if fk_val is None:
                            raise ValueError('Cannot add "%r": the value for field "%s" is None' %
                                             (obj, target_field_name))
                        new_ids.add(fk_val)
                    elif isinstance(obj, Model):
                        # A model instance of the wrong type.
                        raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
                    else:
                        # Assume a raw primary-key value.
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                # Skip ids that are already linked, to keep rows unique.
                vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
                vals = vals.filter(**{
                    source_field_name: self.related_val[0],
                    '%s__in' % target_field_name: new_ids,
                })
                new_ids = new_ids - set(vals)
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action='pre_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
                # Add the ones that aren't there already
                self.through._default_manager.using(db).bulk_create([
                    self.through(**{
                        '%s_id' % source_field_name: self.related_val[0],
                        '%s_id' % target_field_name: obj_id,
                    })
                    for obj_id in new_ids
                ])
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action='post_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK colname in join table for the source object
            # target_field_name: the PK colname in join table for the target object
            # *objs - objects to remove
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        fk_val = self.through._meta.get_field(
                            target_field_name).get_foreign_related_value(obj)[0]
                        old_ids.add(fk_val)
                    else:
                        old_ids.add(obj)
                # Work out what DB we're operating on
                db = router.db_for_write(self.through, instance=self.instance)
                # Send a signal to the other end if need be.
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action="pre_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
                # Remove the specified objects from the join table
                self.through._default_manager.using(db).filter(**{
                    source_field_name: self.related_val[0],
                    '%s__in' % target_field_name: old_ids
                }).delete()
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action="post_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
        def _clear_items(self, source_field_name):
            db = router.db_for_write(self.through, instance=self.instance)
            # source_field_name: the PK colname in join table for the source object
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action="pre_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
            self.through._default_manager.using(db).filter(**{
                source_field_name: self.related_val
            }).delete()
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action="post_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField pointed at them by
    # some other model (rather than having a ManyToManyField themselves).
    # In the example "publication.article_set", the article_set attribute is a
    # ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related  # RelatedObject instance
    @cached_property
    def related_manager_cls(self):
        # Dynamically create a class that subclasses the related
        # model's default manager.
        return create_many_related_manager(
            self.related.model._default_manager.__class__,
            self.related.field.rel
        )
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        rel_model = self.related.model
        # Note the swapped source/target field names and reverse=True:
        # this descriptor sits on the reverse side of the relation, so
        # queries travel "backwards" through the join table.
        manager = self.related_manager_cls(
            model=rel_model,
            query_field_name=self.related.field.name,
            prefetch_cache_name=self.related.field.related_query_name(),
            instance=instance,
            symmetrical=False,
            source_field_name=self.related.field.m2m_reverse_field_name(),
            target_field_name=self.related.field.m2m_field_name(),
            reverse=True,
            through=self.related.field.rel.through,
        )
        return manager
    def __set__(self, instance, value):
        # Replace the whole related set: clear existing links, then add.
        if not self.related.field.rel.through._meta.auto_created:
            opts = self.related.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    """
    Accessor for the forward side of a many-to-many relation, i.e. on the
    model that actually declares the ManyToManyField. In the example
    "article.publications", the publications attribute is a
    ReverseManyRelatedObjectsDescriptor instance.
    """
    def __init__(self, m2m_field):
        self.field = m2m_field
    @property
    def through(self):
        # Exposed so the intermediate model is easily reachable
        # (e.g. Book.authors.through) for inlines, etc. Implemented as a
        # property to ensure the fully resolved model is returned.
        return self.field.rel.through
    @cached_property
    def related_manager_cls(self):
        # Build (once) a manager class derived from the target model's
        # default manager, augmented with m2m behavior.
        base_manager_cls = self.field.rel.to._default_manager.__class__
        return create_many_related_manager(base_manager_cls, self.field.rel)
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        rel = self.field.rel
        return self.related_manager_cls(
            model=rel.to,
            query_field_name=self.field.related_query_name(),
            prefetch_cache_name=self.field.name,
            instance=instance,
            symmetrical=rel.symmetrical,
            source_field_name=self.field.m2m_field_name(),
            target_field_name=self.field.m2m_reverse_field_name(),
            reverse=False,
            through=rel.through,
        )
    def __set__(self, instance, value):
        through_opts = self.field.rel.through._meta
        if not through_opts.auto_created:
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (through_opts.app_label, through_opts.object_name))
        manager = self.__get__(instance)
        # clear() can change expected output of 'value' queryset, we force evaluation
        # of queryset before clear; ticket #19816
        value = tuple(value)
        manager.clear()
        manager.add(*value)
class ForeignObjectRel(object):
    """
    Descriptor of the "remote" side of a ForeignObject relation; holds the
    options (related_name, on_delete, ...) shared by all relation types.
    """
    def __init__(self, field, to, related_name=None, limit_choices_to=None,
            parent_link=False, on_delete=None, related_query_name=None):
        if not hasattr(to, '_meta'):
            # No _meta means 'to' must be a lazy reference: a model name
            # string or RECURSIVE_RELATIONSHIP_CONSTANT.
            assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.field = field
        self.to = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.multiple = True
        self.parent_link = parent_link
        self.on_delete = on_delete
    def is_hidden(self):
        "Should the related object be hidden?"
        return self.related_name and self.related_name.endswith('+')
    def get_joining_columns(self):
        # The reverse of the owning field's join columns.
        return self.field.get_reverse_joining_columns()
    def get_extra_restriction(self, where_class, alias, related_alias):
        # Delegate with the aliases swapped, since this is the reverse side.
        return self.field.get_extra_restriction(where_class, related_alias, alias)
    def set_field_name(self):
        """
        Sets the related field's name, this is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel()
        """
        # By default a foreign object relates to no single remote field
        # (custom multicolumn joins currently have no remote field).
        self.field_name = None
class ManyToOneRel(ForeignObjectRel):
    """Relation descriptor for ForeignKey-style (many-to-one) relations."""
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
            parent_link=False, on_delete=None, related_query_name=None):
        super(ManyToOneRel, self).__init__(
            field, to,
            related_name=related_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link,
            on_delete=on_delete,
            related_query_name=related_query_name)
        # Name of the target field on 'to'; may be None until
        # set_field_name() fills in the primary key as the default.
        self.field_name = field_name
    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        data = self.to._meta.get_field_by_name(self.field_name)
        field, direct = data[0], data[2]
        if not direct:
            raise FieldDoesNotExist("No related field named '%s'" %
                    self.field_name)
        return field
    def set_field_name(self):
        # Default to the target model's primary key when no explicit
        # field name was given.
        if not self.field_name:
            self.field_name = self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
    """
    Relation descriptor for OneToOneField: a many-to-one relation where at
    most a single related object exists.
    """
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
            parent_link=False, on_delete=None, related_query_name=None):
        super(OneToOneRel, self).__init__(
            field, to, field_name,
            related_name=related_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link,
            on_delete=on_delete,
            related_query_name=related_query_name,
        )
        # Unlike a plain ForeignKey, only one related object may exist.
        self.multiple = False
class ManyToManyRel(object):
    """Relation descriptor holding the options of a ManyToManyField."""
    def __init__(self, to, related_name=None, limit_choices_to=None,
            symmetrical=True, through=None, db_constraint=True, related_query_name=None):
        # An explicit intermediary model manages its own rows, so the two
        # options are mutually exclusive.
        if through and not db_constraint:
            raise ValueError("Can't supply a through model and db_constraint=False")
        self.to = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.symmetrical = symmetrical
        self.multiple = True
        self.through = through
        self.db_constraint = db_constraint
    def is_hidden(self):
        "Should the related object be hidden?"
        return self.related_name and self.related_name.endswith('+')
    def get_related_field(self):
        """
        Returns the field in the to' object to which this relationship is tied
        (this is always the primary key on the target model). Provided for
        symmetry with ManyToOneRel.
        """
        return self.to._meta.pk
class ForeignObject(RelatedField):
    # Field relating arbitrary local columns to arbitrary remote columns.
    # NOTE(review): presumably these flags mean "the target columns must be
    # unique" and "create the reverse accessor on the target model" — the
    # consumers of these attributes are outside this chunk; confirm.
    requires_unique_target = True
    generate_reverse_relation = True
def __init__(self, to, from_fields, to_fields, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
raise ValueError('Related model %r cannot been resolved' % self.rel.to)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field_by_name(from_field_name)[0])
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field_by_name(to_field_name)[0])
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
opts = instance._meta
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if not possible_parent_link or possible_parent_link.primary_key:
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
Returns an extra filter condition for related object fetching when
user does 'instance.fieldname', that is the extra filter is used in
the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, where_class, alias, related_alias):
"""
Returns a pair condition used for joining and subquery pushdown. The
condition is something that responds to as_sql(qn, connection) method.
Note that currently referring both the 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type,
raw_value):
from django.db.models.sql.where import SubqueryConstraint, Constraint, AND, OR
root_constraint = constraint_class()
assert len(targets) == len(sources)
def get_normalized_value(value):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
for source in sources:
# Account for one-to-one relations when sent a different model
while not isinstance(value, source.model) and source.rel:
source = source.rel.to._meta.get_field(source.rel.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
elif not isinstance(value, tuple):
return (value,)
return value
is_multicolumn = len(self.related_fields) > 1
if (hasattr(raw_value, '_as_sql') or
hasattr(raw_value, 'get_compiler')):
root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
[source.name for source in sources], raw_value),
AND)
elif lookup_type == 'isnull':
root_constraint.add(
(Constraint(alias, targets[0].column, targets[0]), lookup_type, raw_value), AND)
elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
and not is_multicolumn)):
value = get_normalized_value(raw_value)
for index, source in enumerate(sources):
root_constraint.add(
(Constraint(alias, targets[index].column, sources[index]), lookup_type,
value[index]), AND)
elif lookup_type in ['range', 'in'] and not is_multicolumn:
values = [get_normalized_value(value) for value in raw_value]
value = [val[0] for val in values]
root_constraint.add(
(Constraint(alias, targets[0].column, sources[0]), lookup_type, value), AND)
elif lookup_type == 'in':
values = [get_normalized_value(value) for value in raw_value]
for value in values:
value_constraint = constraint_class()
for index, target in enumerate(targets):
value_constraint.add(
(Constraint(alias, target.column, sources[index]), 'exact', value[index]),
AND)
root_constraint.add(value_constraint, OR)
else:
raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
return root_constraint
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.model._meta.swapped:
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
    # Single-column foreign key: a ForeignObject joining the implicit local
    # "<name>_id" column to one field (by default the pk) on the target model.
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('%(model)s instance with pk %(pk)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")

    def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
                 db_constraint=True, **kwargs):
        # 'to' may be a model class, an "app.Model" string, or 'self'.
        try:
            to._meta.object_name.lower()
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)

        # FKs are indexed by default.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True

        self.db_constraint = db_constraint

        kwargs['rel'] = rel_class(
            self, to, to_field,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            parent_link=kwargs.pop('parent_link', False),
            on_delete=kwargs.pop('on_delete', CASCADE),
        )
        super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)

    def deconstruct(self):
        """Return (name, path, args, kwargs) so migrations can recreate the field."""
        name, path, args, kwargs = super(ForeignKey, self).deconstruct()
        # Handle the simpler arguments
        if self.db_index:
            # True is the ForeignKey default, so omit it from kwargs.
            del kwargs['db_index']
        else:
            kwargs['db_index'] = False
        if self.db_constraint is not True:
            kwargs['db_constraint'] = self.db_constraint
        if self.rel.on_delete is not CASCADE:
            kwargs['on_delete'] = self.rel.on_delete
        # Rel needs more work.
        if self.rel.field_name:
            kwargs['to_field'] = self.rel.field_name
        if isinstance(self.rel.to, six.string_types):
            kwargs['to'] = self.rel.to
        else:
            kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
        return name, path, args, kwargs

    @property
    def related_field(self):
        # The single remote field this FK points at.
        return self.foreign_related_fields[0]

    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.rel.to._meta
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
        return pathinfos

    def validate(self, value, model_instance):
        """Check that `value` references an existing row of the target model."""
        # Parent links (MTI) are maintained by the framework; skip validation.
        if self.rel.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return

        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.rel.to._default_manager.using(using).filter(
            **{self.rel.field_name: value}
        )
        qs = qs.complex_filter(self.rel.limit_choices_to)
        if not qs.exists():
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'model': self.rel.to._meta.verbose_name, 'pk': value},
            )

    def get_attname(self):
        # The raw key value is stored under "<name>_id".
        return '%s_id' % self.name

    def get_attname_column(self):
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column

    def get_validator_unique_lookup_type(self):
        return '%s__%s__exact' % (self.name, self.related_field.name)

    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.related_field.attname)
        return field_default

    def get_db_prep_save(self, value, connection):
        # Empty string and None both map to SQL NULL in the FK column;
        # otherwise delegate conversion to the target field.
        if value == '' or value is None:
            return None
        else:
            return self.related_field.get_db_prep_save(value,
                connection=connection)

    def value_to_string(self, obj):
        """Serialize this field's value from `obj` (used by serializers)."""
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_text(choice_list[1][0])
        return super(ForeignKey, self).value_to_string(obj)

    def contribute_to_related_class(self, cls, related):
        super(ForeignKey, self).contribute_to_related_class(cls, related)
        # Late-bind the target field name for string references ('app.Model').
        if self.rel.field_name is None:
            self.rel.field_name = cls._meta.pk.name

    def formfield(self, **kwargs):
        """Return a ModelChoiceField over the target model's queryset."""
        db = kwargs.pop('using', None)
        if isinstance(self.rel.to, six.string_types):
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.rel.to))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
            'to_field_name': self.rel.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)

    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.related_field
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                 isinstance(rel_field, (PositiveIntegerField,
                                        PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)

    def db_parameters(self, connection):
        return {"type": self.db_type(connection), "check": []}
class OneToOneField(ForeignKey):
    """
    A unique ForeignKey: carries an implicit "unique" constraint, and the
    reverse side of the relation resolves to the single pointed-to object
    instead of a list.
    """
    description = _("One-to-one relationship")

    def __init__(self, to, to_field=None, **kwargs):
        # A one-to-one is just a unique foreign key with a OneToOneRel.
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(OneToOneField, self).deconstruct()
        # 'unique' is implied by the field type; don't serialize it.
        kwargs.pop('unique', None)
        return name, path, args, kwargs

    def contribute_to_related_class(self, cls, related):
        setattr(cls, related.get_accessor_name(), SingleRelatedObjectDescriptor(related))

    def formfield(self, **kwargs):
        # Parent links are an MTI implementation detail; no form field.
        return None if self.rel.parent_link else super(OneToOneField, self).formfield(**kwargs)

    def save_form_data(self, instance, data):
        # Assign the related object itself, or the raw key value, depending
        # on what the form handed us.
        attr = self.name if isinstance(data, self.rel.to) else self.attname
        setattr(instance, attr, data)
def create_many_to_many_intermediary_model(field, klass):
    """
    Build the auto-created 'through' model for a ManyToManyField that has no
    explicit intermediary. `field` is the M2M field; `klass` is the model
    that declares it.
    """
    from django.db import models
    managed = True
    if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
        # Target referenced by "app.Model" name: defer the 'managed'
        # decision until the lazy reference resolves.
        to_model = field.rel.to
        to = to_model.split('.')[-1]

        def set_managed(field, model, cls):
            field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
        add_lazy_relation(klass, field, to_model, set_managed)
    elif isinstance(field.rel.to, six.string_types):
        # Recursive ('self') reference.
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    else:
        # Target is a concrete model class.
        to = field.rel.to._meta.object_name
        to_model = field.rel.to
        managed = klass._meta.managed or to_model._meta.managed
    name = '%s_%s' % (klass._meta.object_name, field.name)
    if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
        # Self-referential: disambiguate the two FK column names.
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.model_name
        to = to.lower()
    meta = type('Meta', (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
        'app_cache': field.model._meta.app_cache,
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint),
        to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint)
    })
class ManyToManyField(RelatedField):
    """
    Many-to-many relation, joined through an intermediary model that is
    either supplied explicitly ('through') or auto-created.
    """
    description = _("Many-to-many relationship")

    def __init__(self, to, db_constraint=True, **kwargs):
        # 'to' may be a model class, an "app.Model" string, or 'self'.
        try:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
            # Class names must be ASCII in Python 2.x, so we forcibly coerce it here to break early if there's a problem.
            to = str(to)

        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(to,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None),
            db_constraint=db_constraint,
        )

        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."

        super(ManyToManyField, self).__init__(**kwargs)

    def deconstruct(self):
        """Return (name, path, args, kwargs) so migrations can recreate the field."""
        name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
        # Handle the simpler arguments.
        if self.rel.db_constraint is not True:
            # Bug fix: the flag is stored on self.rel, never on the field
            # itself; reading self.db_constraint raised AttributeError.
            kwargs['db_constraint'] = self.rel.db_constraint
        if "help_text" in kwargs:
            del kwargs['help_text']
        # Rel needs more work.
        if isinstance(self.rel.to, six.string_types):
            kwargs['to'] = self.rel.to
        else:
            kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
        return name, path, args, kwargs

    def _get_path_info(self, direct=False):
        """
        Called by both direct and indirect m2m traversal; builds the two-hop
        join path through the intermediary model.
        """
        pathinfos = []
        int_model = self.rel.through
        linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
        linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
        if direct:
            join1infos = linkfield1.get_reverse_path_info()
            join2infos = linkfield2.get_path_info()
        else:
            join1infos = linkfield2.get_reverse_path_info()
            join2infos = linkfield1.get_path_info()
        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
        return pathinfos

    def get_path_info(self):
        return self._get_path_info(direct=True)

    def get_reverse_path_info(self):
        return self._get_path_info(direct=False)

    def get_choices_default(self):
        return Field.get_choices(self, include_blank=False)

    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                      connection.ops.max_name_length())

    def _get_m2m_attr(self, related, attr):
        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)

    def _get_m2m_reverse_attr(self, related, attr):
        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
                if related.model == related.parent_model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                else:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)

    def value_to_string(self, obj):
        """Serialize this field's value from `obj` (used by serializers)."""
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_text(data)

    def contribute_to_class(self, cls, name):
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
            self.rel.related_name = "%s_rel_+" % name

        super(ManyToManyField, self).contribute_to_class(cls, name)

        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        #  3) The class owning the m2m field has been swapped out.
        if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
            self.rel.through = create_many_to_many_intermediary_model(self, cls)

        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))

        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)

        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, six.string_types):
            def resolve_through_model(field, model, cls):
                field.rel.through = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)

    def contribute_to_related_class(self, cls, related):
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # and swapped models don't get a related descriptor.
        if not self.rel.is_hidden() and not related.model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))

        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')

        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')

        get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name

    def set_attributes_from_rel(self):
        # M2M fields don't copy attributes from the target field.
        pass

    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()

    def save_form_data(self, instance, data):
        setattr(instance, self.attname, data)

    def formfield(self, **kwargs):
        """Return a ModelMultipleChoiceField over the target model's queryset."""
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)

    def db_type(self, connection):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None

    def db_parameters(self, connection):
        return {"type": None, "check": None}
| 46.772843 | 228 | 0.630369 |
dc190f9b6feda41e3b9eaac02f5051d0598853fc | 2,357 | py | Python | a2c_ppo_acktr/algo/sog/block_coordinate_search.py | ArashVahabpour/sog-gail | 90ebdc5a051a015f3b6c801d4b16307d2fbac0ae | [
"MIT"
] | null | null | null | a2c_ppo_acktr/algo/sog/block_coordinate_search.py | ArashVahabpour/sog-gail | 90ebdc5a051a015f3b6c801d4b16307d2fbac0ae | [
"MIT"
] | null | null | null | a2c_ppo_acktr/algo/sog/block_coordinate_search.py | ArashVahabpour/sog-gail | 90ebdc5a051a015f3b6c801d4b16307d2fbac0ae | [
"MIT"
] | null | null | null | import torch
import itertools
from .base_search import BaseSearch
class BlockCoordinateSearch(BaseSearch):
    """
    Block coordinate grid search optimizer over the distribution of points
    in the latent space.
    """
    def __init__(self, actor_critic, args):
        super().__init__(actor_critic, args)
        # Number of latent dimensions perturbed per search step.
        self.block_size = args.block_size
        # Number of full passes over all coordinate blocks.
        self.n_rounds = args.n_rounds

    def _sample(self, old_z, block_idx):
        """
        Takes the best codes and perturbs

        Take old optimum code and repeat code 'latent_batch_size' times
        Then sample 'block_size' blocks from a normal distribution

        Args:
            old_z: batch_size x n_latent
        Returns:
            new_z: batch_size x latent_batch_size x n_latent
        """
        new_z = old_z.unsqueeze(1).repeat(1, self.latent_batch_size, 1)
        # Re-draw only the current coordinate block in-place; the rest of
        # the code is kept at its current optimum.
        new_z[:, :, block_idx * self.block_size:(block_idx + 1) * self.block_size].normal_()
        return new_z

    def resolve_latent_code(self, state, action):
        """
        Find the loss between the optimal fake data and the real data.

        Args:
            state: batch_size x dim_1 x ... x dim_kx
            action: batch_size x dim_1 x ... x dim_ky
        Returns:
            best_z: batch_size x n_latent
        """
        batch_size = action.shape[0]  # to accommodate for the end of the dataset when batch size might change
        if self.shared:
            # the first dimension is later on replicated among all batch elements
            best_z = torch.zeros(1, self.latent_dim, device=self.device)
        else:
            best_z = torch.zeros(batch_size, self.latent_dim, device=self.device)

        # Go back over the latent vector and re-search
        for round_idx, block_idx in itertools.product(range(self.n_rounds),
                                                      range(self.latent_dim // self.block_size)):
            new_z = self._sample(best_z, block_idx)
            if self.shared:
                # batch_size x latent_batch_size x n_latent
                new_z = new_z.expand(batch_size, -1, -1)
            # NOTE(review): search_iter's first return is indexed with
            # [0][None], collapsing to a leading dim of 1 — presumably fine
            # for shared codes, but verify the non-shared path keeps its
            # batch dimension.
            best_z = self.search_iter(new_z, actions=action, states=state)[0][None]

        # NOTE(review): this post-loop expand duplicates the one inside the
        # loop and its result is never used — looks like dead code; confirm
        # and remove.
        if self.shared:
            # batch_size x latent_batch_size x n_latent
            new_z = new_z.expand(batch_size, -1, -1)
        return best_z
| 36.261538 | 110 | 0.615613 |
402dd9173e8abfcceba800118850eec309d23372 | 1,233 | py | Python | example.py | tjddn2615/GazeTracking | 60ef5f5797cb9a61f42bb0257800b2de0de40bce | [
"MIT"
] | 4 | 2020-08-19T10:24:45.000Z | 2020-08-19T13:42:27.000Z | example.py | tjddn2615/GazeTracking | 60ef5f5797cb9a61f42bb0257800b2de0de40bce | [
"MIT"
] | null | null | null | example.py | tjddn2615/GazeTracking | 60ef5f5797cb9a61f42bb0257800b2de0de40bce | [
"MIT"
] | null | null | null | """
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    # We get a new frame from the webcam
    _, frame = webcam.read()

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    # Order matters: blinking takes priority over any gaze direction.
    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_up():
        text = "Looking up"
    elif gaze.is_down():
        text = "Looking down"
    elif gaze.is_center():
        text = "Looking center"

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
    cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

    cv2.imshow("Demo", frame)

    # Esc key (27) exits the demo loop.
    if cv2.waitKey(1) == 27:
        break

# Bug fix: release the camera handle and close the preview window on exit;
# the original leaked the VideoCapture device.
webcam.release()
cv2.destroyAllWindows()
3fb67869634239a005f645848740820bee35f6b4 | 11,331 | py | Python | pybind/nos/v7_1_0/interface/gigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/dad/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/gigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/dad/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/gigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/dad/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class dad(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-interface - based on the path /interface/gigabitethernet/ipv6/ipv6-nd-ra/ipv6-intf-cmds/nd/dad. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # Leaf storage lives in name-mangled slots; no per-instance __dict__.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__attempts','__time',)

  # YANG node name and its REST (CLI) name for this container.
  _yang_name = 'dad'
  _rest_name = 'dad'

  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg wins, then the parent's
    # helper, else disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Same resolution order for the extension-methods dict.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # YANG leaves of this container: dad attempts (uint32, default 2) and
    # retransmit time (seconds, default 1).
    self.__attempts = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="attempts", rest_name="attempts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'number of Neighbor solicitations to send as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='uint32', is_config=True)
    self.__time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)

    # Optional copy-constructor form: dad(other) copies each changed
    # pyangbind element from the supplied object.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'gigabitethernet', u'ipv6', u'ipv6-nd-ra', u'ipv6-intf-cmds', u'nd', u'dad']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'GigabitEthernet', u'ipv6', u'nd', u'dad']
def _get_attempts(self):
"""
Getter method for attempts, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/dad/attempts (uint32)
"""
return self.__attempts
  def _set_attempts(self, v, load=False):
    """
    Setter method for attempts, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/dad/attempts (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_attempts is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_attempts() directly.
    """
    # Unwrap pyangbind union-typed helpers before re-validating the value.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so the restricted range (0..10 within uint32) and the leaf
      # metadata are enforced; an out-of-range value raises below.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="attempts", rest_name="attempts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'number of Neighbor solicitations to send as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      # Surface the generated type description so callers can see what was expected.
      raise ValueError({
        'error-string': """attempts must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="attempts", rest_name="attempts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'number of Neighbor solicitations to send as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='uint32', is_config=True)""",
      })
    self.__attempts = t
    # Parents may register a _set hook to propagate change notifications.
    if hasattr(self, '_set'):
      self._set()
  def _unset_attempts(self):
    # Reset the leaf to its generated default (2 solicitations, range 0..10).
    self.__attempts = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="attempts", rest_name="attempts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'number of Neighbor solicitations to send as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='uint32', is_config=True)
def _get_time(self):
"""
Getter method for time, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/dad/time (common-def:time-interval-sec)
"""
return self.__time
  def _set_time(self, v, load=False):
    """
    Setter method for time, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/dad/time (common-def:time-interval-sec)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_time is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_time() directly.
    """
    # Unwrap pyangbind union-typed helpers before re-validating the value.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so the restricted range (1..5 seconds within uint32) and the
      # leaf metadata are enforced; an out-of-range value raises below.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)
    except (TypeError, ValueError):
      # Surface the generated type description so callers can see what was expected.
      raise ValueError({
        'error-string': """time must be of a type compatible with common-def:time-interval-sec""",
        'defined-type': "common-def:time-interval-sec",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)""",
      })
    self.__time = t
    # Parents may register a _set hook to propagate change notifications.
    if hasattr(self, '_set'):
      self._set()
  def _unset_time(self):
    # Reset the leaf to its generated default (1 second, range 1..5).
    self.__time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)
attempts = __builtin__.property(_get_attempts, _set_attempts)
time = __builtin__.property(_get_time, _set_time)
_pyangbind_elements = {'attempts': attempts, 'time': time, }
| 71.71519 | 789 | 0.72915 |
961398d791e512712f8fcf023ccab17c8cbaa299 | 120 | py | Python | mmdet/datasets/samplers/__init__.py | Qianna00/InstanceLoc | de6bf95f482c04f3b9af4434feff6a38646e0a87 | [
"Apache-2.0"
] | 120 | 2021-02-16T12:06:05.000Z | 2022-03-30T03:38:37.000Z | mmdet/datasets/samplers/__init__.py | Qianna00/InstanceLoc | de6bf95f482c04f3b9af4434feff6a38646e0a87 | [
"Apache-2.0"
] | 19 | 2021-02-22T12:52:31.000Z | 2022-03-07T12:04:03.000Z | mmdet/datasets/samplers/__init__.py | Qianna00/InstanceLoc | de6bf95f482c04f3b9af4434feff6a38646e0a87 | [
"Apache-2.0"
] | 9 | 2021-02-22T02:35:20.000Z | 2022-02-25T05:38:52.000Z | from .group_sampler import DistributedGroupSampler, GroupSampler
# Public API of this sub-package: the sampler classes re-exported above.
__all__ = ['DistributedGroupSampler', 'GroupSampler']
| 30 | 64 | 0.833333 |
f8faeacc525124fa9bfeb2eef9202fd41b303fc4 | 13,222 | py | Python | CodeMapping/CodeWrapper.py | ArielBlG/stackoverflow_mapping | 203f0da09f2190578b83b3c885c57e58e0234573 | [
"MIT"
] | null | null | null | CodeMapping/CodeWrapper.py | ArielBlG/stackoverflow_mapping | 203f0da09f2190578b83b3c885c57e58e0234573 | [
"MIT"
] | null | null | null | CodeMapping/CodeWrapper.py | ArielBlG/stackoverflow_mapping | 203f0da09f2190578b83b3c885c57e58e0234573 | [
"MIT"
] | null | null | null | import stackoverflow_java_queries
# from googlesearch import search
class Task:
def __init__(self):
"""
Task Constructor - object that holds the task attribute.
"""
self.task = None
self.key = 0
self.documentation = ""
self.code = None
self.code_changed = False
def changed_code(self):
"""
changed_code Function - indicates that code has been changed
"""
self.code_changed = True
def get_key(self):
"""
get_key Function - returns the task's key
:return:
"""
return self.key
def set_documentation(self, documentation):
"""
set_documentation - set the documentation of the task, inherit all tasks
:param documentation:
"""
documentation = documentation.replace("/**", '')
documentation = documentation.replace("*/", '')
self.documentation += documentation
def set_key(self, key):
"""
set_key Function - set the map key to the task, inherit to all tasks
:param key:
"""
self.key = key
def set_code(self, code):
"""
set_code Function - sets the task's code
:param code:
"""
if self.code is not None:
self.code_changed = True
self.code = code
# ------------------------------------------------------------------------------
class CodeWrapper(Task):
def __init__(self, query, text):
"""
Code Wrapper Constructor - query that wraps a specific code.
"""
super().__init__()
self.query = query
self.text = text
self.answer_text = None
self.sub_classes = []
self.url = None
self.methods = []
self.imports = []
self.imports_codes = []
self.tags = []
self.score = None
self.post_id = None
def __eq__(self, other):
"""
equality of two queries
:param other:
:return True is 2 queries are equal, otherwise False
"""
if self.query == other.query:
return True
return False
def set_id(self, post_id):
"""
add_id Function - adds the post id
:param post_id:
"""
self.post_id = post_id
def set_tags(self, tags):
"""
add_tags Function - adds the query tags to the map
:param tags:
"""
self.tags = tags
def set_score(self, score):
"""
add_score Function - adds the score to the map
:param score:
"""
self.score = score
def set_url(self, url):
# TODO: to fix function
self.url = url
def get_queries_class(self):
"""
get_queries_class Function - return all queries classes
:return all queries classes
"""
return self.sub_classes
def get_class(self, class_to_return):
"""
get_class Function - return a class by name
:param class_to_return:
:return class task object, None if doesn't exists
"""
for curr_class in self.sub_classes:
if curr_class.get_class_name() in stackoverflow_java_queries.primitive_types:
continue
else:
if curr_class.get_class_name() == class_to_return:
return curr_class
return None
def get_methods(self, method_name):
"""
get_methods Function - find if a method exists
:param method_name:
:return: list of methods
"""
# TODO: handle two functions from same name
return next((x for x in self.methods if x.get_method_name() == method_name), None)
def add_imports(self, _import):
"""
add_imports Function - adds code imports to the class
:param _import:
:return:
"""
self.imports.append(_import)
def add_answer_text(self, answer_text):
"""
add_answer_text Function - adds the answer text to the query
:param answer_text:
"""
self.text += answer_text
def add_methods(self, method):
"""
add_methods Function - creates methods list to find simple codes
:param method:
"""
self.methods.append(method)
def add_class(self, task):
"""
add_class Function - adds a class to the current query
:param task:
"""
self.sub_classes.append(task)
def add_imports_code(self, _import):
"""
add_imports_code Function - adds code imports codes to the class
:param _import:
"""
self.imports_codes.append(_import)
# ------------------------------------------------------------------------------
class ClassTask(Task):
    """Task describing one class: its attributes, methods, constructors,
    implemented interfaces, superclass, nested classes and enums."""

    def __init__(self, class_name):
        """
        ClassTask constructor - builds a task for a specific class.
        :param class_name: name of the class this task describes
        """
        super().__init__()
        self.class_name = class_name
        self.Attributes = []
        self.Methods = []
        self.Implements = []
        self.Extends = None
        self.Constructors = []
        self.sub_classes = []
        self.Enums = []

    def get_class_atts_names(self):
        """
        Return one declaration string per attribute: "Type name" when the
        attribute carries a typed class, otherwise just the bare name.
        :return: list of declaration strings
        """
        att_names = []
        for att in self.Attributes:
            curr_att_dec = att.name
            if att.att_type is not None and att.att_type.class_name is not None:
                curr_att_dec = att.att_type.class_name + " " + curr_att_dec
            att_names.append(curr_att_dec)
        return att_names

    def add_sub_class(self, sub_class):
        """
        add_sub_class Function - records a nested/inner class.
        :param sub_class:
        """
        self.sub_classes.append(sub_class)

    def add_class_enums(self, enum):
        """
        add_class_enums Function - records an enum declared in the class.
        :param enum:
        """
        self.Enums.append(enum)

    def add_implement_class(self, implement_class):
        """
        add_implement_class Function - records an interface this class implements.
        :param implement_class:
        """
        self.Implements.append(implement_class)

    def add_extended_class(self, extended_class):
        """
        add_extended_class Function - records the class this one extends.
        :param extended_class:
        """
        self.Extends = extended_class

    def add_constructors(self, constructor):
        """
        add_constructors Function - records a constructor of the class.
        :param constructor:
        """
        self.Constructors.append(constructor)

    def add_class_methods(self, method):
        """
        add_class_methods Function - records a method of the class.
        :param method:
        """
        self.Methods.append(method)

    def add_class_attributes(self, attribute):
        """
        add_class_attributes Function - records an attribute of the class.
        :param attribute:
        """
        self.Attributes.append(attribute)

    def get_class_object(self):
        """
        get_class_object Function - returns the task attached to this class.
        """
        return self.task

    def get_class_name(self):
        """
        get_class_name Function
        :return: the class's name
        """
        return self.class_name

    def get_class_attributes(self):
        """
        get_class_attributes
        :return: the class's attribute tasks
        """
        return self.Attributes

    def get_class_method(self, method):
        """
        get_class_method Function - look a method up by name.
        :param method: method name
        :return: the method task, or None if it does not exist
        """
        return next((x for x in self.Methods if x.get_method_name() == method), None)

    def get_specific_attribute(self, attribute):
        """
        get_specific_attribute Function - look an attribute up by name.
        :param attribute: attribute name
        :return: the attribute task, or None if it does not exist
        """
        return next((x for x in self.Attributes if x.get_attribute_name() == attribute), None)

    def get_all_method(self):
        """
        get_all_method
        :return: every method task of this class
        """
        return self.Methods

    def get_constructor(self):
        """
        get_constructor Function
        :return: the first recorded constructor, or None when there is none
        """
        if self.Constructors:
            return self.Constructors[0]
        return None

    def __eq__(self, other):
        """
        Two class tasks are equal when they have the same class name.
        :param other: object to compare against
        :return: True if the names match, False otherwise
        """
        # Bug fix: the original called other.get_class_name() unconditionally
        # and raised AttributeError for non-ClassTask operands; returning
        # NotImplemented lets Python fall back to its default comparison.
        if not isinstance(other, ClassTask):
            return NotImplemented
        return self.class_name == other.get_class_name()
# ------------------------------------------------------------------------------
class ClassAttribute(Task):
    """Task describing a single attribute (field) of a class."""

    def __init__(self, class_task, attribute_name, att_type=None, object_type=None):
        """Build an attribute task.

        :param class_task: the class task that owns this attribute
        :param attribute_name: the attribute's name
        :param att_type: optional declared-type task
        :param object_type: optional object type of the attribute
        """
        super().__init__()
        self.class_name = class_task
        self.name = attribute_name
        self.att_type = att_type
        self.object_type = object_type

    def get_att_obj_type(self):
        """Return the attribute's object type."""
        return self.object_type

    def get_attribute_name(self):
        """Return the attribute's name."""
        return self.name

    def get_attribute_class(self):
        """Return the class task that owns this attribute."""
        return self.class_name

    def get_attribute_type(self):
        """Return the attribute's declared-type task (may be None)."""
        return self.att_type
# ------------------------------------------------------------------------------
class MultiTypeClassAttribute(ClassAttribute):
    """Attribute task whose declared type is one of several candidate types."""

    def __init__(self, class_task, attribute_name, att_types, object_type):
        """Build a multi-typed attribute task.

        :param class_task: the class task that owns this attribute
        :param attribute_name: the attribute's name
        :param att_types: list of candidate type tasks
        :param object_type: object type of the attribute
        """
        super().__init__(class_task, attribute_name, object_type=object_type)
        self.types = att_types
# ------------------------------------------------------------------------------
class MethodTask(Task):
    """Task describing one method: its owning class, its attributes,
    its parameters, and the methods it invokes."""

    def __init__(self, method_name, class_task):
        """Build a method task.

        :param method_name: the method's name
        :param class_task: the class task that owns this method
        """
        super().__init__()
        self.task = class_task
        self.Attributes = []
        self.method_name = method_name
        self.calling_methods = []
        self.method_token = None
        self.params = []

    def add_method_calls(self, method):
        """Record *method* as being invoked from this method."""
        self.calling_methods.append(method)

    def add_method_attributes(self, attribute):
        """Record an attribute used by this method (for invocation lookup)."""
        self.Attributes.append(attribute)

    def get_method_name(self):
        """Return this method's name."""
        return self.method_name

    def get_method_super_class(self):
        """Return the class task that owns this method."""
        return self.task

    def get_calling_method(self):
        """Return every method invoked from this one."""
        return self.calling_methods

    def get_attribute(self, attribute):
        """Return the attribute task named *attribute*, or None when absent."""
        return next(
            (a for a in self.Attributes if a.get_attribute_name() == attribute),
            None,
        )

    def find_method_call(self, method_called):
        """Return the recorded invocation named *method_called*, or None.

        Used to check whether an invocation has already been added.
        """
        return next(
            (m for m in self.calling_methods if m.get_method_name() == method_called),
            None,
        )
# ------------------------------------------------------------------------------
class EnumTask(Task):
    """Task describing an enum declaration and its constants."""

    def __init__(self, enum_name, task):
        """Build an enum task.

        :param enum_name: the enum's name
        :param task: the owning (super) task
        """
        super().__init__()
        self.enum_name = enum_name
        self.super_task = task
        self.enum_consts = []

    def add_enum_const(self, const):
        """Append *const* to this enum's list of constants."""
        self.enum_consts.append(const)
| 28.373391 | 102 | 0.568447 |
df8424f222409c2cd454068f3ca5a2c343650c81 | 2,113 | py | Python | _scripts/make_committees_data.py | GCCR/GCCR.github.io | 9400e64ee1eb618bfaeaf7a1c927a04165db191b | [
"MIT"
] | 5 | 2020-03-27T20:01:18.000Z | 2021-06-06T12:41:20.000Z | _scripts/make_committees_data.py | GCCR/GCCR.github.io | 9400e64ee1eb618bfaeaf7a1c927a04165db191b | [
"MIT"
] | 39 | 2020-04-01T23:55:10.000Z | 2022-02-26T07:09:57.000Z | _scripts/make_committees_data.py | GCCR/GCCR.github.io | 9400e64ee1eb618bfaeaf7a1c927a04165db191b | [
"MIT"
] | 3 | 2020-03-29T16:06:02.000Z | 2020-04-07T19:23:37.000Z | import os
import re
import logging
import pandas as pd
from googleapiclient.discovery import build
import yaml
# Silence googleapiclient's noisy discovery-cache message at import time.
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
# load personal websites
# BaseLoader keeps every value as a plain string (no implicit typing).
with open("_data/websites.yml", "r") as f:
    WEBSITES = yaml.load(f, Loader=yaml.BaseLoader)
def member_url(member, websites=None):
    """Render a committee member as an HTML link to their personal website.

    The member string is "Name" optionally followed by one or more
    parenthesized roles, e.g. "Jane Doe (chair)".  When the name has a
    known website the name is wrapped in an anchor tag and the roles are
    re-appended; otherwise the string is returned unchanged.

    :param member: member entry from the committee sheet
    :param websites: optional name->URL mapping; defaults to the
        module-level WEBSITES loaded from _data/websites.yml (backward
        compatible with the original single-argument call)
    :return: HTML string (or the original string when no URL is known)
    """
    lookup = WEBSITES if websites is None else websites
    name, *rest = member.split(" (")
    try:
        url = lookup[name]
    except KeyError:
        # no known website for this member: leave the entry untouched
        return member
    # Bug fix: the original joined *rest* with "" which dropped the " ("
    # separators when a member had more than one parenthetical; joining
    # with " (" reconstructs the original text exactly.
    suffix = f' ({" (".join(rest)}' if rest else ""
    return f'<a href="{url}">{name}</a>{suffix}'
# fetch Google Sheet for members data
# Credentials/sheet id come from the environment; a KeyError here means the
# CI secrets are not configured.
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
COMMITTEES_SPREADSHEET_ID = os.environ["COMMITTEES_SPREADSHEET_ID"]
service = build("sheets", "v4", developerKey=GOOGLE_API_KEY)
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=COMMITTEES_SPREADSHEET_ID,
                            range='Sheet1').execute()
values = result.get('values', [])

# to dataframe
# Header cells look like "<Committee words> <time>"; the last word becomes
# the second level of a (Committee, Time) MultiIndex.
columns = []
for col in values[0]:
    *name, time = col.split()
    columns.append((" ".join(name), time.capitalize()))
n_cols = len(columns)
columns = pd.MultiIndex.from_tuples(columns, names=["Committee", "Time"])
data = []
for row in values[1:]:
    n = len(row)
    # empty strings become None so dropna() can strip them later
    row = [x if x else None for x in row]
    # right-pad short rows: the Sheets API omits trailing empty cells
    padded = row + [None for _ in range(n_cols - n)]
    data.append(padded)
df = pd.DataFrame(data, columns=columns)

# write yaml
# Build {committee: {time: [linked member, ...]}} skipping empty columns,
# then emit it as the hand-rolled YAML list _data/committees.yml expects.
content = {}
for committee in df.columns[1:].droplevel(1).drop_duplicates():
    content[committee] = {}
    for time in df[committee].columns:
        col = (committee, time)
        members = df[col].dropna().to_list()
        if members:
            content[committee][time] = [member_url(m) for m in members]
    # drop committees that ended up with no members at all
    if not content[committee]:
        content.pop(committee)
with open("_data/committees.yml", "w") as f:
    for committee, items in content.items():
        f.write(f"- committee: {committee}\n")
        f.write(f"  listing:\n")
        for time, members in items.items():
            f.write(f"  - time: {time}\n")
            f.write(f"    members: {members}\n")
c6055a2d2fc16d09a29485623dcb97126921071e | 415 | py | Python | frille-lang/lib/python3.6/site-packages/srsly/tests/msgpack/test_subtype.py | frillecode/CDS-spring-2021-language | a0b2116044cd20d4a34b98f23bd2663256c90c5d | [
"MIT"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | frille-lang/lib/python3.6/site-packages/srsly/tests/msgpack/test_subtype.py | frillecode/CDS-spring-2021-language | a0b2116044cd20d4a34b98f23bd2663256c90c5d | [
"MIT"
] | 4 | 2021-06-02T00:49:27.000Z | 2022-01-13T01:59:34.000Z | frille-lang/lib/python3.6/site-packages/srsly/tests/msgpack/test_subtype.py | frillecode/CDS-spring-2021-language | a0b2116044cd20d4a34b98f23bd2663256c90c5d | [
"MIT"
] | 2 | 2021-12-09T07:23:21.000Z | 2022-03-31T06:13:10.000Z | #!/usr/bin/env python
# coding: utf-8
from ...msgpack import packb, unpackb
from collections import namedtuple
class MyList(list):
    """list subclass used to verify msgpack serializes subtypes like list."""
class MyDict(dict):
    """dict subclass used to verify msgpack serializes subtypes like dict."""
class MyTuple(tuple):
    """tuple subclass used to verify msgpack serializes subtypes like tuple."""
# namedtuple fixture: packb must treat it exactly like the plain tuple (x, y).
MyNamedTuple = namedtuple('MyNamedTuple', 'x y')
def test_types():
    """packb must serialize subclass instances identically to their bases."""
    for subtype_value, base_value in (
        (MyDict(), dict()),
        (MyList(), list()),
        (MyNamedTuple(1, 2), (1, 2)),
    ):
        assert packb(subtype_value) == packb(base_value)
| 18.863636 | 53 | 0.66747 |
27d756536e47782ac79ca97c25233df65996839b | 7,867 | py | Python | seamm_dashboard/__init__.py | paulsaxe/seamm_dashboard | 66049c8c58fd34af3bd143157d0138e8fb737f9b | [
"BSD-3-Clause"
] | null | null | null | seamm_dashboard/__init__.py | paulsaxe/seamm_dashboard | 66049c8c58fd34af3bd143157d0138e8fb737f9b | [
"BSD-3-Clause"
] | null | null | null | seamm_dashboard/__init__.py | paulsaxe/seamm_dashboard | 66049c8c58fd34af3bd143157d0138e8fb737f9b | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
from pathlib import Path
import connexion
# from flask_admin import Admin
from flask_debugtoolbar import DebugToolbarExtension
from flask_bootstrap import Bootstrap
from flask_cors import CORS
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_authorize import Authorize
from .jwt_patch import flask_jwt_extended
from .config import config
from .template_filters import replace_empty
from .setup_logging import setup_logging
from .setup_argparsing import options, parser
# Handle versioneer
from ._version import get_versions

__author__ = """Jessica Nash"""
__email__ = "janash@vt.edu"
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions

# Ensure that the projects directory exists.
# NOTE: this runs at import time, so importing the package has filesystem
# side effects (creating <datastore>/projects).
datastore_path = Path(options["datastore"]).expanduser().resolve()
datastore = str(datastore_path)
projects_path = datastore_path / "projects"
projects_path.mkdir(parents=True, exist_ok=True)

# Setup the logging, now that we know where the datastore is
setup_logging(datastore, options)
logger = logging.getLogger("dashboard")

# If there is no database we need to initialize!
db_path = datastore_path / "seamm.db"
if not db_path.exists():
    logger.warning("The database does not exists, so forcing initialization")
    options["initialize"] = True

# Two of the Flask options cannot be reset, and should (apparently) be
# handled with environment variables ... so if they are in the options
# set the correct environment variables. Carefully!
if "env" in options:
    # Warn when the config value overrides an already-set FLASK_ENV.
    if "FLASK_ENV" in os.environ and options["env"] != os.environ["FLASK_ENV"]:
        logger.warning(
            (
                "The environment variable FLASK_ENV is being overidden by "
                "the configuration option 'env' ({})"
            ).format(options["env"])
        )
    os.environ["FLASK_ENV"] = options["env"]
if "debug" in options:
    # Same treatment for FLASK_DEBUG.
    if "FLASK_DEBUG" in os.environ and options["debug"] != os.environ["FLASK_DEBUG"]:
        logger.warning(
            (
                "The environment variable FLASK_DEBUG is being overidden by "
                "the configuration option 'debug' ({})"
            ).format(options["debug"])
        )
    os.environ["FLASK_DEBUG"] = options["debug"]

# continue the setup
# Flask extensions are created unbound here and attached to the app later
# inside create_app() (the standard app-factory pattern).
mail = Mail()
cors = CORS()
bootstrap = Bootstrap()
jwt = flask_jwt_extended.JWTManager()
authorize = Authorize(current_user=flask_jwt_extended.get_current_user)
moment = Moment()
toolbar = DebugToolbarExtension()
db = SQLAlchemy()
ma = Marshmallow()
@jwt.user_lookup_loader
def user_loader_callback(jwt_header, jwt_payload):
    """Resolve a decoded JWT into a User record for flask-jwt-extended.

    Returns None (serialized as null) when no JWT header is present.
    """
    if not jwt_header:
        return None
    from seamm_datastore.database.models import User

    username = jwt_payload["sub"]["username"]
    return User.query.filter_by(username=username).one_or_none()
def create_app(config_name=None):
    """Flask app factory pattern
    separately creating the extensions and later initializing

    :param config_name: optional key into the `config` mapping (e.g.
        "testing"); when None, configuration is taken from the parsed
        command-line/config-file `options`.
    :return: the initialized Flask application (wrapped by connexion)
    """

    # connexion wraps a Flask app and drives routing from swagger.yml.
    conn_app = connexion.App(__name__, specification_dir="./")
    app = conn_app.app
    logger.info("")
    if config_name is not None:
        # Explicit configuration object (used by the test suite).
        logger.info("Configuring from configuration " + config_name)
        app.config.from_object(config[config_name])
        options["initialize"] = False
        options["no_check"] = True
    else:
        # Report where options come from
        # parser = configargparse.get_argument_parser("dashboard")
        logger.info("Where options are set:")
        logger.info(60 * "-")
        for section, tmp in parser.get_options().items():
            origin = parser.get_origins(section)
            for key, value in tmp.items():
                logger.info(f"{key:<19} {origin[key]:<15} {value}")

        # Now set the options!
        logger.info("")
        logger.info("Configuration:")
        logger.info(60 * "-")
        for key, value in options.items():
            # These keys are dashboard-internal and must not leak into
            # Flask's configuration.
            if key not in (
                "env",
                "debug",
                "initialize",
                "log_dir",
                "log_level",
                "console_log_level",
                "dashboard_configfile",
            ):
                key = key.upper()
                if isinstance(value, str):
                    # %datastore% is a placeholder expanded to the real path.
                    value = value.replace("%datastore%", datastore)
                logger.info("\t{:>30s} = {}".format(key, value))
                app.config[key] = value
    logger.info("")
    logger.info(
        "Running in "
        + app.config["ENV"]
        + " mode with database "
        + app.config["SQLALCHEMY_DATABASE_URI"]
    )

    # Authorization configuration
    # Owner/group/other permission defaults for flask-authorize.
    app.config["AUTHORIZE_DEFAULT_PERMISSIONS"] = dict(
        owner=["read", "update", "delete", "create", "manage"],
        group=["read", "update"],
        other=[""],
    )
    app.config["AUTHORIZE_ALLOW_ANONYMOUS_ACTIONS"] = True

    # Set application to store JWTs in cookies.
    app.config["JWT_TOKEN_LOCATION"] = ["cookies", "headers"]

    # Set the cookie paths
    # app.config["JWT_ACCESS_COOKIE_PATH"] = "/api"
    app.config["JWT_REFRESH_COOKIE_PATH"] = "/api/auth/token/refresh"

    # Cookie security
    # NOTE(review): JWT_COOKIE_SECURE is False, so cookies are also sent over
    # plain HTTP — confirm this is intended outside development.
    app.config["JWT_COOKIE_SECURE"] = False
    app.config["JWT_COOKIE_CSRF_PROTECT"] = True
    app.config["JWT_CSRF_ACCESS_PATH"] = "/api/"

    # Register the OpenAPI-driven routes.
    conn_app.add_api("swagger.yml")

    db.init_app(app)

    with app.app_context():
        from seamm_datastore.database.build import import_datastore, _build_initial

        # Rebuild the schema when initialization was requested or when the
        # app is created for the test suite.
        if options["initialize"] or config_name and config_name.lower() == "testing":
            logger.warning("Removing all previous jobs from the database.")
            db.drop_all()
            logger.info("Tables dropped creating tables")
            db.create_all()

            # Create database using other interface for consistency.
            logger.info("Importing jobs...")
            _build_initial(db.session, "default")

        if config_name is None or not config_name.lower() == "testing":
            # Log in as user running
            import flask_authorize.plugin
            from seamm_datastore.database.models import User

            # Temporarily impersonate user id 2 so the import passes the
            # authorization checks, then restore the JWT-based lookup.
            flask_authorize.plugin.CURRENT_USER = User.query.filter_by(id=2).one
            logger.warning("Importing any jobs into the database.")
            import_datastore(db.session, str(projects_path))
            flask_authorize.plugin.CURRENT_USER = flask_jwt_extended.get_current_user

    # Blueprints for the HTML-facing routes.
    from .routes.auth import auth as auth_blueprint
    from .routes.main import main as main_blueprint
    from .routes.jobs import jobs as jobs_blueprint
    from .routes.flowcharts import flowcharts as flowchart_blueprint
    from .routes.projects import projects as project_blueprint
    from .routes.admin import admin as admin_blueprint
    from .routes.main import errors

    app.register_blueprint(auth_blueprint)
    app.register_blueprint(main_blueprint)
    app.register_blueprint(jobs_blueprint)
    app.register_blueprint(flowchart_blueprint)
    app.register_blueprint(project_blueprint)
    app.register_blueprint(admin_blueprint)
    app.register_error_handler(404, errors.not_found)

    # init
    # Bind the module-level extension singletons to this app instance.
    mail.init_app(app)
    cors.init_app(app)
    bootstrap.init_app(app)
    authorize.init_app(app)
    jwt.init_app(app)
    moment.init_app(app)

    # jinja template
    app.jinja_env.filters["empty"] = replace_empty

    logger.info("")
    logger.info("Final configuration:")
    logger.info(60 * "-")
    for key, value in app.config.items():
        logger.info("\t{:>30s} = {}".format(key, value))
    logger.info("")
    logger.info(f"{app.url_map}")

    return app
| 32.508264 | 85 | 0.66048 |
b4614af266cc48e90173a3bf20f9e343ed9f82fc | 2,816 | py | Python | scraper/proxy.py | FTJiang/Scraper-for-Stackoverflow | 7a482ab853a3b0f3092a3e41ac36395c7f89cb2e | [
"MIT"
] | 1 | 2019-01-15T07:44:38.000Z | 2019-01-15T07:44:38.000Z | scraper/proxy.py | FTJiang/Scraper-for-Stackoverflow | 7a482ab853a3b0f3092a3e41ac36395c7f89cb2e | [
"MIT"
] | null | null | null | scraper/proxy.py | FTJiang/Scraper-for-Stackoverflow | 7a482ab853a3b0f3092a3e41ac36395c7f89cb2e | [
"MIT"
] | null | null | null | import urllib2
import urllib
from bs4 import BeautifulSoup
import RandomAgent
import os
import socket
#function used to get IP, port and type of proxies from website, stored in file named prxy_ip.txt
def getProxyFile():
url = "https://hidemy.name/en/proxy-list"
#temp = "'User-Agent':"+'\''+RandomAgent.Agent()+'\''
#print temp
hdr = {}
#use randomized user-agent
User_Agent = RandomAgent.Agent()
hdr['User-Agent'] = User_Agent
req = urllib2.Request(url,headers=hdr)
#if file has exist, delete and recreate
if os.path.isfile('proxy_ip.txt'):
os.remove('proxy_ip.txt')
ip_file = open('proxy_ip.txt','w')
#try:
page = urllib2.urlopen(req).read()
#except urllib2.HTTPError,e:
# print e.code
#print page.read()
soup = BeautifulSoup(page,'html.parser')
#extract proxies from source html file
proxies = soup.findAll('tr')
ip = ''
port = ''
host = ''
for ips in proxies:
cnt = 0
for temp in ips.findAll('td'):
if cnt == 0: #IP is in the forth td tag
ip = temp.text
elif cnt == 1: #port is in the forth td tag
port = temp.text
elif cnt == 4: #type is in the forth td tag
host = temp.text
cnt+=1
cnt == 0
#combine ip, port and type to a string and write it into file
ip_temp = ip+'\t'+port+'\t'+host+'\n'
ip_file.write(ip_temp)
ip_file.close()
'''
function used to delete timeout IP from record file
there is no function in python to delete specific line, so we rewrite the whole file without the line we want to delete
'''
def delete(ip):
f = open('proxy_ip.txt','r')
lines = f.readlines()
f.close()
f = open('proxy_ip.txt','w')
for line1 in lines:
if ip not in line1:
f.write(line1)
f.close()
#function used to test the proxies from website and delete the timeout proxies from file
def viability():
    """Probe every proxy recorded in proxy_ip.txt and drop dead ones.

    Each proxy is used to fetch a known test URL with a 1 second timeout;
    any proxy that raises is removed from the record file via delete().
    """
    socket.setdefaulttimeout(1)
    with open('proxy_ip.txt', 'r') as f:
        lines = f.readlines()
    # This is the url used to test proxies
    url = 'http://ip.chinaz.com/getip.aspx'
    for line in lines:
        ip = line.strip('\n').split('\t')
        # urllib expects proxies as a {scheme: "scheme://host:port"} mapping
        scheme = ip[2].lower()
        proxy_temp = {scheme: scheme + "://" + ip[0] + ":" + ip[1]}
        try:
            urllib.urlopen(url, proxies=proxy_temp).read()
        except Exception as e:
            # This also catches socket timeouts; the original dedicated
            # "except socket.error" clause came after this one and was
            # therefore unreachable, so it has been removed.
            delete(ip[0])
            print("IP with problem : " + str(proxy_temp))
            print(e)
def getProxy():
    """Pop one random proxy from the pool and return it as "ip:port".

    When fewer than 5 proxies remain, the pool is rebuilt (re-scraped and
    re-validated) first. The returned proxy is removed from the record file
    so consecutive calls hand out different proxies.
    """
    with open('proxy_ip.txt', 'r') as f:
        lines = f.readlines()
    # if proxy pool size is less than 5, get a new list.
    # (The old check called os.path.getsize(proxy_ip.txt) -- an unquoted
    # filename, i.e. a NameError -- compared byte size to 5, and then read
    # from an already-closed file handle after refreshing.)
    if len(lines) < 5:
        getProxyFile()
        viability()
        with open('proxy_ip.txt', 'r') as f:
            lines = f.readlines()
    ip = random.choice(lines).strip('\n').split('\t')
    proxy_host = ip[0] + ":" + ip[1]
    delete(ip[0])
    return proxy_host
| 26.074074 | 119 | 0.669034 |
5fece72ccddcab69b360820607c782544be531da | 153,373 | py | Python | pandas/core/series.py | Muktan/pandas | ffa6e20d7dadd262d9035a647dffed9903fc5929 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/series.py | Muktan/pandas | ffa6e20d7dadd262d9035a647dffed9903fc5929 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/series.py | Muktan/pandas | ffa6e20d7dadd262d9035a647dffed9903fc5929 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import annotations
from io import StringIO
from shutil import get_terminal_size
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
lib,
properties,
reshape,
tslibs,
)
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
ArrayLike,
Axis,
Dtype,
DtypeObj,
FrameOrSeriesUnion,
IndexKeyFunc,
NpDtype,
StorageOptions,
ValueKeyFunc,
)
from pandas.compat.numpy import function as nv
from pandas.errors import InvalidIndexError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
convert_dtypes,
maybe_box_native,
maybe_cast_to_extension_array,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool,
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
validate_all_hashable,
)
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
notna,
remove_na_arraylike,
)
from pandas.core import (
algorithms,
base,
generic,
missing,
nanops,
ops,
)
from pandas.core.accessor import CachedAccessor
from pandas.core.apply import series_apply
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
import pandas.core.common as com
from pandas.core.construction import (
create_series_with_explicit_dtype,
extract_array,
is_empty_data,
sanitize_array,
)
from pandas.core.generic import NDFrame
from pandas.core.indexers import (
deprecate_ndim_indexing,
unpack_1tuple,
)
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.api import (
CategoricalIndex,
Float64Index,
Index,
MultiIndex,
ensure_index,
)
import pandas.core.indexes.base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import SingleBlockManager
from pandas.core.internals.construction import sanitize_index
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import (
ensure_key_mapped,
nargsort,
)
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
import pandas.io.formats.format as fmt
import pandas.plotting
if TYPE_CHECKING:
from pandas._typing import (
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
)
from pandas.core.frame import DataFrame
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.resample import Resampler
__all__ = ["Series"]
_shared_doc_kwargs = {
"axes": "index",
"klass": "Series",
"axes_single_arg": "{0 or 'index'}",
"axis": """axis : {0 or 'index'}
Parameter needed for compatibility with DataFrame.""",
"inplace": """inplace : boolean, default False
If True, performs operation inplace and returns None.""",
"unique": "np.ndarray",
"duplicated": "Series",
"optional_by": "",
"optional_mapper": "",
"optional_labels": "",
"optional_axis": "",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
def _coerce_method(converter):
"""
Install the scalar coercion methods.
"""
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError(f"cannot convert the series to {converter}")
wrapper.__name__ = f"__{converter.__name__}__"
return wrapper
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN).
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series. If data is a dict, argument order is
maintained.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like
and index is None, then the keys in the data are used as the index. If the
index is not None, the resulting Series is reindexed with the index values.
dtype : str, numpy.dtype, or ExtensionDtype, optional
Data type for the output Series. If not specified, this will be
inferred from `data`.
See the :ref:`user guide <basics.dtypes>` for more usages.
name : str, optional
The name to give to the Series.
copy : bool, default False
Copy input data.
Examples
--------
Constructing Series from a dictionary with an Index specified
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
>>> ser
a 1
b 2
c 3
dtype: int64
The keys of the dictionary match with the Index values, hence the Index
values have no effect.
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> ser = pd.Series(data=d, index=['x', 'y', 'z'])
>>> ser
x NaN
y NaN
z NaN
dtype: float64
    Note that the Index is first built with the keys from the dictionary.
After this the Series is reindexed with the given Index values, hence we
get all NaN as a result.
"""
_typ = "series"
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
_name: Hashable
_metadata: List[str] = ["name"]
_internal_names_set = {"index"} | generic.NDFrame._internal_names_set
_accessors = {"dt", "cat", "str", "sparse"}
_hidden_attrs = (
base.IndexOpsMixin._hidden_attrs
| generic.NDFrame._hidden_attrs
| frozenset(["compress", "ptp"])
)
# Override cache_readonly bc Series is mutable
hasnans = property(
base.IndexOpsMixin.hasnans.func, doc=base.IndexOpsMixin.hasnans.__doc__
)
__hash__ = generic.NDFrame.__hash__
_mgr: SingleBlockManager
div: Callable[[Series, Any], Series]
rdiv: Callable[[Series, Any], Series]
# ----------------------------------------------------------------------
# Constructors
    def __init__(
        self,
        data=None,
        index=None,
        dtype: Optional[Dtype] = None,
        name=None,
        copy: bool = False,
        fastpath: bool = False,
    ):
        # Fastest path: a pre-built SingleBlockManager with no other args.
        if (
            isinstance(data, SingleBlockManager)
            and index is None
            and dtype is None
            and copy is False
        ):
            # GH#33357 called with just the SingleBlockManager
            NDFrame.__init__(self, data)
            self.name = name
            return
        # we are called internally, so short-circuit
        if fastpath:
            # data is an ndarray, index is defined
            if not isinstance(data, SingleBlockManager):
                data = SingleBlockManager.from_array(data, index)
            if copy:
                data = data.copy()
            if index is None:
                index = data.index
        else:
            name = ibase.maybe_extract_name(name, data, type(self))
            if is_empty_data(data) and dtype is None:
                # gh-17261
                warnings.warn(
                    "The default dtype for empty Series will be 'object' instead "
                    "of 'float64' in a future version. Specify a dtype explicitly "
                    "to silence this warning.",
                    DeprecationWarning,
                    stacklevel=2,
                )
                # uncomment the line below when removing the DeprecationWarning
                # dtype = np.dtype(object)
            if index is not None:
                index = ensure_index(index)
            if data is None:
                data = {}
            if dtype is not None:
                dtype = self._validate_dtype(dtype)
            # Normalize `data` per input type; each branch leaves `data` as
            # something sanitize_array / SingleBlockManager can consume.
            if isinstance(data, MultiIndex):
                raise NotImplementedError(
                    "initializing a Series from a MultiIndex is not supported"
                )
            elif isinstance(data, Index):
                if dtype is not None:
                    # astype copies
                    data = data.astype(dtype)
                else:
                    # GH#24096 we need to ensure the index remains immutable
                    data = data._values.copy()
                copy = False
            elif isinstance(data, np.ndarray):
                if len(data.dtype):
                    # GH#13296 we are dealing with a compound dtype, which
                    # should be treated as 2D
                    raise ValueError(
                        "Cannot construct a Series from an ndarray with "
                        "compound dtype. Use DataFrame instead."
                    )
            elif isinstance(data, Series):
                if index is None:
                    index = data.index
                else:
                    data = data.reindex(index, copy=copy)
                    copy = False
                data = data._mgr
            elif is_dict_like(data):
                data, index = self._init_dict(data, index, dtype)
                dtype = None
                copy = False
            elif isinstance(data, SingleBlockManager):
                if index is None:
                    index = data.index
                elif not data.index.equals(index) or copy:
                    # GH#19275 SingleBlockManager input should only be called
                    # internally
                    raise AssertionError(
                        "Cannot pass both SingleBlockManager "
                        "`data` argument and a different "
                        "`index` argument. `copy` must be False."
                    )
            elif is_extension_array_dtype(data):
                pass
            elif isinstance(data, (set, frozenset)):
                raise TypeError(f"'{type(data).__name__}' type is unordered")
            else:
                data = com.maybe_iterable_to_list(data)
            if index is None:
                # no index supplied: default to RangeIndex(len(data))
                if not is_list_like(data):
                    data = [data]
                index = ibase.default_index(len(data))
            elif is_list_like(data):
                sanitize_index(data, index)
        # create/copy the manager
        if isinstance(data, SingleBlockManager):
            if dtype is not None:
                data = data.astype(dtype=dtype, errors="ignore", copy=copy)
            elif copy:
                data = data.copy()
        else:
            data = sanitize_array(data, index, dtype, copy)
            data = SingleBlockManager.from_array(data, index)
        generic.NDFrame.__init__(self, data)
        self.name = name
        self._set_axis(0, index, fastpath=True)
    def _init_dict(self, data, index=None, dtype: Optional[Dtype] = None):
        """
        Derive the "_mgr" and "index" attributes of a new Series from a
        dictionary input.

        Parameters
        ----------
        data : dict or dict-like
            Data used to populate the new Series.
        index : Index or index-like, default None
            Index for the new Series: if None, use dict keys.
        dtype : dtype, default None
            The dtype for the new Series: if None, infer from data.

        Returns
        -------
        _data : BlockManager for the new Series
        index : index for the new Series
        """
        # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
        # raises KeyError), so we iterate the entire dict, and align
        if data:
            # GH:34717, issue was using zip to extract key and values from data.
            # using generators in effects the performance.
            # Below is the new way of extracting the keys and values
            keys = tuple(data.keys())
            values = list(data.values())  # Generating list of values- faster way
        elif index is not None:
            # fastpath for Series(data=None). Just use broadcasting a scalar
            # instead of reindexing.
            values = na_value_for_dtype(pandas_dtype(dtype))
            keys = index
        else:
            keys, values = (), []
        # Input is now list-like, so rely on "standard" construction:
        # TODO: passing np.float64 to not break anything yet. See GH-17261
        s = create_series_with_explicit_dtype(
            values, index=keys, dtype=dtype, dtype_if_empty=np.float64
        )
        # Now we just make sure the order is respected, if any
        if data and index is not None:
            s = s.reindex(index, copy=False)
        return s._mgr, s.index
# ----------------------------------------------------------------------
    @property
    def _constructor(self) -> Type[Series]:
        # Class used by internal pandas code to build same-dimension results.
        return Series
    @property
    def _constructor_expanddim(self) -> Type[DataFrame]:
        """
        Used when a manipulation result has one higher dimension as the
        original, such as Series.to_frame()
        """
        # imported here to avoid a circular import at module load time
        from pandas.core.frame import DataFrame
        return DataFrame
    # types
    @property
    def _can_hold_na(self) -> bool:
        # Whether the backing block's dtype can represent missing values.
        return self._mgr._can_hold_na
    # Backing attribute for ``.index``; kept in sync with the block manager.
    _index: Optional[Index] = None
    def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:
        """
        Override generic, we want to set the _typ here.

        This is called from the cython code when we set the `index` attribute
        directly, e.g. `series.index = [1, 2, 3]`.
        """
        if not fastpath:
            labels = ensure_index(labels)
        if labels._is_all_dates:
            # try to upgrade an all-dates index to a proper DatetimeIndex
            deep_labels = labels
            if isinstance(labels, CategoricalIndex):
                deep_labels = labels.categories
            if not isinstance(
                deep_labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)
            ):
                try:
                    labels = DatetimeIndex(labels)
                    # need to set here because we changed the index
                    if fastpath:
                        self._mgr.set_axis(axis, labels)
                except (tslibs.OutOfBoundsDatetime, ValueError):
                    # labels may exceeds datetime bounds,
                    # or not be a DatetimeIndex
                    pass
        object.__setattr__(self, "_index", labels)
        if not fastpath:
            # The ensure_index call above ensures we have an Index object
            self._mgr.set_axis(axis, labels)
# ndarray compatibility
    @property
    def dtype(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.
        """
        return self._mgr.dtype
    @property
    def dtypes(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.
        """
        # DataFrame compatibility: a Series has a single dtype, so the
        # plural alias just forwards to ``dtype``.
        return self.dtype
@property
def name(self) -> Hashable:
"""
Return the name of the Series.
The name of a Series becomes its index or column name if it is used
to form a DataFrame. It is also used whenever displaying the Series
using the interpreter.
Returns
-------
label (hashable object)
The name of the Series, also the column name if part of a DataFrame.
See Also
--------
Series.rename : Sets the Series name when given a scalar input.
Index.name : Corresponding Index property.
Examples
--------
The Series name can be set initially when calling the constructor.
>>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')
>>> s
0 1
1 2
2 3
Name: Numbers, dtype: int64
>>> s.name = "Integers"
>>> s
0 1
1 2
2 3
Name: Integers, dtype: int64
The name of a Series within a DataFrame is its column name.
>>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
... columns=["Odd Numbers", "Even Numbers"])
>>> df
Odd Numbers Even Numbers
0 1 2
1 3 4
2 5 6
>>> df["Even Numbers"].name
'Even Numbers'
"""
return self._name
@name.setter
def name(self, value: Hashable) -> None:
validate_all_hashable(value, error_name=f"{type(self).__name__}.name")
object.__setattr__(self, "_name", value)
@property
def values(self):
"""
Return Series as ndarray or ndarray-like depending on the dtype.
.. warning::
We recommend using :attr:`Series.array` or
:meth:`Series.to_numpy`, depending on whether you need
a reference to the underlying data or a NumPy array.
Returns
-------
numpy.ndarray or ndarray-like
See Also
--------
Series.array : Reference to the underlying data.
Series.to_numpy : A NumPy array representing the underlying data.
Examples
--------
>>> pd.Series([1, 2, 3]).values
array([1, 2, 3])
>>> pd.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
>>> pd.Series(list('aabc')).astype('category').values
['a', 'a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Timezone aware datetime data is converted to UTC:
>>> pd.Series(pd.date_range('20130101', periods=3,
... tz='US/Eastern')).values
array(['2013-01-01T05:00:00.000000000',
'2013-01-02T05:00:00.000000000',
'2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
"""
return self._mgr.external_values()
@property
def _values(self):
"""
Return the internal repr of this data (defined by Block.interval_values).
This are the values as stored in the Block (ndarray or ExtensionArray
depending on the Block class), with datetime64[ns] and timedelta64[ns]
wrapped in ExtensionArrays to match Index._values behavior.
Differs from the public ``.values`` for certain data types, because of
historical backwards compatibility of the public attribute (e.g. period
returns object ndarray and datetimetz a datetime64[ns] ndarray for
``.values`` while it returns an ExtensionArray for ``._values`` in those
cases).
Differs from ``.array`` in that this still returns the numpy array if
the Block is backed by a numpy array (except for datetime64 and
timedelta64 dtypes), while ``.array`` ensures to always return an
ExtensionArray.
Overview:
dtype | values | _values | array |
----------- | ------------- | ------------- | ------------- |
Numeric | ndarray | ndarray | PandasArray |
Category | Categorical | Categorical | Categorical |
dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |
Period | ndarray[obj] | PeriodArray | PeriodArray |
Nullable | EA | EA | EA |
"""
return self._mgr.internal_values()
    # error: Decorated property not supported
    @Appender(base.IndexOpsMixin.array.__doc__)  # type: ignore[misc]
    @property
    def array(self) -> ExtensionArray:
        # Always an ExtensionArray view of the block's data (zero-copy).
        return self._mgr._block.array_values()
# ops
    def ravel(self, order="C"):
        """
        Return the flattened underlying data as an ndarray.

        Parameters
        ----------
        order : {'C', 'F', 'A', 'K'}, default 'C'
            Ordering argument passed through to :meth:`numpy.ndarray.ravel`.

        Returns
        -------
        numpy.ndarray or ndarray-like
            Flattened data of the Series.

        See Also
        --------
        numpy.ndarray.ravel : Return a flattened array.
        """
        return self._values.ravel(order=order)
    def __len__(self) -> int:
        """
        Return the length of the Series.
        """
        # Delegated to the block manager, which owns the data.
        return len(self._mgr)
def view(self, dtype: Optional[Dtype] = None) -> Series:
"""
Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8
"""
return self._constructor(
self._values.view(dtype), index=self.index
).__finalize__(self, method="view")
# ----------------------------------------------------------------------
# NDArray Compat
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
Return the values as a NumPy array.
Users should not call this directly. Rather, it is invoked by
:func:`numpy.array` and :func:`numpy.asarray`.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to use for the resulting NumPy array. By default,
the dtype is inferred from the data.
Returns
-------
numpy.ndarray
The values in the series converted to a :class:`numpy.ndarray`
with the specified `dtype`.
See Also
--------
array : Create a new array from data.
Series.array : Zero-copy view to the array backing the Series.
Series.to_numpy : Series method for similar behavior.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> np.asarray(ser)
array([1, 2, 3])
For timezone-aware data, the timezones may be retained with
``dtype='object'``
>>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> np.asarray(tzser, dtype="object")
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or the values may be localized to UTC and the tzinfo discarded with
``dtype='datetime64[ns]'``
>>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', ...],
dtype='datetime64[ns]')
"""
return np.asarray(self.array, dtype)
# ----------------------------------------------------------------------
# Unary Methods
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
# ----------------------------------------------------------------------
# indexers
    @property
    def axes(self) -> List[Index]:
        """
        Return a list of the row axis labels.
        """
        # A Series has exactly one axis: its index.
        return [self.index]
# ----------------------------------------------------------------------
# Indexing Methods
@Appender(generic.NDFrame.take.__doc__)
def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
new_index = self.index.take(indices)
new_values = self._values.take(indices)
result = self._constructor(new_values, index=new_index, fastpath=True)
return result.__finalize__(self, method="take")
    def _take_with_is_copy(self, indices, axis=0):
        """
        Internal version of the `take` method that sets the `_is_copy`
        attribute to keep track of the parent dataframe (using in indexing
        for the SettingWithCopyWarning). For Series this does the same
        as the public take (it never sets `_is_copy`).

        See the docstring of `take` for full explanation of the parameters.
        """
        return self.take(indices=indices, axis=axis)
    def _ixs(self, i: int, axis: int = 0):
        """
        Return the i-th value or values in the Series by location.

        Parameters
        ----------
        i : int
            Positional index into the underlying values.

        Returns
        -------
        scalar (int) or Series (slice, sequence)
        """
        return self._values[i]
    def _slice(self, slobj: slice, axis: int = 0) -> Series:
        # axis kwarg is retained for compat with NDFrame method
        # _slice is *always* positional
        return self._get_values(slobj)
    def __getitem__(self, key):
        # Scalar keys return a scalar; slices, list-likes and boolean masks
        # return a new Series. Callable keys are evaluated against self first.
        key = com.apply_if_callable(key, self)
        if key is Ellipsis:
            return self
        key_is_scalar = is_scalar(key)
        if isinstance(key, (list, tuple)):
            key = unpack_1tuple(key)
        if is_integer(key) and self.index._should_fallback_to_positional():
            # integer key on a non-integer index -> positional lookup
            return self._values[key]
        elif key_is_scalar:
            return self._get_value(key)
        if is_hashable(key):
            # Otherwise index.get_value will raise InvalidIndexError
            try:
                # For labels that don't resolve as scalars like tuples and frozensets
                result = self._get_value(key)
                return result
            except (KeyError, TypeError):
                if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
                    # We still have the corner case where a tuple is a key
                    # in the first level of our MultiIndex
                    return self._get_values_tuple(key)
        if is_iterator(key):
            key = list(key)
        if com.is_bool_indexer(key):
            key = check_bool_indexer(self.index, key)
            key = np.asarray(key, dtype=bool)
            return self._get_values(key)
        return self._get_with(key)
    def _get_with(self, key):
        # other: fancy integer or otherwise
        # Handles every non-scalar, non-boolean key form for __getitem__.
        if isinstance(key, slice):
            # _convert_slice_indexer to determine if this slice is positional
            # or label based, and if the latter, convert to positional
            slobj = self.index._convert_slice_indexer(key, kind="getitem")
            return self._slice(slobj)
        elif isinstance(key, ABCDataFrame):
            raise TypeError(
                "Indexing a Series with DataFrame is not "
                "supported, use the appropriate DataFrame column"
            )
        elif isinstance(key, tuple):
            return self._get_values_tuple(key)
        elif not is_list_like(key):
            # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684
            return self.loc[key]
        if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):
            key = list(key)
        if isinstance(key, Index):
            key_type = key.inferred_type
        else:
            key_type = lib.infer_dtype(key, skipna=False)
        # Note: The key_type == "boolean" case should be caught by the
        # com.is_bool_indexer check in __getitem__
        if key_type == "integer":
            # We need to decide whether to treat this as a positional indexer
            # (i.e. self.iloc) or label-based (i.e. self.loc)
            if not self.index._should_fallback_to_positional():
                return self.loc[key]
            else:
                return self.iloc[key]
        # handle the dup indexing case GH#4246
        return self.loc[key]
    def _get_values_tuple(self, key):
        # mpl hackaround
        # matplotlib sometimes indexes with e.g. ser[:, None]; support that.
        if com.any_none(*key):
            result = self._get_values(key)
            deprecate_ndim_indexing(result, stacklevel=5)
            return result
        if not isinstance(self.index, MultiIndex):
            raise KeyError("key of type tuple not found and not a MultiIndex")
        # If key is contained, would have returned by now
        indexer, new_index = self.index.get_loc_level(key)
        return self._constructor(self._values[indexer], index=new_index).__finalize__(
            self
        )
    def _get_values(self, indexer):
        # Positional selection that returns a Series backed by a sliced mgr.
        try:
            return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self)
        except ValueError:
            # mpl compat if we look up e.g. ser[:, np.newaxis];
            # see tests.series.timeseries.test_mpl_compat_hack
            # the asarray is needed to avoid returning a 2D DatetimeArray
            return np.asarray(self._values[indexer])
    def _get_value(self, label, takeable: bool = False):
        """
        Quickly retrieve single value at passed index label.

        Parameters
        ----------
        label : object
        takeable : interpret the index as indexers, default False

        Returns
        -------
        scalar value
        """
        if takeable:
            return self._values[label]
        # Similar to Index.get_value, but we do not fall back to positional
        loc = self.index.get_loc(label)
        return self.index._get_values_for_loc(self, loc, label)
    def __setitem__(self, key, value):
        key = com.apply_if_callable(key, self)
        cacher_needs_updating = self._check_is_chained_assignment_possible()
        if key is Ellipsis:
            key = slice(None)
        try:
            # fast path: resolve a single label through the index engine
            self._set_with_engine(key, value)
        except (KeyError, ValueError):
            values = self._values
            if is_integer(key) and self.index.inferred_type != "integer":
                # positional setter
                values[key] = value
            else:
                # GH#12862 adding a new key to the Series
                self.loc[key] = value
        except TypeError as err:
            if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
                raise KeyError(
                    "key of type tuple not found and not a MultiIndex"
                ) from err
            if com.is_bool_indexer(key):
                # boolean-mask assignment, implemented via _where
                key = check_bool_indexer(self.index, key)
                key = np.asarray(key, dtype=bool)
                try:
                    self._where(~key, value, inplace=True)
                except InvalidIndexError:
                    self.iloc[key] = value
                return
            else:
                self._set_with(key, value)
        if cacher_needs_updating:
            self._maybe_update_cacher()
    def _set_with_engine(self, key, value):
        # fails with AttributeError for IntervalIndex
        loc = self.index._engine.get_loc(key)
        # reject values that would silently be truncated by the dtype
        validate_numeric_casting(self.dtype, value)
        self._values[loc] = value
    def _set_with(self, key, value):
        # other: fancy integer or otherwise
        # Fallback setter for keys the index engine could not resolve.
        if isinstance(key, slice):
            indexer = self.index._convert_slice_indexer(key, kind="getitem")
            return self._set_values(indexer, value)
        else:
            assert not isinstance(key, tuple)
            if is_scalar(key):
                key = [key]
            if isinstance(key, Index):
                key_type = key.inferred_type
                key = key._values
            else:
                key_type = lib.infer_dtype(key, skipna=False)
            # Note: key_type == "boolean" should not occur because that
            # should be caught by the is_bool_indexer check in __setitem__
            if key_type == "integer":
                # label-based unless the index prefers positional fallback
                if not self.index._should_fallback_to_positional():
                    self._set_labels(key, value)
                else:
                    self._set_values(key, value)
            else:
                self.loc[key] = value
def _set_labels(self, key, value):
key = com.asarray_tuplesafe(key)
indexer: np.ndarray = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise KeyError(f"{key[mask]} not in index")
self._set_values(indexer, value)
    def _set_values(self, key, value):
        # Positional setter; the manager may return a new (copied) mgr.
        if isinstance(key, Series):
            key = key._values
        self._mgr = self._mgr.setitem(  # type: ignore[assignment]
            indexer=key, value=value
        )
        self._maybe_update_cacher()
    def _set_value(self, label, value, takeable: bool = False):
        """
        Quickly set single value at passed label.

        If label is not contained, a new object is created with the label
        placed at the end of the result index.

        Parameters
        ----------
        label : object
            Partial indexing with MultiIndex not allowed.
        value : object
            Scalar value.
        takeable : interpret the index as indexers, default False
        """
        if not takeable:
            try:
                loc = self.index.get_loc(label)
            except KeyError:
                # set using a non-recursive method
                self.loc[label] = value
                return
        else:
            loc = label
        self._set_values(loc, value)
# ----------------------------------------------------------------------
# Unsorted
    @property
    def _is_mixed_type(self):
        # A Series always holds a single dtype, so it is never mixed.
        return False
def repeat(self, repeats, axis=None) -> Series:
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
0 a
1 b
1 b
2 c
2 c
dtype: object
>>> s.repeat([1, 2, 3])
0 a
1 b
1 b
2 c
2 c
2 c
dtype: object
"""
nv.validate_repeat((), {"axis": axis})
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values, index=new_index).__finalize__(
self, method="repeat"
)
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame or None
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if drop:
new_index = ibase.default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(
self._values.copy(), index=new_index
).__finalize__(self, method="reset_index")
elif inplace:
raise TypeError(
"Cannot reset_index inplace on a Series to create a DataFrame"
)
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
"""
Return a string representation for a particular Series.
"""
buf = StringIO("")
width, height = get_terminal_size()
max_rows = (
height
if get_option("display.max_rows") == 0
else get_option("display.max_rows")
)
min_rows = (
height
if get_option("display.max_rows") == 0
else get_option("display.min_rows")
)
show_dimensions = get_option("display.show_dimensions")
self.to_string(
buf=buf,
name=self.name,
dtype=self.dtype,
min_rows=min_rows,
max_rows=max_rows,
length=show_dimensions,
)
return buf.getvalue()
def to_string(
self,
buf=None,
na_rep="NaN",
float_format=None,
header=True,
index=True,
length=False,
dtype=False,
name=False,
max_rows=None,
min_rows=None,
):
"""
Render a string representation of the Series.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
na_rep : str, optional
String representation of NaN to use, default 'NaN'.
float_format : one-parameter function, optional
Formatter function to apply to columns' elements if they are
floats, default None.
header : bool, default True
Add the Series header (index name).
index : bool, optional
Add index (row) labels, default True.
length : bool, default False
Add the Series length.
dtype : bool, default False
Add the Series dtype.
name : bool, default False
Add the Series name if not None.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
min_rows : int, optional
The number of rows to display in a truncated repr (when number
of rows is above `max_rows`).
Returns
-------
str or None
String representation of Series if ``buf=None``, otherwise None.
"""
formatter = fmt.SeriesFormatter(
self,
name=name,
length=length,
header=header,
index=index,
dtype=dtype,
na_rep=na_rep,
float_format=float_format,
min_rows=min_rows,
max_rows=max_rows,
)
result = formatter.to_string()
# catch contract violations
if not isinstance(result, str):
raise AssertionError(
"result must be of type str, type "
f"of result is {repr(type(result).__name__)}"
)
if buf is None:
return result
else:
try:
buf.write(result)
except AttributeError:
with open(buf, "w") as f:
f.write(result)
@doc(
klass=_shared_doc_kwargs["klass"],
storage_options=generic._shared_docs["storage_options"],
examples=dedent(
"""
Examples
--------
>>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(s.to_markdown())
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
"""
),
)
    def to_markdown(
        self,
        buf: Optional[IO[str]] = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> Optional[str]:
        """
        Print {klass} in Markdown-friendly format.

        .. versionadded:: 1.0.0

        Parameters
        ----------
        buf : str, Path or StringIO-like, optional, default None
            Buffer to write to. If None, the output is returned as a string.
        mode : str, optional
            Mode in which file is opened, "wt" by default.
        index : bool, optional, default True
            Add index (row) labels.

            .. versionadded:: 1.1.0
        {storage_options}

            .. versionadded:: 1.2.0
        **kwargs
            These parameters will be passed to `tabulate \
            <https://pypi.org/project/tabulate>`_.

        Returns
        -------
        str
            {klass} in Markdown-friendly format.

        Notes
        -----
        Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.

        Examples
        --------
        >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
        >>> print(s.to_markdown())
        |    | animal   |
        |---:|:---------|
        |  0 | elk      |
        |  1 | pig      |
        |  2 | dog      |
        |  3 | quetzal  |

        Output markdown with a tabulate option.

        >>> print(s.to_markdown(tablefmt="grid"))
        +----+----------+
        |    | animal   |
        +====+==========+
        |  0 | elk      |
        +----+----------+
        |  1 | pig      |
        +----+----------+
        |  2 | dog      |
        +----+----------+
        |  3 | quetzal  |
        +----+----------+
        """
        # Markdown rendering (via tabulate) is implemented on DataFrame, so
        # convert to a one-column frame and delegate.
        return self.to_frame().to_markdown(
            buf, mode, index, storage_options=storage_options, **kwargs
        )
# ----------------------------------------------------------------------
    def items(self) -> Iterable[Tuple[Hashable, Any]]:
        """
        Lazily iterate over (index, value) tuples.

        This method returns an iterable tuple (index, value). This is
        convenient if you want to create a lazy iterator.

        Returns
        -------
        iterable
            Iterable of tuples containing the (index, value) pairs from a
            Series.

        See Also
        --------
        DataFrame.items : Iterate over (column name, Series) pairs.
        DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.

        Examples
        --------
        >>> s = pd.Series(['A', 'B', 'C'])
        >>> for index, value in s.items():
        ...     print(f"Index : {index}, Value : {value}")
        Index : 0, Value : A
        Index : 1, Value : B
        Index : 2, Value : C
        """
        # zip is lazy: nothing is materialized until the caller iterates.
        return zip(iter(self.index), iter(self))
@Appender(items.__doc__)
    def iteritems(self) -> Iterable[Tuple[Hashable, Any]]:
        # Alias for ``items`` (the shared docstring is attached via the
        # @Appender decorator above).
        return self.items()
# ----------------------------------------------------------------------
# Misc public methods
    def keys(self) -> Index:
        """
        Return alias for index.

        Returns
        -------
        Index
            Index of the Series.
        """
        # Dict-like alias so a Series supports the mapping-style ``keys()``
        # API alongside ``items()`` / ``values``.
        return self.index
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
into_c = com.standardize_mapping(into)
return into_c((k, maybe_box_native(v)) for k, v in self.items())
def to_frame(self, name=None) -> DataFrame:
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = pd.Series(["a", "b", "c"],
... name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
def _set_name(self, name, inplace=False) -> Series:
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
Whether to modify `self` directly or return a copy.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ser = self if inplace else self.copy()
ser.name = name
return ser
@Appender(
"""
Examples
--------
>>> ser = pd.Series([390., 350., 30., 20.],
... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed")
>>> ser
Falcon 390.0
Falcon 350.0
Parrot 30.0
Parrot 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", "b"]).mean()
a 210.0
b 185.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(ser > 100).mean()
Max Speed
False 25.0
True 370.0
Name: Max Speed, dtype: float64
**Grouping by Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")
>>> ser
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Animal
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level="Type").mean()
Type
Captive 210.0
Wild 185.0
Name: Max Speed, dtype: float64
We can also choose to include `NA` in group keys or not by defining
`dropna` parameter, the default setting is `True`:
>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
>>> ser.groupby(level=0).sum()
a 3
b 3
dtype: int64
>>> ser.groupby(level=0, dropna=False).sum()
a 3
b 3
NaN 3
dtype: int64
>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']
>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")
>>> ser.groupby(["a", "b", "a", np.nan]).mean()
a 210.0
b 350.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()
a 210.0
b 350.0
NaN 20.0
Name: Max Speed, dtype: float64
"""
)
@Appender(generic._shared_docs["groupby"] % _shared_doc_kwargs)
    def groupby(
        self,
        by=None,
        axis=0,
        level=None,
        as_index: bool = True,
        sort: bool = True,
        group_keys: bool = True,
        squeeze: bool = no_default,
        observed: bool = False,
        dropna: bool = True,
    ) -> SeriesGroupBy:
        # Imported locally to avoid a circular import at module load time.
        from pandas.core.groupby.generic import SeriesGroupBy

        if squeeze is not no_default:
            # ``squeeze`` is deprecated; the ``no_default`` sentinel lets us
            # warn only when the caller passed it explicitly.
            warnings.warn(
                (
                    "The `squeeze` parameter is deprecated and "
                    "will be removed in a future version."
                ),
                FutureWarning,
                stacklevel=2,
            )
        else:
            squeeze = False
        if level is None and by is None:
            raise TypeError("You have to supply one of 'by' and 'level'")
        axis = self._get_axis_number(axis)
        return SeriesGroupBy(
            obj=self,
            keys=by,
            axis=axis,
            level=level,
            as_index=as_index,
            sort=sort,
            group_keys=group_keys,
            squeeze=squeeze,
            observed=observed,
            dropna=dropna,
        )
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
    def count(self, level=None):
        """
        Return number of non-NA/null observations in the Series.

        Parameters
        ----------
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a smaller Series.

        Returns
        -------
        int or Series (if level specified)
            Number of non-null values in the Series.

        See Also
        --------
        DataFrame.count : Count non-NA cells for each column or row.

        Examples
        --------
        >>> s = pd.Series([0.0, 1.0, np.nan])
        >>> s.count()
        2
        """
        if level is None:
            # Fast path: count the True entries of the not-NA mask.
            return notna(self.array).sum()
        elif not isinstance(self.index, MultiIndex):
            raise ValueError("Series.count level is only valid with a MultiIndex")
        index = self.index
        assert isinstance(index, MultiIndex)  # for mypy
        if isinstance(level, str):
            # Translate a level name into its positional number.
            level = index._get_level_number(level)
        lev = index.levels[level]
        level_codes = np.array(index.codes[level], subok=False, copy=True)
        # Codes of -1 mark missing labels in this level; give them a bucket
        # of their own at the end so they are counted under the NA value.
        mask = level_codes == -1
        if mask.any():
            level_codes[mask] = cnt = len(lev)
            lev = lev.insert(cnt, lev._na_value)
        # Count non-NA observations per level bucket via bincount.
        obs = level_codes[notna(self._values)]
        out = np.bincount(obs, minlength=len(lev) or None)
        return self._constructor(out, index=lev, dtype="int64").__finalize__(
            self, method="count"
        )
    def mode(self, dropna=True) -> Series:
        """
        Return the mode(s) of the Series.

        The mode is the value that appears most often. There can be multiple
        modes.

        Always returns Series even if only one value is returned.

        Parameters
        ----------
        dropna : bool, default True
            Don't consider counts of NaN/NaT.

            .. versionadded:: 0.24.0

        Returns
        -------
        Series
            Modes of the Series in sorted order.
        """
        # TODO: Add option for bins like value_counts()
        # Delegates to the shared implementation in pandas.core.algorithms.
        return algorithms.mode(self, dropna=dropna)
    def unique(self):
        """
        Return unique values of Series object.

        Uniques are returned in order of appearance. Hash table-based unique,
        therefore does NOT sort.

        Returns
        -------
        ndarray or ExtensionArray
            The unique values returned as a NumPy array. See Notes.

        See Also
        --------
        unique : Top-level unique method for any 1-d array-like object.
        Index.unique : Return Index with unique values from an Index object.

        Notes
        -----
        Returns the unique values as a NumPy array. In case of an
        extension-array backed Series, a new
        :class:`~api.extensions.ExtensionArray` of that type with just
        the unique values is returned. This includes

            * Categorical
            * Period
            * Datetime with Timezone
            * Interval
            * Sparse
            * IntegerNA

        See Examples section.

        Examples
        --------
        >>> pd.Series([2, 1, 3, 3], name='A').unique()
        array([2, 1, 3])

        >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
        array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')

        >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
        ...            for _ in range(3)]).unique()
        <DatetimeArray>
        ['2016-01-01 00:00:00-05:00']
        Length: 1, dtype: datetime64[ns, US/Eastern]

        An unordered Categorical will return categories in the order of
        appearance.

        >>> pd.Series(pd.Categorical(list('baabc'))).unique()
        ['b', 'a', 'c']
        Categories (3, object): ['b', 'a', 'c']

        An ordered Categorical preserves the category ordering.

        >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
        ...                          ordered=True)).unique()
        ['b', 'a', 'c']
        Categories (3, object): ['a' < 'b' < 'c']
        """
        # The hash-table based implementation lives on IndexOpsMixin.
        return super().unique()
def drop_duplicates(self, keep="first", inplace=False) -> Optional[Series]:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
result = super().drop_duplicates(keep=keep)
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(self, keep="first") -> Series:
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
res = base.IndexOpsMixin.duplicated(self, keep=keep)
result = self._constructor(res, index=self.index)
return result.__finalize__(self, method="duplicated")
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
i = self.argmin(axis, skipna, *args, **kwargs)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
axis : int, default 0
For compatibility with DataFrame.idxmax. Redundant for application
on Series.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 3, 4],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 4.0
dtype: float64
>>> s.idxmax()
'C'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
"""
i = self.argmax(axis, skipna, *args, **kwargs)
if i == -1:
return np.nan
return self.index[i]
def round(self, decimals=0, *args, **kwargs) -> Series:
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int, default 0
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Series
Rounded values of the Series.
See Also
--------
numpy.around : Round values of an np.array.
DataFrame.round : Round values of a DataFrame.
Examples
--------
>>> s = pd.Series([0.1, 1.3, 2.7])
>>> s.round()
0 0.0
1 1.0
2 3.0
dtype: float64
"""
nv.validate_round(args, kwargs)
result = self._values.round(decimals)
result = self._constructor(result, index=self.index).__finalize__(
self, method="round"
)
return result
def quantile(self, q=0.5, interpolation="linear"):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
The quantile(s) to compute, which can lie in range: 0 <= q <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile : Calculate the rolling quantile.
numpy.percentile : Returns the q-th percentile(s) of the array elements.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
validate_percentile(q)
# We dispatch to DataFrame so that core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
return self._constructor(result, index=Float64Index(q), name=self.name)
else:
# scalar
return result.iloc[0]
def corr(self, other, method="pearson", min_periods=None) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'kendall', 'spearman'} or callable
Method used to compute correlation:
- pearson : Standard correlation coefficient
- kendall : Kendall Tau correlation coefficient
- spearman : Spearman rank correlation
- callable: Callable with input two 1d ndarrays and returning a float.
.. versionadded:: 0.24.0
Note that the returned matrix from corr will have 1 along the
diagonals and will be symmetric regardless of the callable's
behavior.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Correlation with other.
See Also
--------
DataFrame.corr : Compute pairwise correlation between columns.
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> s1 = pd.Series([.2, .0, .6, .2])
>>> s2 = pd.Series([.3, .6, .0, .1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
if method in ["pearson", "spearman", "kendall"] or callable(method):
return nanops.nancorr(
this.values, other.values, method=method, min_periods=min_periods
)
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
def cov(
self,
other: Series,
min_periods: Optional[int] = None,
ddof: Optional[int] = 1,
) -> float:
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
See Also
--------
DataFrame.cov : Compute pairwise covariance of columns.
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(
this.values, other.values, min_periods=min_periods, ddof=ddof
)
@doc(
klass="Series",
extra_params="",
other_klass="DataFrame",
examples=dedent(
"""
Difference with previous row
>>> s = pd.Series([1, 1, 2, 3, 5, 8])
>>> s.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
dtype: float64
Difference with 3rd previous row
>>> s.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 2.0
4 4.0
5 6.0
dtype: float64
Difference with following row
>>> s.diff(periods=-1)
0 0.0
1 -1.0
2 -1.0
3 -2.0
4 -3.0
5 NaN
dtype: float64
Overflow in input dtype
>>> s = pd.Series([1, 0], dtype=np.uint8)
>>> s.diff()
0 NaN
1 255.0
dtype: float64"""
),
)
    def diff(self, periods: int = 1) -> Series:
        """
        First discrete difference of element.

        Calculates the difference of a {klass} element compared with another
        element in the {klass} (default is element in previous row).

        Parameters
        ----------
        periods : int, default 1
            Periods to shift for calculating difference, accepts negative
            values.
        {extra_params}

        Returns
        -------
        {klass}
            First differences of the Series.

        See Also
        --------
        {klass}.pct_change: Percent change over given number of periods.
        {klass}.shift: Shift index by desired number of periods with an
            optional time freq.
        {other_klass}.diff: First discrete difference of object.

        Notes
        -----
        For boolean dtypes, this uses :meth:`operator.xor` rather than
        :meth:`operator.sub`.
        The result is calculated according to current dtype in {klass},
        however dtype of the result is always float64.

        Examples
        --------
        {examples}
        """
        # Shift-and-subtract (with the bool xor special case) is implemented
        # once in pandas.core.algorithms and shared with DataFrame.
        result = algorithms.diff(self.array, periods)
        return self._constructor(result, index=self.index).__finalize__(
            self, method="diff"
        )
    def autocorr(self, lag=1) -> float:
        """
        Compute the lag-N autocorrelation.

        This method computes the Pearson correlation between
        the Series and its shifted self.

        Parameters
        ----------
        lag : int, default 1
            Number of lags to apply before performing autocorrelation.

        Returns
        -------
        float
            The Pearson correlation between self and self.shift(lag).

        See Also
        --------
        Series.corr : Compute the correlation between two Series.
        Series.shift : Shift index by desired number of periods.
        DataFrame.corr : Compute pairwise correlation of columns.
        DataFrame.corrwith : Compute pairwise correlation between rows or
            columns of two DataFrame objects.

        Notes
        -----
        If the Pearson correlation is not well defined return 'NaN'.

        Examples
        --------
        >>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
        >>> s.autocorr()  # doctest: +ELLIPSIS
        0.10355...
        >>> s.autocorr(lag=2)  # doctest: +ELLIPSIS
        -0.99999...

        If the Pearson correlation is not well defined, then 'NaN' is
        returned.

        >>> s = pd.Series([1, 0, 0, 0])
        >>> s.autocorr()
        nan
        """
        # corr() aligns on the index, so the rows shifted off the end (NaN)
        # are excluded before the Pearson correlation is computed.
        return self.corr(self.shift(lag))
def dot(self, other):
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each columns of a DataFrame, or the Series and
each columns of an array.
It can also be called using `self @ other` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
Return the dot product of the Series and other if other is a
Series, the Series of the dot product of Series and each rows of
other if other is a DataFrame or a numpy.ndarray between the Series
and each columns of the numpy array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
The Series and other has to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
"""
if isinstance(other, (Series, ABCDataFrame)):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, ABCDataFrame):
return self._constructor(
np.dot(lvals, rvals), index=other.columns
).__finalize__(self, method="dot")
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
    def __matmul__(self, other):
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        # ``self @ other`` delegates directly to Series.dot.
        return self.dot(other)
    def __rmatmul__(self, other):
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        # ``other @ self``: transpose the left operand so dot() sees the
        # shapes in the order it expects for the reflected operation.
        return self.dot(np.transpose(other))
    @doc(base.IndexOpsMixin.searchsorted, klass="Series")
    def searchsorted(self, value, side="left", sorter=None):
        # Docstring is injected from IndexOpsMixin.searchsorted via @doc;
        # delegate to the shared searchsorted on the underlying array.
        return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self]
to_concat.extend(to_append)
else:
to_concat = [self, to_append]
if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]):
msg = "to_append should be a Series or list/tuple of Series, got DataFrame"
raise TypeError(msg)
return concat(
to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity
)
    def _binop(self, other, func, level=None, fill_value=None):
        """
        Perform generic binary operation with optional fill value.

        Parameters
        ----------
        other : Series
        func : binary operator
        fill_value : float or object
            Value to substitute for NA/null values. If both Series are NA in a
            location, the result will be NA regardless of the passed fill value.
        level : int or level name, default None
            Broadcast across a level, matching Index values on the
            passed MultiIndex level.

        Returns
        -------
        Series

        Raises
        ------
        AssertionError
            If ``other`` is not a Series.
        """
        if not isinstance(other, Series):
            raise AssertionError("Other operand must be Series")
        this = self
        if not self.index.equals(other.index):
            # Outer-align the two operands so they share the same labels.
            this, other = self.align(other, level=level, join="outer", copy=False)
        # Substitute fill_value where exactly one side is NA.
        this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value)
        with np.errstate(all="ignore"):
            result = func(this_vals, other_vals)
        name = ops.get_op_result_name(self, other)
        return this._construct_result(result, name)
    def _construct_result(
        self, result: Union[ArrayLike, Tuple[ArrayLike, ArrayLike]], name: Hashable
    ) -> Union[Series, Tuple[Series, Series]]:
        """
        Construct an appropriately-labelled Series from the result of an op.

        Parameters
        ----------
        result : ndarray or ExtensionArray
        name : Label

        Returns
        -------
        Series
            In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.
        """
        if isinstance(result, tuple):
            # produced by divmod or rdivmod
            # Wrap each half recursively so both carry index/name/metadata.
            res1 = self._construct_result(result[0], name=name)
            res2 = self._construct_result(result[1], name=name)
            # GH#33427 assertions to keep mypy happy
            assert isinstance(res1, Series)
            assert isinstance(res2, Series)
            return (res1, res2)
        # We do not pass dtype to ensure that the Series constructor
        # does inference in the case where `result` has object-dtype.
        out = self._constructor(result, index=self.index)
        out = out.__finalize__(self)
        # Set the result's name after __finalize__ is called because __finalize__
        # would set it back to self.name
        out.name = name
        return out
    @doc(
        generic._shared_docs["compare"],
        """
Returns
-------
Series or DataFrame
    If axis is 0 or 'index' the result will be a Series.
    The resulting index will be a MultiIndex with 'self' and 'other'
    stacked alternately at the inner level.
    If axis is 1 or 'columns' the result will be a DataFrame.
    It will have two columns namely 'self' and 'other'.
See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2)
  self other
1    b     a
3    d     b
Stack the differences on indices
>>> s1.compare(s2, align_axis=0)
1  self     b
   other    a
3  self     d
   other    b
dtype: object
Keep all original rows
>>> s1.compare(s2, keep_shape=True)
  self other
0  NaN   NaN
1    b     a
2  NaN   NaN
3    d     b
4  NaN   NaN
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True)
  self other
0    a     a
1    b     a
2    c     c
3    d     b
4    e     e
""",
        klass=_shared_doc_kwargs["klass"],
    )
    def compare(
        self,
        other: Series,
        align_axis: Axis = 1,
        keep_shape: bool = False,
        keep_equal: bool = False,
    ) -> FrameOrSeriesUnion:
        # All heavy lifting (alignment, masking of equal values) happens in
        # generic.NDFrame.compare; this override only binds the docstring.
        return super().compare(
            other=other,
            align_axis=align_axis,
            keep_shape=keep_shape,
            keep_equal=keep_equal,
        )
def combine(self, other, func, fill_value=None) -> Series:
"""
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = []
for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all="ignore"):
new_values.append(func(lv, rv))
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
with np.errstate(all="ignore"):
new_values = [func(lv, other) for lv in self._values]
new_name = self.name
if is_categorical_dtype(self.dtype):
pass
elif is_extension_array_dtype(self.dtype):
# TODO: can we do this for only SparseDtype?
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
new_values = maybe_cast_to_extension_array(type(self._values), new_values)
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other) -> Series:
"""
Combine Series values, choosing the calling Series's values first.
Parameters
----------
other : Series
The value(s) to be combined with the `Series`.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function.
Notes
-----
Result index will be the union of the two indexes.
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4])
>>> s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
if this.dtype.kind == "M" and other.dtype.kind != "M":
other = to_datetime(other)
return this.where(notna(this), other)
def update(self, other) -> None:
"""
Modify Series in place using values from passed Series.
Uses non-NA values from passed Series to make updates. Aligns
on index.
Parameters
----------
other : Series, or object coercible into Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
``other`` can also be a non-Series object type
that is coercible into a Series
>>> s = pd.Series([1, 2, 3])
>>> s.update([4, np.nan, 6])
>>> s
0 4
1 2
2 6
dtype: int64
>>> s = pd.Series([1, 2, 3])
>>> s.update({1: 9})
>>> s
0 1
1 9
2 3
dtype: int64
"""
if not isinstance(other, Series):
other = Series(other)
other = other.reindex_like(self)
mask = notna(other)
self._mgr = self._mgr.putmask(mask=mask, new=other)
self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
def sort_values(
self,
axis=0,
ascending: Union[Union[bool, int], Sequence[Union[bool, int]]] = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
"""
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool or list of bools, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the series values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect a
``Series`` and return an array-like.
.. versionadded:: 1.1.0
Returns
-------
Series or None
Series ordered by values or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
Sort using a key function. Your `key` function will be
given the ``Series`` of values and should return an array-like.
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e'])
>>> s.sort_values()
1 B
3 D
0 a
2 c
4 e
dtype: object
>>> s.sort_values(key=lambda x: x.str.lower())
0 a
1 B
2 c
3 D
4 e
dtype: object
NumPy ufuncs work well here. For example, we can
sort by the ``sin`` of the value
>>> s = pd.Series([-4, -2, 0, 2, 4])
>>> s.sort_values(key=np.sin)
1 -2
4 4
2 0
0 -4
3 2
dtype: int64
More complicated user-defined functions can be used,
as long as they expect a Series and return an array-like
>>> s.sort_values(key=lambda x: (np.tan(x.cumsum())))
0 -4
3 2
4 4
1 -2
2 0
dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis)
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError(
"This Series is a view of some other array, to "
"sort in-place you must create a copy"
)
if is_list_like(ascending):
ascending = cast(Sequence[Union[bool, int]], ascending)
if len(ascending) != 1:
raise ValueError(
f"Length of ascending ({len(ascending)}) must be 1 for Series"
)
ascending = ascending[0]
if not is_bool(ascending):
raise ValueError("ascending must be boolean")
if na_position not in ["first", "last"]:
raise ValueError(f"invalid na_position: {na_position}")
# GH 35922. Make sorting stable by leveraging nargsort
values_to_sort = ensure_key_mapped(self, key)._values if key else self._values
sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position)
result = self._constructor(
self._values[sorted_index], index=self.index[sorted_index]
)
if ignore_index:
result.index = ibase.default_index(len(sorted_index))
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
def sort_index(
self,
axis=0,
level=None,
ascending: Union[Union[bool, int], Sequence[Union[bool, int]]] = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort Series by index labels.
Returns a new Series sorted by label if `inplace` argument is
``False``, otherwise updates the original series and returns None.
Parameters
----------
axis : int, default 0
Axis to direct sorting. This can only be 0 for Series.
level : int, optional
If not None, sort on values in specified index level(s).
ascending : bool or list-like of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape.
.. versionadded:: 1.1.0
Returns
-------
Series or None
The original Series sorted by the labels or None if ``inplace=True``.
See Also
--------
DataFrame.sort_index: Sort DataFrame by the index.
DataFrame.sort_values: Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
>>> s.sort_index()
1 c
2 b
3 a
4 d
dtype: object
Sort Descending
>>> s.sort_index(ascending=False)
4 d
3 a
2 b
1 c
dtype: object
Sort Inplace
>>> s.sort_index(inplace=True)
>>> s
1 c
2 b
3 a
4 d
dtype: object
By default NaNs are put at the end, but use `na_position` to place
them at the beginning
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])
>>> s.sort_index(na_position='first')
NaN d
1.0 c
2.0 b
3.0 a
dtype: object
Specify index level to sort
>>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',
... 'baz', 'baz', 'bar', 'bar']),
... np.array(['two', 'one', 'two', 'one',
... 'two', 'one', 'two', 'one'])]
>>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)
>>> s.sort_index(level=1)
bar one 8
baz one 6
foo one 4
qux one 2
bar two 7
baz two 5
foo two 3
qux two 1
dtype: int64
Does not sort by remaining levels when sorting by levels
>>> s.sort_index(level=1, sort_remaining=False)
qux one 2
foo one 4
baz one 6
bar one 8
qux two 1
foo two 3
baz two 5
bar two 7
dtype: int64
Apply a key function before sorting
>>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd'])
>>> s.sort_index(key=lambda x : x.str.lower())
A 1
b 2
C 3
d 4
dtype: int64
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def argsort(self, axis=0, kind="quicksort", order=None) -> Series:
"""
Return the integer indices that would sort the Series values.
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : {0 or "index"}
Has no effect but is accepted for compatibility with numpy.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
order : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort : Returns the indices that would sort this array.
"""
values = self._values
mask = isna(values)
if mask.any():
result = Series(-1, index=self.index, name=self.name, dtype="int64")
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result, index=self.index).__finalize__(
self, method="argsort"
)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index, dtype="int64"
).__finalize__(self, method="argsort")
def nlargest(self, n=5, keep="first") -> Series:
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n=5, keep="first") -> Series:
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def swaplevel(self, i=-2, j=-1, copy=True) -> Series:
"""
Swap levels i and j in a :class:`MultiIndex`.
Default is to swap the two innermost levels of the index.
Parameters
----------
i, j : int, str
Level of the indices to be swapped. Can pass level name as string.
copy : bool, default True
Whether to copy underlying data.
Returns
-------
Series
Series with levels swapped in MultiIndex.
"""
assert isinstance(self.index, MultiIndex)
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index, copy=copy).__finalize__(
self, method="swaplevel"
)
def reorder_levels(self, order) -> Series:
"""
Rearrange index levels using input order.
May not drop or duplicate levels.
Parameters
----------
order : list of int representing new level order
Reference level by number or key.
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception("Can only reorder levels on a hierarchical axis.")
result = self.copy()
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
return result
def explode(self, ignore_index: bool = False) -> Series:
"""
Transform each element of a list-like to a row.
.. versionadded:: 0.25.0
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of elements in
the output will be non-deterministic when exploding sets.
Examples
--------
>>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
"""
if not len(self) or not is_object_dtype(self):
return self.copy()
values, counts = reshape.explode(np.asarray(self.array))
if ignore_index:
index = ibase.default_index(len(values))
else:
index = self.index.repeat(counts)
return self._constructor(values, index=index, name=self.name)
def unstack(self, level=-1, fill_value=None):
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
fill_value : scalar value, default None
Value to use when replacing NaN values.
Returns
-------
DataFrame
Unstacked Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'],
... ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None) -> Series:
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, collections.abc.Mapping subclass or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
"""
new_values = super()._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index).__finalize__(
self, method="map"
)
    def _gotitem(self, key, ndim, subset=None) -> Series:
        """
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : string / list of selections
        ndim : 1,2
            Requested ndim of result.
        subset : object, default None
            Subset to act on.
        """
        # A Series has no columns to slice into, so the aggregation
        # machinery always operates on the Series itself.
        return self
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Transform function producing a Series with like indexes.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.agg('min')
1
>>> s.agg(['min', 'max'])
min 1
max 4
dtype: int64
"""
)
@doc(
generic._shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
# if func is None, will switch to user-provided "named aggregation" kwargs
if func is None:
func = dict(kwargs.items())
op = series_apply(self, func, args=args, kwargs=kwargs)
result = op.agg()
return result
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> FrameOrSeriesUnion:
# Validate axis argument
self._get_axis_number(axis)
result = series_apply(self, func=func, args=args, kwargs=kwargs).transform()
return result
def apply(
self,
func: AggFuncType,
convert_dtype: bool = True,
args: Tuple[Any, ...] = (),
**kwargs,
) -> FrameOrSeriesUnion:
"""
Invoke function on values of Series.
Can be ufunc (a NumPy function that applies to the entire Series)
or a Python function that only works on single values.
Parameters
----------
func : function
Python function or NumPy ufunc to apply.
convert_dtype : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object.
args : tuple
Positional arguments passed to func after the series value.
**kwargs
Additional keyword arguments passed to func.
Returns
-------
Series or DataFrame
If func returns a Series object the result will be a DataFrame.
See Also
--------
Series.map: For element-wise operations.
Series.agg: Only perform aggregating type operations.
Series.transform: Only perform transforming type operations.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`udf-mutation`
for more details.
Examples
--------
Create a series with typical summer temperatures for each city.
>>> s = pd.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> s.apply(lambda x: x ** 2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
return series_apply(self, func, convert_dtype, args, kwargs).apply()
    def _reduce(
        self,
        op,
        name: str,
        *,
        axis=0,
        skipna=True,
        numeric_only=None,
        filter_type=None,
        **kwds,
    ):
        """
        Perform a reduction operation.

        If we have an ndarray as a value, then simply perform the operation,
        otherwise delegate to the object.
        """
        delegate = self._values
        if axis is not None:
            # Raises for an invalid axis on this 1-dimensional object.
            self._get_axis_number(axis)
        if isinstance(delegate, ExtensionArray):
            # dispatch to ExtensionArray interface
            return delegate._reduce(name, skipna=skipna, **kwds)
        else:
            # dispatch to numpy arrays
            if numeric_only:
                # numeric_only is not meaningful for an ndarray-backed Series.
                raise NotImplementedError(
                    f"Series.{name} does not implement numeric_only."
                )
            # Suppress numpy floating-point warnings during the reduction.
            with np.errstate(all="ignore"):
                return op(delegate, skipna=skipna, **kwds)
    def _reindex_indexer(self, new_index, indexer, copy):
        # Reindex using a precomputed positional indexer. ``indexer is None``
        # means the index is unchanged: only copy if requested.
        if indexer is None:
            if copy:
                return self.copy()
            return self
        # Take with allow_fill so missing positions (-1) get the fill value.
        new_values = algorithms.take_nd(
            self._values, indexer, allow_fill=True, fill_value=None
        )
        return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
"""
Check if we do need a multi reindex; this is for compat with
higher dims.
"""
return False
    @doc(
        NDFrame.align,
        klass=_shared_doc_kwargs["klass"],
        axes_single_arg=_shared_doc_kwargs["axes_single_arg"],
    )
    def align(
        self,
        other,
        join="outer",
        axis=None,
        level=None,
        copy=True,
        fill_value=None,
        method=None,
        limit=None,
        fill_axis=0,
        broadcast_axis=None,
    ):
        # Pure delegation; the docstring comes from NDFrame.align via @doc.
        return super().align(
            other,
            join=join,
            axis=axis,
            level=level,
            copy=copy,
            fill_value=fill_value,
            method=method,
            limit=limit,
            fill_axis=fill_axis,
            broadcast_axis=broadcast_axis,
        )
def rename(
self,
index=None,
*,
axis=None,
copy=True,
inplace=False,
level=None,
errors="ignore",
):
"""
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
axis : {0 or "index"}
Unused. Accepted for compatibility with DataFrame method only.
index : scalar, hashable sequence, dict-like or function, optional
Functions or dict-like are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
**kwargs
Additional keyword arguments passed to the function. Only the
"inplace" keyword is used.
Returns
-------
Series or None
Series with index labels or name altered or None if ``inplace=True``.
See Also
--------
DataFrame.rename : Corresponding DataFrame method.
Series.rename_axis : Set the name of the axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
if callable(index) or is_dict_like(index):
return super().rename(
index, copy=copy, inplace=inplace, level=level, errors=errors
)
else:
return self._set_name(index, inplace=inplace)
    # The final docstring is assembled by the decorators below: the generic
    # NDFrame.set_axis text, template substitutions, plus the example block.
    @Appender(
        """
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.set_axis(['a', 'b', 'c'], axis=0)
        a    1
        b    2
        c    3
        dtype: int64
        """
    )
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub="",
        axis_description_sub="",
        see_also_sub="",
    )
    @Appender(generic.NDFrame.set_axis.__doc__)
    def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
        # Pure delegation to the shared NDFrame implementation.
        return super().set_axis(labels, axis=axis, inplace=inplace)
    @doc(
        NDFrame.reindex,
        klass=_shared_doc_kwargs["klass"],
        axes=_shared_doc_kwargs["axes"],
        optional_labels=_shared_doc_kwargs["optional_labels"],
        optional_axis=_shared_doc_kwargs["optional_axis"],
    )
    def reindex(self, index=None, **kwargs):
        # Series has only the index axis; remaining kwargs pass through.
        return super().reindex(index=index, **kwargs)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
) -> Series:
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed
by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
axis : 0, default 0
Redundant for application on Series.
index : single label or list-like
Redundant for application on Series, but 'index' can be used instead
of 'labels'.
columns : single label or list-like
No change is made to the Series; use 'index' or 'labels' instead.
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
Returns
-------
Series or None
Series with specified index labels removed or None if ``inplace=True``.
Raises
------
KeyError
If none of the labels are found in the index.
See Also
--------
Series.reindex : Return only specified index labels of Series.
Series.dropna : Return series without null values.
Series.drop_duplicates : Return Series with duplicate values removed.
DataFrame.drop : Drop specified labels from rows or columns.
Examples
--------
>>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop labels B en C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
    @doc(NDFrame.fillna, **_shared_doc_kwargs)
    def fillna(
        self,
        value=None,
        method=None,
        axis=None,
        inplace=False,
        limit=None,
        downcast=None,
    ) -> Optional[Series]:
        # Pure delegation; the docstring comes from NDFrame.fillna via @doc.
        return super().fillna(
            value=value,
            method=method,
            axis=axis,
            inplace=inplace,
            limit=limit,
            downcast=downcast,
        )
def pop(self, item: Hashable) -> Any:
"""
Return item and drops from series. Raise KeyError if not found.
Parameters
----------
item : label
Index of the element that needs to be removed.
Returns
-------
Value that is popped from series.
Examples
--------
>>> ser = pd.Series([1,2,3])
>>> ser.pop(0)
1
>>> ser
1 2
2 3
dtype: int64
"""
return super().pop(item=item)
    @doc(
        NDFrame.replace,
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(
        self,
        to_replace=None,
        value=None,
        inplace=False,
        limit=None,
        regex=False,
        method="pad",
    ):
        # Pure delegation; the docstring comes from NDFrame.replace via @doc.
        return super().replace(
            to_replace=to_replace,
            value=value,
            inplace=inplace,
            limit=limit,
            regex=regex,
            method=method,
        )
    def _replace_single(self, to_replace, method: str, inplace: bool, limit):
        """
        Replaces values in a Series using the fill method specified when no
        replacement value is given in the replace method
        """
        orig_dtype = self.dtype
        result = self if inplace else self.copy()
        fill_f = missing.get_fill_func(method)
        # Mask the positions holding values to be replaced, then fill them
        # using the chosen fill function.
        mask = missing.mask_missing(result.values, to_replace)
        values = fill_f(result.values, limit=limit, mask=mask)
        if values.dtype == orig_dtype and inplace:
            # NOTE(review): early return appears to rely on fill_f having
            # mutated result.values in place when the dtype is unchanged —
            # confirm before touching this branch.
            return
        result = self._constructor(values, index=self.index, dtype=self.dtype)
        result = result.__finalize__(self)
        if inplace:
            # Write back into self; inplace calls return None.
            self._update_inplace(result)
            return
        return result
    @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
    def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> Series:
        # Pure delegation; the docstring comes from NDFrame.shift via @doc.
        return super().shift(
            periods=periods, freq=freq, axis=axis, fill_value=fill_value
        )
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of the Series.
The memory usage can optionally include the contribution of
the index and of elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the Series index.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned value.
Returns
-------
int
Bytes of memory consumed.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
DataFrame.memory_usage : Bytes consumed by a DataFrame.
Examples
--------
>>> s = pd.Series(range(3))
>>> s.memory_usage()
152
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
The memory footprint of `object` values is ignored by default:
>>> s = pd.Series(["a", "b"])
>>> s.values
array(['a', 'b'], dtype=object)
>>> s.memory_usage()
144
>>> s.memory_usage(deep=True)
244
"""
v = super().memory_usage(deep=deep)
if index:
v += self.index.memory_usage(deep=deep)
return v
def isin(self, values) -> Series:
"""
Whether elements in Series are contained in `values`.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
Series
Series of booleans indicating if each element is in values.
Raises
------
TypeError
* If `values` is a string
See Also
--------
DataFrame.isin : Equivalent method on DataFrame.
Examples
--------
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Strings and integers are distinct and are therefore not comparable:
>>> pd.Series([1]).isin(['1'])
0 False
dtype: bool
>>> pd.Series([1.1]).isin(['1.1'])
0 False
dtype: bool
"""
result = algorithms.isin(self._values, values)
return self._constructor(result, index=self.index).__finalize__(
self, method="isin"
)
def between(self, left, right, inclusive=True) -> Series:
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
# ----------------------------------------------------------------------
# Convert to types that support pd.NA
    def _convert_dtypes(
        self,
        infer_objects: bool = True,
        convert_string: bool = True,
        convert_integer: bool = True,
        convert_boolean: bool = True,
        convert_floating: bool = True,
    ) -> Series:
        # Backend for convert_dtypes: cast to the best dtypes that support
        # pd.NA, controlled per kind by the boolean flags.
        input_series = self
        if infer_objects:
            input_series = input_series.infer_objects()
            if is_object_dtype(input_series):
                # Still object dtype after inference; work on a copy.
                input_series = input_series.copy()
        if convert_string or convert_integer or convert_boolean or convert_floating:
            inferred_dtype = convert_dtypes(
                input_series._values,
                convert_string,
                convert_integer,
                convert_boolean,
                convert_floating,
            )
            try:
                result = input_series.astype(inferred_dtype)
            except TypeError:
                # The inferred dtype cannot be cast to; fall back to a copy.
                result = input_series.copy()
        else:
            # All conversions disabled; just return a copy.
            result = input_series.copy()
        return result
    @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
    def isna(self) -> Series:
        # Calls NDFrame.isna explicitly rather than via super().
        return generic.NDFrame.isna(self)
    # isnull is an alias of isna with the same behavior.
    @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
    def isnull(self) -> Series:
        return super().isnull()
    @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
    def notna(self) -> Series:
        return super().notna()
    # notnull is an alias of notna with the same behavior.
    @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
    def notnull(self) -> Series:
        return super().notnull()
def dropna(self, axis=0, inplace=False, how=None):
"""
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
Returns
-------
Series or None
Series with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
if inplace:
self._update_inplace(result)
else:
return result
else:
if inplace:
# do nothing
pass
else:
return self.copy()
# ----------------------------------------------------------------------
# Time series-oriented methods
    @doc(NDFrame.asfreq, **_shared_doc_kwargs)
    def asfreq(
        self,
        freq,
        method=None,
        how: Optional[str] = None,
        normalize: bool = False,
        fill_value=None,
    ) -> Series:
        # Pure delegation; the docstring comes from NDFrame.asfreq via @doc.
        return super().asfreq(
            freq=freq,
            method=method,
            how=how,
            normalize=normalize,
            fill_value=fill_value,
        )
    @doc(NDFrame.resample, **_shared_doc_kwargs)
    def resample(
        self,
        rule,
        axis=0,
        closed: Optional[str] = None,
        label: Optional[str] = None,
        convention: str = "start",
        kind: Optional[str] = None,
        loffset=None,
        base: Optional[int] = None,
        on=None,
        level=None,
        origin: Union[str, TimestampConvertibleTypes] = "start_day",
        offset: Optional[TimedeltaConvertibleTypes] = None,
    ) -> Resampler:
        # Pure delegation; the docstring comes from NDFrame.resample via @doc.
        return super().resample(
            rule=rule,
            axis=axis,
            closed=closed,
            label=label,
            convention=convention,
            kind=kind,
            loffset=loffset,
            base=base,
            on=on,
            level=level,
            origin=origin,
            offset=offset,
        )
def to_timestamp(self, freq=None, how="start", copy=True) -> Series:
"""
Cast to DatetimeIndex of Timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
copy : bool, default True
Whether or not to return a copy.
Returns
-------
Series with DatetimeIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
if not isinstance(self.index, PeriodIndex):
raise TypeError(f"unsupported Type {type(self.index).__name__}")
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values, index=new_index).__finalize__(
self, method="to_timestamp"
)
def to_period(self, freq=None, copy=True) -> Series:
"""
Convert Series from DatetimeIndex to PeriodIndex.
Parameters
----------
freq : str, default None
Frequency associated with the PeriodIndex.
copy : bool, default True
Whether or not to return a copy.
Returns
-------
Series
Series with index converted to PeriodIndex.
"""
new_values = self._values
if copy:
new_values = new_values.copy()
if not isinstance(self.index, DatetimeIndex):
raise TypeError(f"unsupported Type {type(self.index).__name__}")
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values, index=new_index).__finalize__(
self, method="to_period"
)
# ----------------------------------------------------------------------
# Add index
_AXIS_ORDERS = ["index"]
_AXIS_REVERSED = False
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 0
_info_axis_name = "index"
index: Index = properties.AxisProperty(
axis=0, doc="The index (axis labels) of the Series."
)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
str = CachedAccessor("str", StringMethods)
dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
sparse = CachedAccessor("sparse", SparseAccessor)
# ----------------------------------------------------------------------
# Add plotting methods to Series
hist = pandas.plotting.hist_series
# ----------------------------------------------------------------------
# Template-Based Arithmetic/Comparison Methods
    def _cmp_method(self, other, op):
        # Shared implementation behind the comparison operators.
        res_name = ops.get_op_result_name(self, other)
        if isinstance(other, Series) and not self._indexed_same(other):
            # Comparisons require exactly matching indexes; no alignment.
            raise ValueError("Can only compare identically-labeled Series objects")
        lvalues = extract_array(self, extract_numpy=True)
        rvalues = extract_array(other, extract_numpy=True)
        res_values = ops.comparison_op(lvalues, rvalues, op)
        return self._construct_result(res_values, name=res_name)
    def _logical_method(self, other, op):
        # Shared implementation behind &, | and ^; operands are aligned first.
        res_name = ops.get_op_result_name(self, other)
        self, other = ops.align_method_SERIES(self, other, align_asobject=True)
        lvalues = extract_array(self, extract_numpy=True)
        rvalues = extract_array(other, extract_numpy=True)
        res_values = ops.logical_op(lvalues, rvalues, op)
        return self._construct_result(res_values, name=res_name)
    def _arith_method(self, other, op):
        # Shared implementation behind +, -, *, etc.; operands aligned first.
        res_name = ops.get_op_result_name(self, other)
        self, other = ops.align_method_SERIES(self, other)
        lvalues = extract_array(self, extract_numpy=True)
        rvalues = extract_array(other, extract_numpy=True)
        result = ops.arithmetic_op(lvalues, rvalues, op)
        return self._construct_result(result, name=res_name)
# Install the auto-generated numeric operations onto Series.
Series._add_numeric_operations()
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series)
| 30.388944 | 88 | 0.532734 |
a9324c82459b40cd90840217797e9deb5261dfad | 917 | py | Python | corehq/sql_db/management/commands/locate_invalid_shard_data.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/sql_db/management/commands/locate_invalid_shard_data.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/sql_db/management/commands/locate_invalid_shard_data.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.core.management.base import BaseCommand
from corehq.sql_db.shard_data_management import get_count_of_unmatched_models_by_shard
from corehq.sql_db.util import get_db_aliases_for_partitioned_query, get_all_sharded_models
class Command(BaseCommand):
    help = "Print out all shard data that exists in databases that don't contain the associated shards."
    def handle(self, **options):
        # For every partitioned database, check each sharded model for rows
        # whose shard does not belong on that database and print the counts.
        sharded_models = list(get_all_sharded_models())
        for database in get_db_aliases_for_partitioned_query():
            for model in sharded_models:
                invalid_data = get_count_of_unmatched_models_by_shard(database, model)
                if invalid_data:
                    for shard_id, count in invalid_data:
                        print('found {} unexpected {}s in {} (shard {}).'.format(
                            count, model.__name__, database, shard_id)
                        )
| 48.263158 | 104 | 0.676118 |
f9b39604a020a4f9cef5de9be79129b055914837 | 561 | py | Python | output/models/nist_data/atomic/time/schema_instance/nistschema_sv_iv_atomic_time_max_exclusive_4_xsd/nistschema_sv_iv_atomic_time_max_exclusive_4.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/time/schema_instance/nistschema_sv_iv_atomic_time_max_exclusive_4_xsd/nistschema_sv_iv_atomic_time_max_exclusive_4.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/time/schema_instance/nistschema_sv_iv_atomic_time_max_exclusive_4_xsd/nistschema_sv_iv_atomic_time_max_exclusive_4.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
from xsdata.models.datatype import XmlTime
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-time-maxExclusive-4-NS"
@dataclass
class NistschemaSvIvAtomicTimeMaxExclusive4:
    """Generated binding for the NISTSchema atomic-time maxExclusive-4 root element."""
    class Meta:
        name = "NISTSchema-SV-IV-atomic-time-maxExclusive-4"
        namespace = "NISTSchema-SV-IV-atomic-time-maxExclusive-4-NS"
    # Required element value; must be strictly earlier than 12:25:37.
    value: Optional[XmlTime] = field(
        default=None,
        metadata={
            "required": True,
            "max_exclusive": XmlTime(12, 25, 37, 0),
        }
    )
| 26.714286 | 68 | 0.677362 |
e12713baf0d04aa62a6c710a0a6560119839e8fd | 156 | py | Python | joblicant/JoblicantApp/apps.py | juliuscecilia33/joblicant | b654bc438838f633556cef16fa7e5bfa63eecfb6 | [
"MIT"
] | null | null | null | joblicant/JoblicantApp/apps.py | juliuscecilia33/joblicant | b654bc438838f633556cef16fa7e5bfa63eecfb6 | [
"MIT"
] | null | null | null | joblicant/JoblicantApp/apps.py | juliuscecilia33/joblicant | b654bc438838f633556cef16fa7e5bfa63eecfb6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class JoblicantappConfig(AppConfig):
    """Django application configuration for JoblicantApp."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'JoblicantApp'
| 22.285714 | 56 | 0.775641 |
4bd7a98cb924a45ff60f4908627c386b20d9cbde | 8,141 | py | Python | python/paddle/fluid/tests/unittests/test_layer_norm_op.py | javakian/Paddle | 10018f1561cb8f75f8df982dcf2217e50cee2647 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_layer_norm_op.py | javakian/Paddle | 10018f1561cb8f75f8df982dcf2217e50cee2647 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_layer_norm_op.py | javakian/Paddle | 10018f1561cb8f75f8df982dcf2217e50cee2647 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from operator import mul
import paddle.fluid.core as core
import paddle.fluid as fluid
from functools import reduce
from op_test import _set_use_system_allocator
# NOTE(review): ``np.random.random(123)`` draws and discards 123 samples; it
# looks like ``np.random.seed(123)`` was intended — confirm before changing.
np.random.random(123)
_set_use_system_allocator(True)
def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1):
x_shape = x.shape
N = reduce(mul, x_shape[0:begin_norm_axis], 1)
D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)
x.shape = [N, D]
mean = np.mean(x, axis=1)
var = np.var(x, axis=1) + epsilon
output = scale.reshape([1, D]) * np.divide(
(x - mean.reshape([N, 1])),
(np.sqrt(var)).reshape([N, 1])) + beta.reshape([1, D])
x.shape, output.shape = x_shape, x_shape
return output, mean, var
def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1):
    # NumPy reference gradients of layer norm w.r.t. input, scale and bias.
    # NOTE: temporarily reshapes x/grad_y/scale in place and restores their
    # shapes before returning; mean/var are deliberately left 1-D.
    x_shape = x.shape
    scale_shape = scale.shape
    N = reduce(mul, x_shape[0:begin_norm_axis], 1)
    D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)
    # Flatten batch and feature axes so all math works on [N, D] views.
    x.shape, grad_y.shape = [N, D], [N, D]
    var.shape, mean.shape = [N, 1], [N, 1]
    scale.shape = [1, D]
    # d_bias
    d_bias = np.sum(grad_y, axis=0).reshape([1, D])
    # d_scale
    d_scale = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y,
                     axis=0).reshape([1, D])
    # dx
    dx_end = scale * np.sqrt(1.0 / var) * grad_y
    d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * scale, axis=1).reshape(
        [N, 1])  # the second part equals to zero.
    d_mean = 1.0 / D * d_mean_0
    d_std = np.sum(
        -(1.0 / var) * (x - mean) * grad_y * scale, axis=1).reshape([N, 1]) * (
            1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean))
    grad_x = dx_end + d_mean + d_std
    # Restore the caller-visible shapes mutated above.
    grad_x.shape, x.shape, grad_y.shape = x_shape, x_shape, x_shape
    scale.shape = scale_shape
    var.shape, mean.shape = [N, ], [N, ]
    return grad_x, d_scale, d_bias
class TestLayerNormOp(unittest.TestCase):
    """Compare the layer_norm op (forward and backward) against the NumPy reference."""
    def setUp(self):
        self.use_cudnn = True
    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
    def check_forward_backward(self, shape, begin_norm_axis):
        def test_with_place(place, shape, begin_norm_axis):
            # attr
            epsilon = 0.00001
            x_shape = shape
            D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)
            scale_shape = [D]
            np.random.seed(123)
            x = np.random.random_sample(x_shape).astype(np.float32)
            scale = np.random.random_sample(scale_shape).astype(np.float32)
            bias = np.random.random_sample(scale_shape).astype(np.float32)
            y_grad = np.random.random_sample(x_shape).astype(np.float32)
            # reference forward & backward
            y, mean, variance = _reference_layer_norm_naive(
                x, scale, bias, epsilon, begin_norm_axis)
            x_grad, scale_grad, bias_grad = _reference_layer_norm_grad(
                x, y_grad, scale, mean, variance, begin_norm_axis)
            var_dict = locals()
            var_dict['y@GRAD'] = y_grad
            var_names = [
                'x', 'scale', 'bias', 'mean', 'variance', 'y', 'y@GRAD'
            ]
            ground_truth = {name: var_dict[name] for name in var_names}
            # Build a program holding the forward op plus a manually
            # generated backward op, then run and compare outputs.
            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(
                        name=name,
                        dtype='float32',
                        shape=ground_truth[name].shape)
                layer_norm_op = block.append_op(
                    type="layer_norm",
                    inputs={
                        "X": block.var('x'),
                        "Scale": block.var('scale'),
                        "Bias": block.var('bias'),
                    },
                    outputs={
                        "Y": block.var('y'),
                        "Mean": block.var('mean'),  # share the same memory
                        "Variance":
                        block.var('variance'),  # share the same memory
                    },
                    attrs={
                        "epsilon": epsilon,
                        "begin_norm_axis": begin_norm_axis
                    })
                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    layer_norm_op.desc, set(), [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)
                exe = fluid.Executor(place)
                out = exe.run(program,
                              feed={
                                  name: var_dict[name]
                                  for name in ['x', 'scale', 'bias', 'y@GRAD']
                              },
                              fetch_list=[
                                  'y', 'mean', 'variance', 'x@GRAD',
                                  'scale@GRAD', 'bias@GRAD'
                              ])
                self.__assert_close(y, out[0], "y")
                self.__assert_close(mean, out[1], "mean")
                self.__assert_close(variance, out[2], "variance", 1e-3)
                self.__assert_close(x_grad, out[3], "x_grad")
                self.__assert_close(scale_grad, out[4], "scale_grad", 1e-3)
                self.__assert_close(bias_grad, out[5], "bias_grad")
        # Always check on CPU; add GPU when the op supports it there.
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "layer_norm") and self.use_cudnn:
            places.append(core.CUDAPlace(0))
        for place in places:
            test_with_place(place, shape, begin_norm_axis)
    def test_check_forward_backward_with_scale_and_bias(self):
        self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1)
        self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3)
class TestLayerNormAPI(unittest.TestCase):
    """Smoke test: fluid.layers.layer_norm accepts its documented argument combinations."""
    def test_case(self):
        x = fluid.layers.data(
            name='x',
            shape=[64, 32, 256],
            dtype='float32',
            append_batch_size=False)
        # With scale and shift parameters.
        x = fluid.layers.layer_norm(
            x,
            scale=True,
            shift=True,
            begin_norm_axis=1,
            epsilon=1e-05,
            param_attr=None,
            bias_attr=None)
        # Without scale and shift.
        x = fluid.layers.layer_norm(
            x,
            scale=False,
            shift=False,
            begin_norm_axis=1,
            epsilon=1e-05,
            param_attr=None,
            bias_attr=None)
        # With named parameter attributes.
        x = fluid.layers.layer_norm(
            x,
            scale=False,
            shift=False,
            begin_norm_axis=1,
            epsilon=1e-05,
            param_attr="scale",
            bias_attr="shift")
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 37.689815 | 80 | 0.549932 |
6a2230920e6b1ada69ea5f060fe9b304e57ab86c | 4,950 | py | Python | wagtailmedia/models.py | cca/wagtailmedia | 65bfb904a696185bd8eee003e19c74a20ea04827 | [
"BSD-3-Clause"
] | null | null | null | wagtailmedia/models.py | cca/wagtailmedia | 65bfb904a696185bd8eee003e19c74a20ea04827 | [
"BSD-3-Clause"
] | null | null | null | wagtailmedia/models.py | cca/wagtailmedia | 65bfb904a696185bd8eee003e19c74a20ea04827 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import mimetypes
import os.path
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import MinValueValidator
from django.db import models
from django.dispatch import Signal
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from wagtail import VERSION as WAGTAIL_VERSION
from wagtail.core.models import CollectionMember
from wagtail.search import index
from wagtail.search.queryset import SearchableQuerySetMixin
from taggit.managers import TaggableManager
from wagtailmedia.utils import convert_gif
if WAGTAIL_VERSION < (2, 9):
from wagtail.admin.utils import get_object_usage
else:
from wagtail.admin.models import get_object_usage
class MediaQuerySet(SearchableQuerySetMixin, models.QuerySet):
    """QuerySet for media items; the mixin adds Wagtail search support."""

    pass
class AbstractMedia(CollectionMember, index.Indexed, models.Model):
    """Abstract base model for audio/video items kept in a Wagtail collection.

    Projects can substitute their own concrete subclass via the
    ``WAGTAILMEDIA_MEDIA_MODEL`` setting (see ``get_media_model`` below).
    """

    MEDIA_TYPES = (
        ("audio", _("Audio file")),
        ("video", _("Video file")),
    )

    title = models.CharField(max_length=255, verbose_name=_("title"))
    file = models.FileField(upload_to="media", verbose_name=_("file"))
    # Either "audio" or "video" (see MEDIA_TYPES).
    type = models.CharField(
        choices=MEDIA_TYPES, max_length=255, blank=False, null=False
    )
    duration = models.FloatField(
        blank=True,
        default=0,
        validators=[MinValueValidator(0)],
        verbose_name=_("duration"),
        help_text=_("Duration in seconds"),
    )
    # Pixel dimensions; not validated against the file anywhere in this
    # model -- assumed to be supplied by the uploader/admin form.
    width = models.PositiveIntegerField(null=True, blank=True, verbose_name=_("width"))
    height = models.PositiveIntegerField(
        null=True, blank=True, verbose_name=_("height")
    )
    thumbnail = models.FileField(
        upload_to="media_thumbnails", blank=True, verbose_name=_("thumbnail")
    )
    created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
    # Kept on user deletion (SET_NULL) so media survives account removal.
    uploaded_by_user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_("uploaded by user"),
        null=True,
        blank=True,
        editable=False,
        on_delete=models.SET_NULL,
    )
    tags = TaggableManager(help_text=None, blank=True, verbose_name=_("tags"))

    objects = MediaQuerySet.as_manager()

    # Title and tag names are boosted for partial-match search.
    search_fields = CollectionMember.search_fields + [
        index.SearchField("title", partial_match=True, boost=10),
        index.RelatedFields(
            "tags",
            [
                index.SearchField("name", partial_match=True, boost=10),
            ],
        ),
        index.FilterField("uploaded_by_user"),
    ]

    def __str__(self):
        return self.title

    @property
    def filename(self):
        """Base name of the stored file (no directory part)."""
        return os.path.basename(self.file.name)

    @property
    def thumbnail_filename(self):
        """Base name of the stored thumbnail file."""
        return os.path.basename(self.thumbnail.name)

    @property
    def file_extension(self):
        """File extension without the leading dot (e.g. ``"mp4"``)."""
        return os.path.splitext(self.filename)[1][1:]

    @property
    def url(self):
        return self.file.url

    @property
    def sources(self):
        """Single-entry list of ``{src, type}`` dicts for HTML <source> tags;
        falls back to application/octet-stream when the MIME type is
        unguessable."""
        return [
            {
                "src": self.url,
                "type": mimetypes.guess_type(self.filename)[0]
                or "application/octet-stream",
            }
        ]

    def get_usage(self):
        return get_object_usage(self)

    @property
    def usage_url(self):
        return reverse("wagtailmedia:media_usage", args=(self.id,))

    def is_editable_by_user(self, user):
        # Imported lazily to avoid a circular import at module load time.
        from wagtailmedia.permissions import permission_policy

        return permission_policy.user_has_permission_for_instance(user, "change", self)

    def clean(self, *args, **kwargs):
        super().clean(*args, **kwargs)
        # Normalize a blank duration to 0 so the MinValueValidator and
        # downstream arithmetic never see None.
        if not self.duration:
            self.duration = 0

    class Meta:
        abstract = True
        verbose_name = _("media")
class Media(AbstractMedia):
    """Default concrete media model used by the Wagtail admin."""

    # Fields exposed on the admin create/edit forms.
    admin_form_fields = (
        "title",
        "file",
        "collection",
        "duration",
        "width",
        "height",
        "thumbnail",
        "tags",
    )

    def save(self, *args, **kwargs):
        # If the file is a gif, convert it an .mp4
        if mimetypes.guess_type(self.filename)[0] == "image/gif":
            convert_gif(self)
        return super().save(*args, **kwargs)
def get_media_model():
    """Return the media model configured via ``WAGTAILMEDIA_MEDIA_MODEL``.

    Falls back to the bundled :class:`Media` model when the setting is
    absent; raises ``ImproperlyConfigured`` for malformed or unknown values.
    """
    from django.apps import apps
    from django.conf import settings

    try:
        dotted = settings.WAGTAILMEDIA_MEDIA_MODEL
        label, name = dotted.split(".")
    except AttributeError:
        # Setting not defined at all: use the default model.
        return Media
    except ValueError:
        raise ImproperlyConfigured(
            "WAGTAILMEDIA_MEDIA_MODEL must be of the form 'app_label.model_name'"
        )

    model = apps.get_model(label, name)
    if model is None:
        raise ImproperlyConfigured(
            "WAGTAILMEDIA_MEDIA_MODEL refers to model '%s' that has not been installed"
            % settings.WAGTAILMEDIA_MEDIA_MODEL
        )
    return model
# Django signal; receivers are sent `request` as an argument.
media_served = Signal()
| 27.5 | 87 | 0.656162 |
1673a35b740cfe55b5f9f7a1849414a55b1d27f5 | 1,554 | py | Python | cloudify_rest_client/workflows.py | mistio/cloudify-common | 3b706ba31a3371052fbdd12486d4a0befbcf491b | [
"Apache-2.0"
] | 6 | 2018-10-13T20:36:40.000Z | 2021-07-04T17:19:13.000Z | cloudify_rest_client/workflows.py | mistio/cloudify-common | 3b706ba31a3371052fbdd12486d4a0befbcf491b | [
"Apache-2.0"
] | 97 | 2018-05-25T12:10:19.000Z | 2022-03-30T10:16:40.000Z | cloudify_rest_client/workflows.py | mistio/cloudify-common | 3b706ba31a3371052fbdd12486d4a0befbcf491b | [
"Apache-2.0"
] | 15 | 2018-10-13T20:36:42.000Z | 2021-09-06T15:19:11.000Z | from cloudify_rest_client.responses import ListResponse
class Workflow(dict):
    """Dict-like wrapper around a single workflow returned by the REST API."""

    def __init__(self, workflow):
        super(Workflow, self).__init__()
        self.update(workflow)

    @property
    def id(self):
        # Workflows are identified by name; there is no separate id field.
        return self['name']

    @property
    def name(self):
        """Workflow name."""
        return self['name']

    @property
    def parameters(self):
        """Workflow parameters mapping."""
        return self['parameters']

    @property
    def operation(self):
        """Operation the workflow maps to."""
        return self['operation']

    @property
    def plugin(self):
        """Plugin that implements the workflow."""
        return self['plugin']
class WorkflowsClient(object):
    """Thin client for the workflows REST endpoints."""

    def __init__(self, api):
        self.api = api

    def list(self, filter_id=None, filter_rules=None, **kwargs):
        """
        Returns a list of workflows.

        :param filter_id: A filter ID to filter the deployments list by
        :param filter_rules: A list of filter rules to filter the
               deployments list by
        :param kwargs: Optional filter fields. for a list of available fields
               see the REST service's models.Deployment.fields
        :return: Workflows list.
        """
        query = kwargs
        if filter_id:
            query['_filter_id'] = filter_id

        if filter_rules:
            payload = {'filter_rules': filter_rules}
            response = self.api.post(
                '/searches/workflows', params=query, data=payload)
        else:
            response = self.api.get('/workflows', params=query)

        workflows = [Workflow(item) for item in response['items']]
        return ListResponse(workflows, response['metadata'])
7ec1767616e07f25a9f4fcc840c2c8ca6c1b06bd | 22,984 | py | Python | salt/pillar/__init__.py | aletourneau/salt | d7013a2f64eb4b79592220d76274bc5dde609e08 | [
"Apache-2.0"
] | 1 | 2020-10-02T02:29:25.000Z | 2020-10-02T02:29:25.000Z | salt/pillar/__init__.py | aletourneau/salt | d7013a2f64eb4b79592220d76274bc5dde609e08 | [
"Apache-2.0"
] | null | null | null | salt/pillar/__init__.py | aletourneau/salt | d7013a2f64eb4b79592220d76274bc5dde609e08 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Render the pillar data
'''
# Import python libs
import os
import collections
import logging
from copy import copy
# Import salt libs
import salt.loader
import salt.fileclient
import salt.minion
import salt.crypt
import salt.transport
from salt._compat import string_types
from salt.template import compile_template
from salt.utils.dictupdate import update
from salt.utils.serializers.yamlex import merge_recursive
from salt.utils.odict import OrderedDict
from salt.version import __version__
log = logging.getLogger(__name__)
def merge_recurse(obj_a, obj_b):
    """Merge ``obj_b`` into a shallow copy of ``obj_a`` using
    ``salt.utils.dictupdate.update`` (imported above as ``update``)."""
    copied = copy(obj_a)
    return update(copied, obj_b)
def merge_aggregate(obj_a, obj_b):
    """Merge using yamlex's recursive aggregation; level=1 merges at
    least the root data."""
    return merge_recursive(obj_a, obj_b, level=1)
def merge_overwrite(obj_a, obj_b):
    """Overwrite keys of ``obj_a`` with values from ``obj_b``.

    NOTE(review): the ``return`` inside the loop exits after the FIRST
    shared key is overwritten, so any remaining shared keys are never
    copied; only when the dicts share no keys at all does this fall
    through to a recursive merge.  Looks suspicious -- confirm the
    intended 'overwrite' semantics against upstream salt.
    """
    for obj in obj_b:
        if obj in obj_a:
            obj_a[obj] = obj_b[obj]
            return obj_a
    return merge_recurse(obj_a, obj_b)
def get_pillar(opts, grains, id_, saltenv=None, ext=None, env=None, funcs=None):
    '''
    Return the correct pillar driver based on the file_client option
    '''
    if env is not None:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Boron.'
        )
        # Backwards compatibility
        saltenv = env
    # 'remote' asks the master for pillar; any other file_client value
    # (including unknown ones) renders pillar locally via Pillar.
    return {
        'remote': RemotePillar,
        'local': Pillar
    }.get(opts['file_client'], Pillar)(opts, grains, id_, saltenv, ext, functions=funcs)
class RemotePillar(object):
    '''
    Get the pillar from the master
    '''
    def __init__(self, opts, grains, id_, saltenv, ext=None, functions=None):
        self.opts = opts
        self.opts['environment'] = saltenv
        self.ext = ext
        self.grains = grains
        self.id_ = id_
        self.serial = salt.payload.Serial(self.opts)
        # Transport channel used to talk to the master.
        self.sreq = salt.transport.Channel.factory(opts)
        # self.auth = salt.crypt.SAuth(opts)

    def compile_pillar(self):
        '''
        Return the pillar data from the master
        '''
        # '_pillar' is the master-side command handling this request;
        # 'ver': '2' -- presumably selects the newer payload format
        # (confirm against the master's _pillar handler).
        load = {'id': self.id_,
                'grains': self.grains,
                'saltenv': self.opts['environment'],
                'ver': '2',
                'cmd': '_pillar'}
        if self.ext:
            load['ext'] = self.ext
        # ret = self.sreq.send(load, tries=3, timeout=7200)
        ret_pillar = self.sreq.crypted_transfer_decode_dictentry(load, dictkey='pillar', tries=3, timeout=7200)
        # key = self.auth.get_keys()
        # aes = key.private_decrypt(ret['key'], 4)
        # pcrypt = salt.crypt.Crypticle(self.opts, aes)
        # ret_pillar = pcrypt.loads(ret['pillar'])

        if not isinstance(ret_pillar, dict):
            log.error(
                'Got a bad pillar from master, type {0}, expecting dict: '
                '{1}'.format(type(ret_pillar).__name__, ret_pillar)
            )
            # Fail safe: a malformed reply yields an empty pillar.
            return {}
        return ret_pillar
class Pillar(object):
'''
Read over the pillar top files and render the pillar data
'''
def __init__(self, opts, grains, id_, saltenv, ext=None, functions=None):
# Store the file_roots path so we can restore later. Issue 5449
self.actual_file_roots = opts['file_roots']
# use the local file client
self.opts = self.__gen_opts(opts, grains, id_, saltenv, ext)
self.client = salt.fileclient.get_file_client(self.opts, True)
if opts.get('file_client', '') == 'local':
opts['grains'] = grains
# if we didn't pass in functions, lets load them
if functions is None:
if opts.get('file_client', '') == 'local':
self.functions = salt.loader.minion_mods(opts)
else:
self.functions = salt.loader.minion_mods(self.opts)
else:
self.functions = functions
self.matcher = salt.minion.Matcher(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
# Fix self.opts['file_roots'] so that ext_pillars know the real
# location of file_roots. Issue 5951
ext_pillar_opts = dict(self.opts)
ext_pillar_opts['file_roots'] = self.actual_file_roots
self.merge_strategy = 'smart'
if opts.get('pillar_source_merging_strategy'):
self.merge_strategy = opts['pillar_source_merging_strategy']
self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions)
def __valid_ext(self, ext):
'''
Check to see if the on demand external pillar is allowed
'''
if not isinstance(ext, dict):
return {}
valid = set(('libvirt', 'virtkey'))
if any(key not in valid for key in ext):
return {}
return ext
def __gen_opts(self, opts_in, grains, id_, saltenv=None, ext=None, env=None):
'''
The options need to be altered to conform to the file client
'''
if env is not None:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
opts = dict(opts_in)
opts['file_roots'] = opts['pillar_roots']
opts['file_client'] = 'local'
if not grains:
opts['grains'] = {}
else:
opts['grains'] = grains
opts['id'] = id_
if 'environment' not in opts:
opts['environment'] = saltenv
if opts['state_top'].startswith('salt://'):
opts['state_top'] = opts['state_top']
elif opts['state_top'].startswith('/'):
opts['state_top'] = os.path.join('salt://', opts['state_top'][1:])
else:
opts['state_top'] = os.path.join('salt://', opts['state_top'])
if self.__valid_ext(ext):
if 'ext_pillar' in opts:
opts['ext_pillar'].append(ext)
else:
opts['ext_pillar'] = [ext]
return opts
def _get_envs(self):
'''
Pull the file server environments out of the master options
'''
envs = set(['base'])
if 'file_roots' in self.opts:
envs.update(list(self.opts['file_roots']))
return envs
def get_tops(self):
'''
Gather the top files
'''
tops = collections.defaultdict(list)
include = collections.defaultdict(list)
done = collections.defaultdict(list)
errors = []
# Gather initial top files
try:
if self.opts['environment']:
tops[self.opts['environment']] = [
compile_template(
self.client.cache_file(
self.opts['state_top'],
self.opts['environment']
),
self.rend,
self.opts['renderer'],
self.opts['environment']
)
]
else:
for saltenv in self._get_envs():
tops[saltenv].append(
compile_template(
self.client.cache_file(
self.opts['state_top'],
saltenv
),
self.rend,
self.opts['renderer'],
saltenv=saltenv
)
)
except Exception as exc:
errors.append(
('Rendering Primary Top file failed, render error:\n{0}'
.format(exc)))
# Search initial top files for includes
for saltenv, ctops in tops.items():
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in include.items():
pops.append(saltenv)
if not states:
continue
for sls in states:
if sls in done[saltenv]:
continue
try:
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get('dest', False),
self.rend,
self.opts['renderer'],
saltenv=saltenv
)
)
except Exception as exc:
errors.append(
('Rendering Top file {0} failed, render error'
':\n{1}').format(sls, exc))
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops, errors
def merge_tops(self, tops):
'''
Cleanly merge the top files
'''
top = collections.defaultdict(OrderedDict)
orders = collections.defaultdict(OrderedDict)
for ctops in tops.itervalues():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == 'include':
continue
for tgt in targets:
matches = []
states = OrderedDict()
orders[saltenv][tgt] = 0
for comp in ctop[saltenv][tgt]:
if isinstance(comp, dict):
if 'match' in comp:
matches.append(comp)
if 'order' in comp:
order = comp['order']
if not isinstance(order, int):
try:
order = int(order)
except ValueError:
order = 0
orders[saltenv][tgt] = order
if isinstance(comp, string_types):
states[comp] = True
top[saltenv][tgt] = matches
top[saltenv][tgt].extend(states)
return self.sort_top_targets(top, orders)
def sort_top_targets(self, top, orders):
'''
Returns the sorted high data from the merged top files
'''
sorted_top = collections.defaultdict(OrderedDict)
# pylint: disable=cell-var-from-loop
for saltenv, targets in top.items():
sorted_targets = sorted(targets,
key=lambda target: orders[saltenv][target])
for target in sorted_targets:
sorted_top[saltenv][target] = targets[target]
# pylint: enable=cell-var-from-loop
return sorted_top
    def get_top(self):
        '''
        Returns the high data derived from the top file
        '''
        tops, errors = self.get_tops()
        try:
            merged_tops = self.merge_tops(tops)
        except TypeError as err:
            # Malformed top data can make merge_tops raise; degrade to an
            # empty top and surface the failure in the error list.
            merged_tops = OrderedDict()
            errors.append('Error encountered while render pillar top file.')
        return merged_tops, errors
def top_matches(self, top):
'''
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
'''
matches = {}
for saltenv, body in top.items():
if self.opts['environment']:
if saltenv != self.opts['environment']:
continue
for match, data in body.items():
if self.matcher.confirm_top(
match,
data,
self.opts.get('nodegroups', {}),
):
if saltenv not in matches:
matches[saltenv] = []
for item in data:
if isinstance(item, string_types):
matches[saltenv].append(item)
return matches
def render_pstate(self, sls, saltenv, mods, defaults=None):
'''
Collect a single pillar sls file and render it
'''
if defaults is None:
defaults = {}
err = ''
errors = []
fn_ = self.client.get_state(sls, saltenv).get('dest', False)
if not fn_:
if self.opts['pillar_roots'].get(saltenv):
msg = ('Specified SLS {0!r} in environment {1!r} is not'
' available on the salt master').format(sls, saltenv)
log.error(msg)
errors.append(msg)
else:
log.debug('Specified SLS {0!r} in environment {1!r} is not'
' found, which might be due to environment {1!r}'
' not being present in "pillar_roots" yet!'
.format(sls, saltenv))
# return state, mods, errors
return None, mods, errors
state = None
try:
state = compile_template(
fn_, self.rend, self.opts['renderer'], saltenv, sls, _pillar_rend=True, **defaults)
except Exception as exc:
msg = 'Rendering SLS {0!r} failed, render error:\n{1}'.format(
sls, exc
)
log.critical(msg)
errors.append('Rendering SLS \'{0}\' failed. Please see master log for details.'.format(sls))
mods.add(sls)
nstate = None
if state:
if not isinstance(state, dict):
msg = 'SLS {0!r} does not render to a dictionary'.format(sls)
log.error(msg)
errors.append(msg)
else:
if 'include' in state:
if not isinstance(state['include'], list):
msg = ('Include Declaration in SLS {0!r} is not '
'formed as a list'.format(sls))
log.error(msg)
errors.append(msg)
else:
for sub_sls in state.pop('include'):
if isinstance(sub_sls, dict):
sub_sls, v = sub_sls.iteritems().next()
defaults = v.get('defaults', {})
key = v.get('key', None)
else:
key = None
if sub_sls not in mods:
nstate, mods, err = self.render_pstate(
sub_sls,
saltenv,
mods,
defaults
)
if nstate:
if key:
nstate = {
key: nstate
}
state = self.merge_sources(state, nstate)
if err:
errors += err
return state, mods, errors
def render_pillar(self, matches):
'''
Extract the sls pillar files from the matches and render them into the
pillar
'''
pillar = {}
errors = []
for saltenv, pstates in matches.items():
mods = set()
for sls in pstates:
pstate, mods, err = self.render_pstate(sls, saltenv, mods)
if err:
errors += err
if pstate is not None:
if not isinstance(pstate, dict):
log.error(
'The rendered pillar sls file, {0!r} state did '
'not return the expected data format. This is '
'a sign of a malformed pillar sls file. Returned '
'errors: {1}'.format(
sls,
', '.join(['{0!r}'.format(e) for e in errors])
)
)
continue
pillar = self.merge_sources(pillar, pstate)
return pillar, errors
def _external_pillar_data(self,
pillar,
val,
pillar_dirs,
key):
'''
Builds actual pillar data structure
and update
the variable ``pillar``
'''
ext = None
# try the new interface, which includes the minion ID
# as first argument
if isinstance(val, dict):
ext = self.ext_pillars[key](self.opts['id'], pillar, **val)
elif isinstance(val, list):
ext = self.ext_pillars[key](self.opts['id'], pillar, *val)
else:
if key == 'git':
ext = self.ext_pillars[key](self.opts['id'],
val,
pillar_dirs)
else:
ext = self.ext_pillars[key](self.opts['id'],
pillar,
val)
return ext
def ext_pillar(self, pillar, pillar_dirs):
'''
Render the external pillar data
'''
if 'ext_pillar' not in self.opts:
return pillar
if not isinstance(self.opts['ext_pillar'], list):
log.critical('The "ext_pillar" option is malformed')
return pillar
ext = None
for run in self.opts['ext_pillar']:
if not isinstance(run, dict):
log.critical('The "ext_pillar" option is malformed')
return {}
for key, val in run.items():
if key not in self.ext_pillars:
err = ('Specified ext_pillar interface {0} is '
'unavailable').format(key)
log.critical(err)
continue
try:
try:
ext = self._external_pillar_data(pillar,
val,
pillar_dirs,
key)
except TypeError as exc:
if str(exc).startswith('ext_pillar() takes exactly '):
log.warning('Deprecation warning: ext_pillar "{0}"'
' needs to accept minion_id as first'
' argument'.format(key))
else:
raise
ext = self._external_pillar_data(pillar,
val,
pillar_dirs,
key)
except Exception as exc:
log.exception(
'Failed to load ext_pillar {0}: {1}'.format(
key,
exc
)
)
if ext:
pillar = self.merge_sources(pillar, ext)
ext = None
return pillar
def merge_sources(self, obj_a, obj_b):
strategy = self.merge_strategy
if strategy == 'smart':
renderer = self.opts.get('renderer', 'yaml')
if renderer == 'yamlex' or renderer.startswith('yamlex_'):
strategy = 'aggregate'
else:
strategy = 'recurse'
if strategy == 'recurse':
merged = merge_recurse(obj_a, obj_b)
elif strategy == 'aggregate':
#: level = 1 merge at least root data
merged = merge_aggregate(obj_a, obj_b)
elif strategy == 'overwrite':
merged = merge_overwrite(obj_a, obj_b)
else:
log.warning('unknown merging strategy {0}, '
'fallback to recurse'.format(strategy))
merged = merge_recurse(obj_a, obj_b)
return merged
    def compile_pillar(self, ext=True, pillar_dirs=None):
        '''
        Render the pillar data and return
        '''
        top, terrors = self.get_top()
        if ext:
            if self.opts.get('ext_pillar_first', False):
                # ext_pillar_first: compute external pillar up front, then
                # merge it with the rendered SLS pillar data.
                self.opts['pillar'] = self.ext_pillar({}, pillar_dirs)
                matches = self.top_matches(top)
                pillar, errors = self.render_pillar(matches)
                pillar = self.merge_sources(pillar, self.opts['pillar'])
            else:
                # Default order: render SLS pillar first, then let external
                # pillars merge on top of it.
                matches = self.top_matches(top)
                pillar, errors = self.render_pillar(matches)
                pillar = self.ext_pillar(pillar, pillar_dirs)
        else:
            matches = self.top_matches(top)
            pillar, errors = self.render_pillar(matches)
        errors.extend(terrors)
        if self.opts.get('pillar_opts', True):
            # Expose a sanitized copy of the master opts as pillar['master'].
            mopts = dict(self.opts)
            if 'grains' in mopts:
                mopts.pop('grains')
            if 'aes' in mopts:
                # Never leak the AES session key into pillar data.
                mopts.pop('aes')
            # Restore the actual file_roots path. Issue 5449
            mopts['file_roots'] = self.actual_file_roots
            mopts['saltversion'] = __version__
            pillar['master'] = mopts
        if errors:
            for error in errors:
                log.critical('Pillar render error: {0}'.format(error))
            pillar['_errors'] = errors
        return pillar
| 37.927393 | 111 | 0.464976 |
fda20152febf7e9f69d3fd7c0f0e08be783c0fdc | 1,324 | py | Python | clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_utils.py | alexandreroutier/clinica | 66625c65e74962db7d5cea267d1a0e51d774bf91 | [
"MIT"
] | 1 | 2020-12-14T06:07:14.000Z | 2020-12-14T06:07:14.000Z | clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_utils.py | alexandreroutier/clinica | 66625c65e74962db7d5cea267d1a0e51d774bf91 | [
"MIT"
] | null | null | null | clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_utils.py | alexandreroutier/clinica | 66625c65e74962db7d5cea267d1a0e51d774bf91 | [
"MIT"
] | null | null | null | # coding: utf8
def atlas_statistics(in_image, atlas_list):
    """
    For each atlas name provided it calculates for the input image the mean
    for each region in the atlas and saves it to a TSV file.

    Args:
        in_image: A Nifti image
        atlas_list: List of names of atlas to be applied

    Returns:
        List of paths to TSV files
    """
    from os.path import abspath, join
    from nipype.utils.filemanip import split_filename
    from clinica.utils.atlas import AtlasAbstract
    from clinica.utils.statistics import statistics_on_atlas
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.ux import print_end_image

    subject_id = get_subject_id(in_image)
    _, base, _ = split_filename(in_image)
    available_atlases = AtlasAbstract.__subclasses__()
    tsv_paths = []
    # Preserve the caller-supplied atlas order in the returned list.
    for atlas_name in atlas_list:
        for atlas_cls in available_atlases:
            if atlas_cls.get_name_atlas() != atlas_name:
                continue
            out_tsv = abspath(join(
                './' + base + '_space-' + atlas_name
                + '_map-graymatter_statistics.tsv'))
            statistics_on_atlas(in_image, atlas_cls(), out_tsv)
            tsv_paths.append(out_tsv)
    print_end_image(subject_id)
    return tsv_paths
| 37.828571 | 132 | 0.700151 |
f36605d5229679e139e8aebfb9a9ab10363b5041 | 1,844 | py | Python | src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareBpCrimsonContentConfig/cli/equal/golden_output_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareBpCrimsonContentConfig/cli/equal/golden_output_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareBpCrimsonContentConfig/cli/equal/golden_output_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | expected_output = {
'node': {
1: {
'priority': 1,
'domain': 1,
'mode': 'Aggregation',
'router_id': '0.0.0.0',
'configured_svl_links': {
'link_id': 1
},
'configured_svl_ports': {
'FourHundredGigE1/0/19': {
'link': 1,
'slot_bay_port': '1:0:19'
},
'FourHundredGigE1/0/20': {
'link': 1,
'slot_bay_port': '1:0:20'
},
'FourHundredGigE1/0/22': {
'link': 1,
'slot_bay_port': '1:0:22'
}
},
'configured_svl_dual_active_detection_ports': {
'FourHundredGigE1/0/15': {
'slot_bay_port': '1:0:15'
},
'FourHundredGigE1/0/17': {
'slot_bay_port': '1:0:17'
}
}
},
2: {
'priority': 1,
'domain': 1,
'mode': 'Aggregation',
'router_id': '0.0.0.0',
'configured_svl_links': {
'link_id': 1
},
'configured_svl_ports': {
'FourHundredGigE2/0/19': {
'link': 1,
'slot_bay_port': '1:0:19'
},
'FourHundredGigE2/0/20': {
'link': 1,
'slot_bay_port': '1:0:20'
},
'FourHundredGigE2/0/22': {
'link': 1,
'slot_bay_port': '1:0:22'
}
},
'configured_svl_dual_active_detection_ports': {
'FourHundredGigE2/0/16': {
'slot_bay_port': '1:0:16'
},
'FourHundredGigE2/0/18': {
'slot_bay_port': '1:0:18'
}
}
}
}
}
| 27.522388 | 55 | 0.368221 |
3d981ab5ae2f7ffa2d1b8db8df6c5fce8768a7c2 | 10,103 | py | Python | opsdroid/connector/telegram/__init__.py | ceccs17d45/opsdroid | 0c7e0c0d34d242bf765639413fb8294e2e3fd232 | [
"Apache-2.0"
] | null | null | null | opsdroid/connector/telegram/__init__.py | ceccs17d45/opsdroid | 0c7e0c0d34d242bf765639413fb8294e2e3fd232 | [
"Apache-2.0"
] | null | null | null | opsdroid/connector/telegram/__init__.py | ceccs17d45/opsdroid | 0c7e0c0d34d242bf765639413fb8294e2e3fd232 | [
"Apache-2.0"
] | null | null | null | """A connector for Telegram."""
import asyncio
import logging
import aiohttp
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message, Image
_LOGGER = logging.getLogger(__name__)
class ConnectorTelegram(Connector):
"""A connector the the char service Telegram."""
    def __init__(self, config, opsdroid=None):
        """Create the connector.

        Args:
            config (dict): configuration settings from the
                file config.yaml.

        """
        _LOGGER.debug("Loaded telegram connector")
        super().__init__(config, opsdroid=opsdroid)
        self.name = "telegram"
        self.opsdroid = opsdroid
        # Offset of the next Telegram update to fetch (None = start fresh).
        self.latest_update = None
        self.default_target = None
        # Polling flag; set False by disconnect() to stop the loop.
        self.listening = True
        self.default_user = config.get("default-user", None)
        # Optional list of usernames and/or numeric user ids allowed to talk.
        self.whitelisted_users = config.get("whitelisted-users", None)
        # Seconds to sleep between getUpdates polls (default 1).
        self.update_interval = config.get("update-interval", 1)
        self.session = None
        self._closing = asyncio.Event()
        self.loop = asyncio.get_event_loop()
        try:
            self.token = config["token"]
        except (KeyError, AttributeError):
            _LOGGER.error(
                "Unable to login: Access token is missing. "
                "Telegram connector will be unavailable."
            )
@staticmethod
def get_user(response):
"""Get user from response.
The API response is different depending on how
the bot is set up and where the message is coming
from. This method was created to keep if/else
statements to a minium on _parse_message.
Args:
response (dict): Response returned by aiohttp.ClientSession.
"""
user = None
if "username" in response["message"]["from"]:
user = response["message"]["from"]["username"]
elif "first_name" in response["message"]["from"]:
user = response["message"]["from"]["first_name"]
return user
def handle_user_permission(self, response, user):
"""Handle user permissions.
This will check if the user that tried to talk with
the bot is allowed to do so. It will also work with
userid to improve security.
"""
user_id = response["message"]["from"]["id"]
if (
not self.whitelisted_users
or user in self.whitelisted_users
or user_id in self.whitelisted_users
):
return True
return False
def build_url(self, method):
"""Build the url to connect to the API.
Args:
method (string): API call end point.
Return:
String that represents the full API url.
"""
return "https://api.telegram.org/bot{}/{}".format(self.token, method)
async def delete_webhook(self):
"""Delete Telegram webhook.
The Telegram api will thrown an 409 error when an webhook is
active and a call to getUpdates is made. This method will
try to request the deletion of the webhook to make the getUpdate
request possible.
"""
_LOGGER.debug("Sending deleteWebhook request to Telegram...")
resp = await self.session.get(self.build_url("deleteWebhook"))
if resp.status == 200:
_LOGGER.debug("Telegram webhook deleted successfully.")
else:
_LOGGER.debug("Unable to delete webhook.")
async def connect(self):
"""Connect to Telegram.
This method is not an authorization call. It basically
checks if the API token was provided and makes an API
call to Telegram and evaluates the status of the call.
"""
_LOGGER.debug("Connecting to telegram")
self.session = aiohttp.ClientSession()
resp = await self.session.get(self.build_url("getMe"))
if resp.status != 200:
_LOGGER.error("Unable to connect")
_LOGGER.error("Telegram error %s, %s", resp.status, resp.text)
else:
json = await resp.json()
_LOGGER.debug(json)
_LOGGER.debug("Connected to telegram as %s", json["result"]["username"])
async def _parse_message(self, response):
"""Handle logic to parse a received message.
Since everyone can send a private message to any user/bot
in Telegram, this method allows to set a list of whitelisted
users that can interact with the bot. If any other user tries
to interact with the bot the command is not parsed and instead
the bot will inform that user that he is not allowed to talk
with the bot.
We also set self.latest_update to +1 in order to get the next
available message (or an empty {} if no message has been received
yet) with the method self._get_messages().
Args:
response (dict): Response returned by aiohttp.ClientSession.
"""
for result in response["result"]:
_LOGGER.debug(result)
if result.get("edited_message", None):
result["message"] = result.pop("edited_message")
if "channel" in result["message"]["chat"]["type"]:
_LOGGER.debug(
"Channel message parsing not supported " "- Ignoring message"
)
elif "message" in result and "text" in result["message"]:
user = self.get_user(result)
message = Message(
result["message"]["text"], user, result["message"]["chat"], self
)
if self.handle_user_permission(result, user):
await self.opsdroid.parse(message)
else:
message.text = (
"Sorry, you're not allowed " "to speak with this bot."
)
await self.send(message)
self.latest_update = result["update_id"] + 1
else:
_LOGGER.error("Unable to parse the message.")
async def _get_messages(self):
"""Connect to the Telegram API.
Uses an aiohttp ClientSession to connect to Telegram API
and get the latest messages from the chat service.
The data["offset"] is used to consume every new message, the API
returns an int - "update_id" value. In order to get the next
message this value needs to be increased by 1 the next time
the API is called. If no new messages exists the API will just
return an empty {}.
"""
data = {}
if self.latest_update is not None:
data["offset"] = self.latest_update
await asyncio.sleep(self.update_interval)
resp = await self.session.get(self.build_url("getUpdates"), params=data)
if resp.status == 409:
_LOGGER.info(
"Can't get updates because previous "
"webhook is still active. Will try to "
"delete webhook."
)
await self.delete_webhook()
if resp.status != 200:
_LOGGER.error("Telegram error %s, %s", resp.status, resp.text)
self.listening = False
else:
json = await resp.json()
await self._parse_message(json)
async def get_messages_loop(self):
"""Listen for and parse new messages.
The bot will always listen to all opened chat windows,
as long as opsdroid is running. Since anyone can start
a new chat with the bot is recommended that a list of
users to be whitelisted be provided in config.yaml.
The method will sleep asynchronously at the end of
every loop. The time can either be specified in the
config.yaml with the param update-interval - this
defaults to 1 second.
"""
while self.listening:
await self._get_messages()
async def listen(self):
"""Listen method of the connector.
Every connector has to implement the listen method. When an
infinite loop is running, it becomes hard to cancel this task.
So we are creating a task and set it on a variable so we can
cancel the task.
"""
message_getter = self.loop.create_task(self.get_messages_loop())
await self._closing.wait()
message_getter.cancel()
@register_event(Message)
async def send_message(self, message):
"""Respond with a message.
Args:
message (object): An instance of Message.
"""
_LOGGER.debug("Responding with: %s", message.text)
data = dict()
data["chat_id"] = message.target["id"]
data["text"] = message.text
resp = await self.session.post(self.build_url("sendMessage"), data=data)
if resp.status == 200:
_LOGGER.debug("Successfully responded")
else:
_LOGGER.error("Unable to respond.")
@register_event(Image)
async def send_image(self, file_event):
"""Send Image to Telegram.
Gets the chat id from the channel and then
sends the bytes of the image as multipart/form-data.
"""
data = aiohttp.FormData()
data.add_field(
"chat_id", str(file_event.target["id"]), content_type="multipart/form-data"
)
data.add_field(
"photo",
await file_event.get_file_bytes(),
content_type="multipart/form-data",
)
resp = await self.session.post(self.build_url("sendPhoto"), data=data)
if resp.status == 200:
_LOGGER.debug("Sent %s image " "successfully", file_event.name)
else:
_LOGGER.debug("Unable to send image - " "Status Code %s", resp.status)
    async def disconnect(self):
        """Disconnect from Telegram.

        Stops the infinite loop found in self._listen(), closes
        aiohttp session.
        """
        # order matters: stop the polling loop first so no request is
        # issued against a closed session, then unblock listen()
        self.listening = False
        self._closing.set()
        await self.session.close()
| 33.902685 | 87 | 0.596456 |
f0789953fd7b7569117f720bd1f889de835c098d | 6,779 | py | Python | pychron/lasers/tasks/laser_actions.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/lasers/tasks/laser_actions.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/lasers/tasks/laser_actions.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from traits.api import HasTraits
# from traitsui.api import View, Item
from __future__ import absolute_import
from pyface.action.action import Action
from pyface.tasks.action.task_action import TaskAction
from pychron.envisage.view_util import open_view
from pychron.lasers.laser_managers.ilaser_manager import ILaserManager
from pychron.lasers.laser_managers.pychron_laser_manager import PychronLaserManager
from pychron.lasers.pattern.pattern_maker_view import PatternMakerView
class BaseLaserAction(Action):
    """Action that locates its laser manager via the application registry."""

    manager_name = None
    manager = None

    def _get_manager(self, event, app=None):
        """Return the laser manager, preferring an explicitly set one."""
        if self.manager is not None:
            return self.manager

        if app is None:
            app = event.task.window.application
        return app.get_service(
            ILaserManager, 'name=="{}"'.format(self.manager_name)
        )
class LocalLaserAction(BaseLaserAction):
    """Action only enabled for locally connected laser managers.

    A remote (client) ``PychronLaserManager`` disables the action unless
    the subclass opts in by setting ``client_action = True``.
    """

    # when True, the action stays enabled even for client/remote managers
    client_action = False

    def __init__(self, manager, *args, **kw):
        super(LocalLaserAction, self).__init__(*args, **kw)
        # man = self._get_manager(None, app=self.window.application)
        if isinstance(manager, PychronLaserManager) and not self.client_action:
            self.enabled = False

        self.manager = manager
# class ExecutePatternAction(LocalLaserAction):
# name = 'Execute Pattern'
#
# def perform(self, event):
# manager = self._get_manager(event)
# if manager is not None:
# manager.execute_pattern()
#
# class ExecuteAndLasePatternAction(LocalLaserAction):
# name = 'Execute Pattern and Lase'
#
# def perform(self, event):
# manager = self._get_manager(event)
# if manager is not None:
# manager.execute_pattern(lase=True)
class OpenScannerAction(LocalLaserAction):
    """Open the scanner window of the active laser manager."""

    name = "Open Scanner..."
    accelerator = "Ctrl+T"

    def perform(self, event):
        mgr = self._get_manager(event)
        if mgr is None:
            return
        mgr.open_scanner()
class OpenAutoTunerAction(LocalLaserAction):
    """Open the auto-tuner window of the active laser manager."""

    name = "Open AutoTuner..."

    # accelerator = 'Ctrl+T'

    def perform(self, event):
        manager = self._get_manager(event)
        if manager is not None:
            manager.open_autotuner()
class LaserTaskAction(TaskAction):
    """TaskAction whose enabled state tracks the active fusion-laser task.

    ``_task_changed`` caches (in ``_enabled``) whether the last-seen task
    was one of the fusion laser tasks; ``_enabled_update`` then prefers
    the ``enabled_name`` trait on the bound object when configured,
    falling back to that cached value.
    """

    # tri-state cache: None until a task has been seen, then True/False
    _enabled = None

    def _task_changed(self):
        if self.task:
            # only the CO2/diode fusion tasks may enable this action
            if self.task.id in ("pychron.fusions.co2", "pychron.fusions.diode"):
                enabled = True
                if self.enabled_name:
                    if self.object:
                        enabled = bool(
                            self._get_attr(self.object, self.enabled_name, False)
                        )
                if enabled:
                    self._enabled = True
            else:
                self._enabled = False

    def _enabled_update(self):
        """
        reimplement ListeningAction's _enabled_update
        """
        if self.enabled_name:
            if self.object:
                self.enabled = bool(
                    self._get_attr(self.object, self.enabled_name, False)
                )
            else:
                self.enabled = False
        elif self._enabled is not None:
            # fall back to the task-derived cache when no trait is bound
            self.enabled = self._enabled
        else:
            self.enabled = bool(self.object)
# class TestDegasAction(LaserTaskAction):
# name = 'Test Degas...'
# method = 'test_degas'
class OpenPatternAction(Action):
    """Prompt for an existing laser pattern file and display it."""

    name = "Open Pattern..."

    def perform(self, event=None):
        maker = PatternMakerView()
        # only show the view when a pattern was actually loaded
        if maker.load_pattern():
            open_view(maker)
class NewPatternAction(Action):
    """Open a blank pattern-maker view."""

    name = "New Pattern..."
    # NOTE(review): pyface Action does not dispatch via `method`;
    # attribute kept for backward compatibility
    method = "new_pattern"

    def perform(self, event=None):
        maker = PatternMakerView()
        open_view(maker)
class LaserCalibrationAction(Action):
    """Base for actions operating on the laser-calibration task."""

    def _get_task(self, event):
        """Activate and return the pychron.laser.calibration task."""
        app = event.task.window.application
        return app.get_task("pychron.laser.calibration")
class PowerMapAction(LaserCalibrationAction):
    """Start a new power-map acquisition in the calibration task."""

    name = "New Power Map..."

    def perform(self, event):
        task = self._get_task(event)
        task.new_power_map()
class OpenPowerMapAction(LaserCalibrationAction):
    """Open previously saved power maps in the calibration task window."""

    name = "Open Power Map"
    accelerator = "Ctrl+3"

    def perform(self, event):
        app = event.task.window.application
        task_id = "pychron.laser.calibration"
        # activate=False: the window is raised/opened manually below
        task = app.get_task(task_id, activate=False)
        ps = task.get_power_maps()
        if ps:
            # NOTE(review): Qt widgets expose isVisible(), not isvisible();
            # confirm this attribute exists on the toolkit control in use.
            if task.window.control.isvisible():
                task.window.control.raise_()
            else:
                task.window.open()
            task.open_power_maps(ps)
class PowerCalibrationAction(LaserCalibrationAction):
    """Start a new power calibration in the calibration task."""

    name = "Power Calibration..."

    def perform(self, event):
        task = self._get_task(event)
        task.new_power_calibration()
class PyrometerCalibrationAction(LaserCalibrationAction):
    """Start a new pyrometer calibration in the calibration task."""

    name = "Pyrometer Calibration"

    def perform(self, event):
        task = self._get_task(event)
        task.new_pyrometer_calibration()
class PIDTuningAction(LaserCalibrationAction):
    """Open a new PID tuner in the calibration task."""

    name = "PID Tuning"

    def perform(self, event):
        task = self._get_task(event)
        task.new_pid_tuner()
class LaserScriptExecuteAction(TaskAction):
    """Show the laser-script executor pane of the current task."""

    # TaskAction dispatches to this method name on the task
    method = "show_laser_script_executor"
    name = "Laser Script..."
# ============= EOF =============================================
| 28.970085 | 83 | 0.603186 |
eb84166e1c712596140d83d3510bf9bc8ae6e66c | 4,622 | py | Python | src/infi/mount_utils/linux/mount/repository.py | Infinidat/mount-utils | e4e08e21c278a2391494a7b9d9d7efdaeb393a9c | [
"BSD-3-Clause"
] | null | null | null | src/infi/mount_utils/linux/mount/repository.py | Infinidat/mount-utils | e4e08e21c278a2391494a7b9d9d7efdaeb393a9c | [
"BSD-3-Clause"
] | null | null | null | src/infi/mount_utils/linux/mount/repository.py | Infinidat/mount-utils | e4e08e21c278a2391494a7b9d9d7efdaeb393a9c | [
"BSD-3-Clause"
] | null | null | null | import platform
import re
from munch import Munch
from ...base.mount import MountRepositoryMixin
from logging import getLogger
from collections import OrderedDict
log = getLogger()
class LinuxMountRepositoryMixin(MountRepositoryMixin):
    """Linux mount repository: parses /etc/fstab, /etc/mtab and utab."""

    def _read_fstab(self):
        # static mount configuration
        return self._read_file("/etc/fstab")

    def _read_mtab(self):
        # currently mounted filesystems
        return self._read_file("/etc/mtab")

    def _read_utab(self):
        # userspace mount options maintained by libmount
        return self._read_file("/run/mount/utab")

    def _parse_options_for_entry(self, entry):
        """Parse the raw ``opts`` string of *entry* into an OrderedDict.

        Bare flags parse with ``value`` of None; ``key=value`` options
        keep their (translated) value.  The entry is updated in place
        and the options dict is also returned.
        """
        string = entry["opts"]
        results = OrderedDict()
        pattern = re.compile(OPTION_PATTERN)
        for match in pattern.finditer(string):
            key = match.groupdict().get("key")
            value = match.groupdict().get("value")
            # _translate_value is provided by the base mixin
            results[key] = self._translate_value(value)
        entry["opts"] = results
        return results

    def _get_entries_dict_from_utab(self):
        """Return a {fsname: parsed-options} mapping for every utab entry."""
        pattern = re.compile(UTAB_ENTRY_PATTERN_LINUX, re.MULTILINE)
        string = self._read_utab()
        log.debug(u"utab content = \n{}".format(string))
        results = dict([(match.groups()[0], self._parse_options_for_entry(match.groupdict())) for match in
                        pattern.finditer(string)])
        return results

    def _canonicalize_path(self, path):
        # HPT-2164 sometimes mtab contains devices in their "dm" names which can change between boots.
        # 'mount' command handles this by canonicalizing the paths and turning them into the real (/dev/mapper) names:
        # https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git/tree/lib/canonicalize.c (canonicalize_path)
        import os
        import stat
        if not os.path.exists(path):
            return path
        canonical = os.path.abspath(os.path.realpath(path))
        if "/" in canonical:
            part = canonical.rsplit("/", 1)[1]
            # a /dev/dm-N block device: resolve to its stable /dev/mapper name
            if part.startswith("dm-") and part[3:].isdigit() and stat.S_ISBLK(os.stat(canonical).st_mode):
                with open("/sys/block/{}/dm/name".format(part), "r") as fd:
                    name = fd.read().strip()
                return "/dev/mapper/" + name
        return canonical

    def _get_list_of_groupdicts_from_mtab(self):
        """Parse mtab entries, merging utab options and canonical device names."""
        pattern = re.compile(MOUNT_ENTRY_PATTERN_LINUX, re.MULTILINE)
        string = self._read_mtab()
        log.debug(u"mtab content = \n{}".format(string))
        utab_results = self._get_entries_dict_from_utab()
        mtab_results = [match.groupdict() for match in pattern.finditer(string)]
        mtab_results = self._parse_options_in_entries(mtab_results)
        for mtab_result in mtab_results:
            fsname = mtab_result['fsname']
            # userspace options (e.g. _netdev) live in utab, not in mtab
            if fsname in utab_results:
                mtab_result['opts'].update(utab_results[fsname])
            mtab_result['fsname'] = self._canonicalize_path(mtab_result['fsname'])
        return mtab_results

    def _get_list_of_groupdicts_from_fstab(self):
        """Parse fstab entries into option-parsed groupdicts."""
        pattern = re.compile(MOUNT_ENTRY_PATTERN_LINUX, re.MULTILINE)
        string = self._read_fstab()
        log.debug(u"fstab content = \n{}".format(string))
        results = [match.groupdict() for match in pattern.finditer(string)]
        return self._parse_options_in_entries(results)
# --- regex building blocks for fstab/mtab/utab parsing ---
# a "word": any run of characters up to whitespace or a '#' comment
WORD_PATTERN = r"[^# \t\n\r\f\v]+"
FSNAME_PATTERN = r"(?P<fsname>{})".format(WORD_PATTERN)
DIRNAME_PATTERN = r"(?P<dirname>{})".format(WORD_PATTERN)
TYPNAME_PATTERN = r"(?P<typename>{})".format(WORD_PATTERN)
# like WORD_PATTERN but also stops at ',' and '=' (option separators)
STRING_PATTERN = r"[^,=# \t\n\r\f\v]+"
# a single mount option: either a bare flag or key=value
OPTION_PATTERN = r"(?P<key>{})(?:=(?P<value>{}))?".format(STRING_PATTERN, STRING_PATTERN)
OPTS_PATTERN = r"(?P<opts>{})".format(WORD_PATTERN)
FREQ_PATTERN = r"(?P<freq>\d*)"
PASSNO_PATTERN = r"(?P<passno>[\-\d]*)"
SEP = r"[ \t]+"
# one fstab/mtab line: fsname  mount-dir  fstype  opts  dump-freq  fsck-passno
MOUNT_ENTRY_PATTERN_LINUX = r"^{fsname}{sep}{dirname}{sep}{typename}{sep}{opts}{sep}{freq}{sep}{passno}$".format(
    sep=SEP,
    fsname=FSNAME_PATTERN,
    dirname=DIRNAME_PATTERN,
    typename=TYPNAME_PATTERN,
    opts=OPTS_PATTERN,
    freq=FREQ_PATTERN,
    passno=PASSNO_PATTERN)
# one /run/mount/utab line: SRC=... TARGET=... ROOT=/ OPTS=...
UTAB_ENTRY_PATTERN_LINUX = r"^SRC={fsname}{sep}TARGET={dirname}{sep}ROOT=/{sep}OPTS={opts}$".format(
    sep=SEP,
    fsname=FSNAME_PATTERN,
    dirname=DIRNAME_PATTERN,
    opts=OPTS_PATTERN)
| 44.442308 | 118 | 0.596928 |
a6efd51e4eaa2fc711fc3ba4ee57c110d4942d6a | 1,448 | py | Python | src/day15.py | chipturner/advent-of-code-2021 | 52d8f84eb9243fa076c9f7c2a2e3836e138ab127 | [
"Apache-2.0"
] | null | null | null | src/day15.py | chipturner/advent-of-code-2021 | 52d8f84eb9243fa076c9f7c2a2e3836e138ab127 | [
"Apache-2.0"
] | null | null | null | src/day15.py | chipturner/advent-of-code-2021 | 52d8f84eb9243fa076c9f7c2a2e3836e138ab127 | [
"Apache-2.0"
] | null | null | null | import numpy
import helpers
import itertools
import collections
import heapq
bignum = 1000000000
def main() -> None:
    """Solve AoC 2021 day 15 part 2: lowest-risk path on the 5x5 tiled grid.

    Reads the digit grid, tiles it 5x5 with per-tile risk increments
    (wrapping 9 back to 1), runs Dijkstra's algorithm with a lazy
    priority queue, and prints the full cost table.
    """
    # fix: the input was previously read twice, with the first result
    # immediately discarded — read it once
    base_graph = helpers.read_input_digit_grid(int)

    # Build the enlarged grid: tile (m, n) adds m + n to every risk value,
    # wrapping values above 9 back into 1..9 via (v + m + n - 1) % 9 + 1.
    g = numpy.zeros((base_graph.shape[0] * 5, base_graph.shape[1] * 5), dtype=int)
    numpy.set_printoptions(threshold=10000)
    numpy.set_printoptions(linewidth=numpy.inf)
    for m in range(5):
        for n in range(5):
            g[
                m * base_graph.shape[0]:(m + 1) * base_graph.shape[0],
                n * base_graph.shape[0]:(n + 1) * base_graph.shape[0],
            ] = (base_graph + m + n + 8) % 9 + 1

    # Distance table initialised to "infinity" for every cell.
    # (fix: the adjacency defaultdict previously built here was never read)
    costs = {
        (i, j): bignum for i in range(g.shape[0]) for j in range(g.shape[1])
    }
    costs[(0, 0)] = 0

    # Dijkstra with a lazy heap: entries are never removed, but each
    # (cost, position) pair is pushed at most once via pq_seen, and
    # settled positions are skipped via `visited`.
    visited = set()
    pq = [(0, (0, 0))]
    pq_seen = set()
    while pq:
        cost, pos = heapq.heappop(pq)
        for neigh in helpers.neighbors(g, *pos):
            if neigh in visited:
                continue
            costs[neigh] = min(costs[neigh], g[neigh] + costs[pos])
            entry = (costs[neigh], neigh)
            if entry not in pq_seen:
                pq_seen.add(entry)
                heapq.heappush(pq, entry)
        visited.add(pos)
    print(costs)


main()
| 28.96 | 154 | 0.55663 |
59564e5bb62e6b35c300c2182de43659ef531775 | 41,602 | py | Python | test/aaa_profiling/test_memusage.py | DylanModesitt/sqlalchemy | ce85681050500186678131f948b6ea277a65dc17 | [
"MIT"
] | 6 | 2019-02-18T12:42:44.000Z | 2020-11-11T23:10:17.000Z | test/aaa_profiling/test_memusage.py | 565407548/sqlalchemy | 53af60b3536221f2503af29c1e90cf9db1295faf | [
"MIT"
] | null | null | null | test/aaa_profiling/test_memusage.py | 565407548/sqlalchemy | 53af60b3536221f2503af29c1e90cf9db1295faf | [
"MIT"
] | 2 | 2016-11-02T04:59:02.000Z | 2019-05-11T06:01:30.000Z | import decimal
import gc
import itertools
import multiprocessing
import weakref
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.orm import aliased
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import create_session
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.orm.session import _sessions
from sqlalchemy.processors import to_decimal_processor_factory
from sqlalchemy.processors import to_unicode_processor_factory
from sqlalchemy.sql import column
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.sql.visitors import replacement_traverse
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from ..orm import _fixtures
class A(fixtures.ComparableEntity):
    # minimal "parent" entity mapped ad hoc by the memory tests
    pass
class B(fixtures.ComparableEntity):
    # minimal "child" entity mapped ad hoc by the memory tests
    pass
class ASub(A):
    # subclass of A used for polymorphic-inheritance memory tests
    pass
def assert_cycles(expected=0):
    """Decorator asserting the wrapped callable creates at most
    ``expected`` unreachable (cyclic) objects.

    The callable is invoked once as a warmup (mapper configuration,
    caches, etc.), garbage is fully collected, and then the callable
    runs for real; any unreachable objects beyond ``expected`` found
    afterwards fail the assertion.
    """

    def decorate(fn):
        def go():
            fn()  # warmup, configure mappers, caches, etc.

            # multiple collections seem to matter
            for _ in range(3):
                gc_collect()

            # gc.set_debug(gc.DEBUG_COLLECTABLE)
            try:
                return fn()  # run for real
            finally:
                unreachable = gc_collect()
                assert unreachable <= expected
                gc_collect()

        return go

    return decorate
def profile_memory(
    maxtimes=250, assert_no_sessions=True, get_num_objects=None
):
    """Decorator running a test repeatedly in a subprocess and failing
    if the count of tracked objects keeps growing.

    :param maxtimes: total iteration budget (consumed in groups of 5).
    :param assert_no_sessions: assert the ORM session registry is empty
        after each group of runs.
    :param get_num_objects: optional callable replacing
        ``len(gc.get_objects())`` as the growth metric.
    """

    def decorate(func):
        # run the test N times. if length of gc.get_objects()
        # keeps growing, assert false

        def get_objects_skipping_sqlite_issue():
            # pysqlite keeps adding weakref objects which only
            # get reset after 220 iterations.  We'd like to keep these
            # tests under 50 iterations and ideally about ten, so
            # just filter them out so that we get a "flatline" more quickly.
            if testing.against("sqlite+pysqlite"):
                return [
                    o
                    for o in gc.get_objects()
                    if not isinstance(o, weakref.ref)
                ]
            else:
                return gc.get_objects()

        def profile(queue, func_args):
            # runs inside the child process; reports back to the parent
            # over `queue` using ("status", msg) / ("result", ok, msg) rows.

            # give testing.db a brand new pool and don't
            # touch the existing pool, since closing a socket
            # in the subprocess can affect the parent
            testing.db.pool = testing.db.pool.recreate()

            gc_collect()
            samples = []
            max_ = 0
            max_grew_for = 0
            success = False
            until_maxtimes = 0
            while True:
                if until_maxtimes >= maxtimes // 5:
                    break
                # sample the object count after each of five runs
                for x in range(5):
                    try:
                        func(*func_args)
                    except Exception as err:
                        queue.put(
                            (
                                "result",
                                False,
                                "Test raised an exception: %r" % err,
                            )
                        )
                        raise

                    gc_collect()
                    samples.append(
                        get_num_objects()
                        if get_num_objects is not None
                        else len(get_objects_skipping_sqlite_issue())
                    )

                if assert_no_sessions:
                    assert len(_sessions) == 0

                # queue.put(('samples', samples))

                latest_max = max(samples[-5:])
                if latest_max > max_:
                    queue.put(
                        (
                            "status",
                            "Max grew from %s to %s, max has "
                            "grown for %s samples"
                            % (max_, latest_max, max_grew_for),
                        )
                    )
                    max_ = latest_max
                    max_grew_for += 1
                    until_maxtimes += 1
                    continue
                else:
                    # a non-growing sample "pays back" one growth credit;
                    # success once all growth has been paid back
                    queue.put(
                        (
                            "status",
                            "Max remained at %s, %s more attempts left"
                            % (max_, max_grew_for),
                        )
                    )
                    max_grew_for -= 1
                    if max_grew_for == 0:
                        success = True
                        break

            if not success:
                queue.put(
                    (
                        "result",
                        False,
                        "Ran for a total of %d times, memory kept "
                        "growing: %r" % (maxtimes, samples),
                    )
                )
            else:
                queue.put(("result", True, "success"))

        def run_in_process(*func_args):
            # parent-side driver: spawn the child, relay its status
            # messages, then assert on the final result row
            queue = multiprocessing.Queue()
            proc = multiprocessing.Process(
                target=profile, args=(queue, func_args)
            )
            proc.start()
            while True:
                row = queue.get()
                typ = row[0]
                if typ == "samples":
                    print("sample gc sizes:", row[1])
                elif typ == "status":
                    print(row[1])
                elif typ == "result":
                    break
                else:
                    assert False, "can't parse row"

            proc.join()
            assert row[1], row[2]

        return run_in_process

    return decorate
def assert_no_mappers():
    """Clear all mappers and assert the global mapper registry is empty."""
    clear_mappers()
    gc_collect()
    assert len(_mapper_registry) == 0
class EnsureZeroed(fixtures.ORMTest):
    """Test base guaranteeing empty session/mapper registries per test."""

    def setup(self):
        _sessions.clear()
        _mapper_registry.clear()
        # use_reaper=False: the pool reaper thread would hold references
        # and skew the leak measurements
        self.engine = engines.testing_engine(options={"use_reaper": False})
class MemUsageTest(EnsureZeroed):
    """In-process memory-growth tests that need no database round trips."""

    __tags__ = ("memory_intensive",)
    __requires__ = ("cpython", "no_windows")

    def test_type_compile(self):
        # compiling the same expression repeatedly must not grow memory
        from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect

        cast = sa.cast(column("x"), sa.Integer)

        @profile_memory()
        def go():
            dialect = SQLiteDialect()
            cast.compile(dialect=dialect)

        go()

    @testing.requires.cextensions
    def test_DecimalResultProcessor_init(self):
        @profile_memory()
        def go():
            to_decimal_processor_factory({}, 10)

        go()

    @testing.requires.cextensions
    def test_DecimalResultProcessor_process(self):
        @profile_memory()
        def go():
            to_decimal_processor_factory(decimal.Decimal, 10)(1.2)

        go()

    @testing.requires.cextensions
    def test_UnicodeResultProcessor_init(self):
        @profile_memory()
        def go():
            to_unicode_processor_factory("utf8")

        go()

    def test_ad_hoc_types(self):
        """test storage of bind processors, result processors
        in dialect-wide registry."""

        from sqlalchemy.dialects import mysql, postgresql, sqlite
        from sqlalchemy import types

        eng = engines.testing_engine()
        for args in (
            (types.Integer,),
            (types.String,),
            (types.PickleType,),
            (types.Enum, "a", "b", "c"),
            (sqlite.DATETIME,),
            (postgresql.ENUM, "a", "b", "c"),
            (types.Interval,),
            (postgresql.INTERVAL,),
            (mysql.VARCHAR,),
        ):

            @profile_memory()
            def go():
                type_ = args[0](*args[1:])
                bp = type_._cached_bind_processor(eng.dialect)
                rp = type_._cached_result_processor(eng.dialect, 0)
                bp, rp  # strong reference

            go()

        # the per-dialect type memo cache must not retain entries
        assert not eng.dialect._type_memos

    @testing.fails()
    def test_fixture_failure(self):
        # sanity check: the profiling fixture itself must trip on growth
        class Foo(object):
            pass

        stuff = []

        @profile_memory(maxtimes=20)
        def go():
            stuff.extend(Foo() for i in range(100))

        go()
class MemUsageWBackendTest(EnsureZeroed):
__tags__ = ("memory_intensive",)
__requires__ = "cpython", "memory_process_intensive"
__sparse_backend__ = True
    # ensure a pure growing test trips the assertion
    @testing.fails_if(lambda: True)
    def test_fixture(self):
        """Sanity check: a deliberately leaking loop must fail profiling."""
        class Foo(object):
            pass

        x = []

        @profile_memory(maxtimes=10)
        def go():
            # replaces the last element but appends five more each run,
            # so the list grows on every iteration
            x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]

        go()
def test_session(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all()
m1 = mapper(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
)
m2 = mapper(B, table2)
@profile_memory()
def go():
sess = create_session()
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
go()
metadata.drop_all()
del m1, m2
assert_no_mappers()
    def test_sessionmaker(self):
        """Creating and closing a sessionmaker + session must not leak."""

        @profile_memory()
        def go():
            sessmaker = sessionmaker(bind=self.engine)
            sess = sessmaker()
            r = sess.execute(select([1]))
            r.close()
            sess.close()
            # drop the local references so gc can account for them
            del sess
            del sessmaker

        go()
@testing.emits_warning("Compiled statement cache for mapper.*")
@testing.emits_warning("Compiled statement cache for lazy loader.*")
@testing.crashes("sqlite", ":memory: connection not suitable here")
def test_orm_many_engines(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all()
m1 = mapper(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
_compiled_cache_size=50,
)
m2 = mapper(B, table2, _compiled_cache_size=50)
@profile_memory()
def go():
engine = engines.testing_engine(
options={
"logging_name": "FOO",
"pool_logging_name": "BAR",
"use_reaper": False,
}
)
sess = create_session(bind=engine)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
engine.dispose()
go()
metadata.drop_all()
del m1, m2
assert_no_mappers()
@testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData(self.engine)
wide_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*[Column("col%d" % i, Integer) for i in range(10)]
)
class Wide(object):
pass
mapper(Wide, wide_table, _compiled_cache_size=10)
metadata.create_all()
session = create_session()
w1 = Wide()
session.add(w1)
session.flush()
session.close()
del session
counter = [1]
@profile_memory()
def go():
session = create_session()
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, "col%d" % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.flush()
session.close()
counter[0] += 1
try:
go()
finally:
metadata.drop_all()
@testing.requires.savepoints
@testing.provide_metadata
def test_savepoints(self):
metadata = self.metadata
some_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
class SomeClass(object):
pass
mapper(SomeClass, some_table)
metadata.create_all()
session = Session(testing.db)
target_strings = (
session.connection().dialect.identifier_preparer._strings
)
session.close()
@profile_memory(
assert_no_sessions=False,
get_num_objects=lambda: len(target_strings),
)
def go():
session = Session(testing.db)
with session.transaction:
sc = SomeClass()
session.add(sc)
with session.begin_nested():
session.query(SomeClass).first()
go()
@testing.crashes("mysql+cymysql", "blocking")
def test_unicode_warnings(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", Unicode(30)),
)
metadata.create_all()
i = [1]
# the times here is cranked way up so that we can see
# pysqlite clearing out its internal buffer and allow
# the test to pass
@testing.emits_warning()
@profile_memory()
def go():
# execute with a non-unicode object. a warning is emitted,
# this warning shouldn't clog up memory.
self.engine.execute(
table1.select().where(table1.c.col2 == "foo%d" % i[0])
)
i[0] += 1
try:
go()
finally:
metadata.drop_all()
def test_warnings_util(self):
counter = itertools.count()
import warnings
warnings.filterwarnings("ignore", "memusage warning.*")
@profile_memory()
def go():
util.warn_limited(
"memusage warning, param1: %s, param2: %s",
(next(counter), next(counter)),
)
go()
def test_mapper_reset(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
@profile_memory()
def go():
mapper(
A,
table1,
properties={"bs": relationship(B, order_by=table2.c.col1)},
)
mapper(B, table2)
sess = create_session()
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all()
try:
go()
finally:
metadata.drop_all()
assert_no_mappers()
def test_alias_pathing(self):
metadata = MetaData(self.engine)
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid", Integer, ForeignKey("b.id")),
Column("type", String(30)),
)
asub = Table(
"asub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(30)),
)
b = Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
mapper(A, a, polymorphic_identity="a", polymorphic_on=a.c.type)
mapper(ASub, asub, inherits=A, polymorphic_identity="asub")
mapper(B, b, properties={"as_": relationship(A)})
metadata.create_all()
sess = Session()
a1 = ASub(data="a1")
a2 = ASub(data="a2")
a3 = ASub(data="a3")
b1 = B(as_=[a1, a2, a3])
sess.add(b1)
sess.commit()
del sess
# sqlite has a slow enough growth here
# that we have to run it more times to see the
# "dip" again
@profile_memory(maxtimes=120)
def go():
sess = Session()
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
try:
go()
finally:
metadata.drop_all()
clear_mappers()
def test_path_registry(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer),
)
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id")),
)
m1 = mapper(A, a, properties={"bs": relationship(B)})
mapper(B, b)
@profile_memory()
def go():
ma = sa.inspect(aliased(A))
m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]
go()
clear_mappers()
def test_with_inheritance(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
ForeignKey("mytable.col1"),
primary_key=True,
test_needs_autoincrement=True,
),
Column("col3", String(30)),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
mapper(
A,
table1,
polymorphic_on=table1.c.col2,
polymorphic_identity="a",
)
mapper(B, table2, inherits=A, polymorphic_identity="b")
sess = create_session()
a1 = A()
a2 = A()
b1 = B(col3="b1")
b2 = B(col3="b2")
for x in [a1, a2, b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all()
try:
go()
finally:
metadata.drop_all()
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table3 = Table(
"t1tot2",
metadata,
Column("t1", Integer, ForeignKey("mytable.col1")),
Column("t2", Integer, ForeignKey("mytable2.col1")),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
mapper(
A,
table1,
properties={
"bs": relationship(
B, secondary=table3, backref="as", order_by=table3.c.t1
)
},
)
mapper(B, table2)
sess = create_session()
a1 = A(col2="a1")
a2 = A(col2="a2")
b1 = B(col2="b1")
b2 = B(col2="b2")
a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1, a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all()
try:
go()
finally:
metadata.drop_all()
assert_no_mappers()
@testing.uses_deprecated()
@testing.provide_metadata
def test_key_fallback_result(self):
e = self.engine
m = self.metadata
t = Table("t", m, Column("x", Integer), Column("y", Integer))
m.create_all(e)
e.execute(t.insert(), {"x": 1, "y": 1})
@profile_memory()
def go():
r = e.execute(t.alias().select())
for row in r:
row[t.c.x]
go()
def test_many_discarded_relationships(self):
"""a use case that really isn't supported, nonetheless we can
guard against memleaks here so why not"""
m1 = MetaData()
t1 = Table("t1", m1, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
class T1(object):
pass
t1_mapper = mapper(T1, t1)
@testing.emits_warning()
@profile_memory()
def go():
class T2(object):
pass
t2_mapper = mapper(T2, t2)
t1_mapper.add_property("bar", relationship(t2_mapper))
s1 = Session()
# this causes the path_registry to be invoked
s1.query(t1_mapper)._compile_context()
go()
    # fails on newer versions of pysqlite due to unusual memory behavior
    # in pysqlite itself. background at:
    # http://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
    @testing.crashes("mysql+cymysql", "blocking")
    def test_join_cache_deprecated_coercion(self):
        # Memory-leak check for Query.join() against a raw SELECT (the
        # deprecated implicit-coercion path, hence expect_deprecated below).
        metadata = MetaData(self.engine)
        table1 = Table(
            "table1",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
        )
        table2 = Table(
            "table2",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
            Column("t1id", Integer, ForeignKey("table1.id")),
        )
        class Foo(object):
            pass
        class Bar(object):
            pass
        mapper(
            Foo, table1, properties={"bars": relationship(mapper(Bar, table2))}
        )
        metadata.create_all()
        session = sessionmaker()
        @profile_memory()
        def go():
            # Joining to a bare select() (not a subquery) triggers the
            # deprecated coercion; the join cache must not grow per call.
            s = table2.select()
            sess = session()
            with testing.expect_deprecated(
                "Implicit coercion of SELECT and " "textual SELECT constructs"
            ):
                sess.query(Foo).join(s, Foo.bars).all()
            sess.rollback()
        try:
            go()
        finally:
            metadata.drop_all()
    @testing.crashes("mysql+cymysql", "blocking")
    def test_join_cache(self):
        # Same scenario as test_join_cache_deprecated_coercion but using the
        # supported .subquery() form; verifies the join cache does not leak.
        metadata = MetaData(self.engine)
        table1 = Table(
            "table1",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
        )
        table2 = Table(
            "table2",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
            Column("t1id", Integer, ForeignKey("table1.id")),
        )
        class Foo(object):
            pass
        class Bar(object):
            pass
        mapper(
            Foo, table1, properties={"bars": relationship(mapper(Bar, table2))}
        )
        metadata.create_all()
        session = sessionmaker()
        @profile_memory()
        def go():
            s = table2.select().subquery()
            sess = session()
            sess.query(Foo).join(s, Foo.bars).all()
            sess.rollback()
        try:
            go()
        finally:
            metadata.drop_all()
class CycleTest(_fixtures.FixtureTest):
    """Assert that common ORM/Core operations create no reference cycles.

    Each test wraps the operation under inspection in ``@assert_cycles()``
    (optionally with an expected cycle count for known, accepted cycles)
    so that regressions introducing new cycles are caught.
    """
    __tags__ = ("memory_intensive",)
    __requires__ = ("cpython", "no_windows")
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def setup_mappers(cls):
        cls._setup_stock_mapping()
    def test_query(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        @assert_cycles()
        def go():
            return s.query(User).all()
        go()
    def test_raise_from(self):
        # Exception chaining via util.raise_(..., from_=...) must not leave
        # a traceback/exception cycle behind.
        @assert_cycles()
        def go():
            try:
                try:
                    raise KeyError("foo")
                except KeyError as ke:
                    util.raise_(Exception("oops"), from_=ke)
            except Exception as err: # noqa
                pass
        go()
    def test_query_alias(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        u1 = aliased(User)
        @assert_cycles()
        def go():
            s.query(u1).all()
        go()
    def test_entity_path_w_aliased(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        @assert_cycles()
        def go():
            u1 = aliased(User)
            inspect(u1)._path_registry[User.addresses.property]
        go()
    def test_orm_objects_from_query(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        def generate():
            objects = s.query(User).filter(User.id == 7).all()
            gc_collect()
            return objects
        @assert_cycles()
        def go():
            generate()
        go()
    def test_orm_objects_from_query_w_selectinload(self):
        User, Address = self.classes("User", "Address")
        s = Session()
        def generate():
            objects = s.query(User).options(selectinload(User.addresses)).all()
            gc_collect()
            return objects
        @assert_cycles()
        def go():
            generate()
        go()
    def test_selectinload_option_unbound(self):
        User, Address = self.classes("User", "Address")
        @assert_cycles()
        def go():
            selectinload(User.addresses)
        go()
    def test_selectinload_option_bound(self):
        User, Address = self.classes("User", "Address")
        @assert_cycles()
        def go():
            Load(User).selectinload(User.addresses)
        go()
    def test_orm_path(self):
        User, Address = self.classes("User", "Address")
        @assert_cycles()
        def go():
            inspect(User)._path_registry[User.addresses.property][
                inspect(Address)
            ]
        go()
    def test_joinedload_option_unbound(self):
        User, Address = self.classes("User", "Address")
        @assert_cycles()
        def go():
            joinedload(User.addresses)
        go()
    def test_joinedload_option_bound(self):
        User, Address = self.classes("User", "Address")
        @assert_cycles()
        def go():
            l1 = Load(User).joinedload(User.addresses)
            l1._generate_cache_key()
        go()
    def test_orm_objects_from_query_w_joinedload(self):
        User, Address = self.classes("User", "Address")
        s = Session()
        def generate():
            objects = s.query(User).options(joinedload(User.addresses)).all()
            gc_collect()
            return objects
        @assert_cycles()
        def go():
            generate()
        go()
    def test_query_filtered(self):
        User, Address = self.classes("User", "Address")
        s = Session()
        @assert_cycles()
        def go():
            return s.query(User).filter(User.id == 7).all()
        go()
    def test_query_joins(self):
        User, Address = self.classes("User", "Address")
        s = Session()
        # cycles here are due to ClauseElement._cloned_set, others
        # as of cache key
        @assert_cycles(4)
        def go():
            s.query(User).join(User.addresses).all()
        go()
    def test_query_joinedload(self):
        User, Address = self.classes("User", "Address")
        s = Session()
        def generate():
            s.query(User).options(joinedload(User.addresses)).all()
        # cycles here are due to ClauseElement._cloned_set and Load.context,
        # others as of cache key
        @assert_cycles(29)
        def go():
            generate()
        go()
    def test_plain_join(self):
        users, addresses = self.tables("users", "addresses")
        @assert_cycles()
        def go():
            str(users.join(addresses))
        go()
    def test_plain_join_select(self):
        users, addresses = self.tables("users", "addresses")
        # cycles here are due to ClauseElement._cloned_set, others
        # as of cache key
        @assert_cycles(7)
        def go():
            s = select([users]).select_from(users.join(addresses))
            state = s._compile_state_factory(s, None)
            state.froms
        go()
    def test_orm_join(self):
        User, Address = self.classes("User", "Address")
        @assert_cycles()
        def go():
            str(orm_join(User, Address, User.addresses))
        go()
    def test_join_via_query_relationship(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        @assert_cycles()
        def go():
            s.query(User).join(User.addresses)
        go()
    def test_join_via_query_to_entity(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        @assert_cycles()
        def go():
            s.query(User).join(Address)
        go()
    def test_result_fetchone(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        stmt = s.query(User).join(User.addresses).statement
        @assert_cycles()
        def go():
            result = s.execute(stmt)
            while True:
                row = result.fetchone()
                if row is None:
                    break
        go()
    def test_result_fetchall(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        stmt = s.query(User).join(User.addresses).statement
        @assert_cycles()
        def go():
            result = s.execute(stmt)
            rows = result.fetchall() # noqa
        go()
    def test_result_fetchmany(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        stmt = s.query(User).join(User.addresses).statement
        @assert_cycles()
        def go():
            result = s.execute(stmt)
            for partition in result.partitions(3):
                pass
        go()
    def test_result_fetchmany_unique(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        stmt = s.query(User).join(User.addresses).statement
        @assert_cycles()
        def go():
            result = s.execute(stmt)
            for partition in result.unique().partitions(3):
                pass
        go()
    def test_core_select(self):
        User, Address = self.classes("User", "Address")
        configure_mappers()
        s = Session()
        stmt = s.query(User).join(User.addresses).statement
        @assert_cycles()
        def go():
            s.execute(stmt)
        go()
    def test_adapt_statement_replacement_traversal(self):
        User, Address = self.classes("User", "Address")
        statement = select([User]).select_from(
            orm_join(User, Address, User.addresses)
        )
        @assert_cycles()
        def go():
            replacement_traverse(statement, {}, lambda x: None)
        go()
    def test_adapt_statement_cloned_traversal(self):
        User, Address = self.classes("User", "Address")
        statement = select([User]).select_from(
            orm_join(User, Address, User.addresses)
        )
        @assert_cycles()
        def go():
            cloned_traverse(statement, {}, {})
        go()
    def test_column_adapter_lookup(self):
        User, Address = self.classes("User", "Address")
        u1 = aliased(User)
        @assert_cycles()
        def go():
            adapter = sql_util.ColumnAdapter(inspect(u1).selectable)
            adapter.columns[User.id]
        go()
    def test_orm_aliased(self):
        User, Address = self.classes("User", "Address")
        @assert_cycles()
        def go():
            u1 = aliased(User)
            inspect(u1)
        go()
    @testing.fails
    def test_the_counter(self):
        # Sanity check of assert_cycles itself: x.append(x) is a genuine
        # cycle, so this test is expected to fail (hence @testing.fails).
        @assert_cycles()
        def go():
            x = []
            x.append(x)
        go()
    def test_weak_sequence(self):
        class Foo(object):
            pass
        f = Foo()
        @assert_cycles()
        def go():
            util.WeakSequence([f])
        go()
    @testing.provide_metadata
    def test_optimized_get(self):
        from sqlalchemy.ext.declarative import declarative_base
        Base = declarative_base(metadata=self.metadata)
        class Employee(Base):
            __tablename__ = "employee"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )
            type = Column(String(10))
            __mapper_args__ = {"polymorphic_on": type}
        class Engineer(Employee):
            # NOTE(review): leading space in " engineer" looks unintentional
            # but is preserved here — confirm against upstream before fixing.
            __tablename__ = " engineer"
            id = Column(ForeignKey("employee.id"), primary_key=True)
            engineer_name = Column(String(50))
            __mapper_args__ = {"polymorphic_identity": "engineer"}
        Base.metadata.create_all(testing.db)
        s = Session(testing.db)
        s.add(Engineer(engineer_name="wally"))
        s.commit()
        s.close()
        @assert_cycles()
        def go():
            # Loading the subclass attribute triggers the "optimized get"
            # second-table load; it must not create cycles.
            e1 = s.query(Employee).first()
            e1.engineer_name
        go()
    def test_visit_binary_product(self):
        a, b, q, e, f, j, r = [column(chr_) for chr_ in "abqefjr"]
        from sqlalchemy import and_, func
        from sqlalchemy.sql.util import visit_binary_product
        expr = and_((a + b) == q + func.sum(e + f), j == r)
        def visit(expr, left, right):
            pass
        @assert_cycles()
        def go():
            visit_binary_product(visit, expr)
        go()
    def test_session_transaction(self):
        @assert_cycles()
        def go():
            s = Session(testing.db)
            s.connection()
            s.close()
        go()
    def test_session_commit_rollback(self):
        # this is enabled by #5074
        @assert_cycles()
        def go():
            s = Session(testing.db)
            s.connection()
            s.commit()
        go()
        @assert_cycles()
        def go():
            s = Session(testing.db)
            s.connection()
            s.rollback()
        go()
    def test_session_multi_transaction(self):
        @assert_cycles()
        def go():
            s = Session(testing.db)
            assert s._transaction is None
            s.connection()
            s.close()
            assert s._transaction is None
            s.connection()
            assert s._transaction is not None
            s.close()
        go()
| 25.952589 | 79 | 0.492861 |
61ea0c1ecc6acee4e0935e59d86cbbb22c5fd718 | 23,340 | py | Python | dask_ml/model_selection/_hyperband.py | thomasjpfan/dask-ml | b9d903ee2004178bcbe43a04493e23c2afdb7f60 | [
"BSD-3-Clause"
] | 1 | 2022-02-11T12:06:31.000Z | 2022-02-11T12:06:31.000Z | dask_ml/model_selection/_hyperband.py | thomasjpfan/dask-ml | b9d903ee2004178bcbe43a04493e23c2afdb7f60 | [
"BSD-3-Clause"
] | null | null | null | dask_ml/model_selection/_hyperband.py | thomasjpfan/dask-ml | b9d903ee2004178bcbe43a04493e23c2afdb7f60 | [
"BSD-3-Clause"
] | 3 | 2020-05-11T07:38:05.000Z | 2022-02-11T12:33:50.000Z | from __future__ import division
import logging
import math
from warnings import warn
import numpy as np
from sklearn.utils import check_random_state
from tornado import gen
from ._incremental import BaseIncrementalSearchCV
from ._successive_halving import SuccessiveHalvingSearchCV
logger = logging.getLogger(__name__)
def _get_hyperband_params(R, eta=3):
"""
Parameters
----------
R : int
The maximum number of iterations desired.
eta : int
How aggressive to be in the search
Returns
-------
brackets : Dict[int, Tuple[int, int]]
A dictionary of the form {bracket_id: (n_models, n_initial_iter)}
Notes
-----
The bracket index is a measure of how strong that n,r combination
adapts to prior input. i.e., a bracket ID of 0 means "doesn't adapt
at all" and bracket index of 5 means "adapts pretty strongly"
``R`` and ``eta`` are the terminology that the Hyperband paper uses [1]_.
References
----------
.. [1] "Hyperband: A novel bandit-based approach to hyperparameter
optimization", 2016 by L. Li, K. Jamieson, G. DeSalvo, A.
Rostamizadeh, and A. Talwalkar. https://arxiv.org/abs/1603.06560
"""
s_max = math.floor(math.log(R, eta))
B = (s_max + 1) * R
brackets = list(reversed(range(int(s_max + 1))))
N = [int(math.ceil(B / R * eta ** s / (s + 1))) for s in brackets]
R = [int(R * eta ** -s) for s in brackets]
return {b: (n, r) for b, n, r in zip(brackets, N, R)}
class HyperbandSearchCV(BaseIncrementalSearchCV):
    """Find the best parameters for a particular model with an adaptive
    cross-validation algorithm.
    Hyperband will find close to the best possible parameters with
    the given computational budget [*]_ by spending more time training
    high-performing estimators [1]_. This means that Hyperband stops training
    estimators that perform poorly -- at it's core, Hyperband is an early
    stopping scheme for RandomizedSearchCV.
    Hyperband does not require a trade-off between "evaluate many parameters
    for a short time" and "train a few parameters for a long time"
    like RandomizedSearchCV.
    Hyperband requires one input which requires knowing how long
    to train the best performing estimator via ``max_iter``.
    The other implicit input (the Dask array chuck size) requires
    a rough estimate of how many parameters to sample. Specification details
    are in :ref:`Notes`.
    .. [*] After :math:`N` ``partial_fit`` calls the estimator Hyperband
       produces will be close to the best possible estimator that :math:`N`
       ``partial_fit`` calls could ever produce with high probability (where
       "close" means "within log terms of the expected best possible score").
    Parameters
    ----------
    estimator : estimator object.
        A object of that type is instantiated for each hyperparameter
        combination. This is assumed to implement the scikit-learn estimator
        interface. Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed. The estimator must implement
        ``partial_fit``, ``set_params``, and work well with ``clone``.
    parameters : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
    max_iter : int
        The maximum number of partial_fit calls to any one model. This should
        be the number of ``partial_fit`` calls required for the model to
        converge. See :ref:`Notes` for details on setting this parameter.
    aggressiveness : int, default=3
        How aggressive to be in culling off the different estimators. Higher
        values imply higher confidence in scoring (or that
        the hyperparameters influence the ``estimator.score`` more
        than the data). Theory suggests ``aggressiveness=3`` is close to
        optimal. ``aggressiveness=4`` has higher confidence that is likely
        suitable for initial exploration.
    patience : int, default False
        If specified, training stops when the score does not increase by
        ``tol`` after ``patience`` calls to ``partial_fit``. Off by default.
        A ``patience`` value is automatically selected if ``patience=True`` to
        work well with the Hyperband model selection algorithm.
    tol : float, default 0.001
        The required level of improvement to consider stopping training on
        that model when ``patience`` is specified. Increasing ``tol`` will
        tend to reduce training time at the cost of (potentially) worse
        estimators.
    test_size : float
        Fraction of the dataset to hold out for computing test/validation
        scores. Defaults to the size of a single partition of
        the input training set.
        .. note::
           The testing dataset should fit in memory on a single machine.
           Adjust the ``test_size`` parameter as necessary to achieve this.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    scoring : string, callable, list/tuple, dict or None, default: None
        A single string (see :ref:`scoring_parameter`) or a callable
        (see :ref:`scoring`) to evaluate the predictions on the test set.
        If None, the estimator's default scorer (if available) is used.
    Examples
    --------
    >>> import numpy as np
    >>> from dask_ml.model_selection import HyperbandSearchCV
    >>> from dask_ml.datasets import make_classification
    >>> from sklearn.linear_model import SGDClassifier
    >>>
    >>> X, y = make_classification(chunks=20)
    >>> est = SGDClassifier(tol=1e-3)
    >>> param_dist = {'alpha': np.logspace(-4, 0, num=1000),
    >>>               'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge'],
    >>>               'average': [True, False]}
    >>>
    >>> search = HyperbandSearchCV(est, param_dist)
    >>> search.fit(X, y, classes=np.unique(y))
    >>> search.best_params_
    {'loss': 'log', 'average': False, 'alpha': 0.0080502}
    Attributes
    ----------
    metadata and metadata_ : dict[str, Union(int, dict)]
        These dictionaries describe the computation performed, either
        before computation happens with ``metadata`` or after computation
        happens with ``metadata_``. These dictionaries both have keys
        * ``n_models``, an int representing how many models will be/is created.
        * ``partial_fit_calls``, an int representing how many times
          ``partial_fit`` will be/is called.
        * ``brackets``, a list of the brackets that Hyperband runs. Each
          bracket has different values for training time importance and
          hyperparameter importance. In addition to ``n_models`` and
          ``partial_fit_calls``, each element in this list has keys
          * ``bracket``, an int the bracket ID. Each bracket corresponds to
            a different levels of training time importance.
            For bracket 0, training time is important. For the highest
            bracket, training time is not important and models are killed
            aggressively.
          * ``SuccessiveHalvingSearchCV params``, a dictionary used to create
            the different brackets. It does not include the
            ``estimator`` or ``parameters`` parameters.
          * ``decisions``, the number of ``partial_fit`` calls Hyperband makes
            before making decisions.
        These dictionaries are the same if ``patience`` is not specified. If
        ``patience`` is specified, it's possible that less training is
        performed, and ``metadata_`` will reflect that (though ``metadata``
        won't).
    cv_results_ : Dict[str, np.ndarray]
        A dictionary that describes how well each model has performed.
        It contains information about every model regardless if it reached
        ``max_iter``. It has keys
        * ``mean_partial_fit_time``
        * ``mean_score_time``
        * ``std_partial_fit_time``
        * ``std_score_time``
        * ``test_score``
        * ``rank_test_score``
        * ``model_id``
        * ``partial_fit_calls``
        * ``params``
        * ``param_{key}``, where ``{key}`` is every key in ``params``.
        * ``bracket``
        The values in the ``test_score`` key correspond to the last score a model
        received on the hold out dataset. The key ``model_id`` corresponds with
        ``history_``. This dictionary can be imported into a Pandas DataFrame.
        In the ``model_id``, the bracket ID prefix corresponds to the bracket
        in ``metadata``. Bracket 0 doesn't adapt to previous training at all;
        higher values correspond to more adaptation.
    history_ : list of dicts
        Information about each model after each ``partial_fit`` call. Each dict
        the keys
        * ``partial_fit_time``
        * ``score_time``
        * ``score``
        * ``model_id``
        * ``params``
        * ``partial_fit_calls``
        * ``elapsed_wall_time``
        The key ``model_id`` corresponds to the ``model_id`` in ``cv_results_``.
        This list of dicts can be imported into Pandas.
    model_history_ : dict of lists of dict
        A dictionary of each models history. This is a reorganization of
        ``history_``: the same information is present but organized per model.
        This data has the structure  ``{model_id: [h1, h2, h3, ...]}`` where
        ``h1``, ``h2`` and ``h3`` are elements of ``history_``
        and ``model_id`` is the model ID as in ``cv_results_``.
    best_estimator_ : BaseEstimator
        The model with the highest validation score as selected by
        the Hyperband model selection algorithm.
    best_score_ : float
        Score achieved by ``best_estimator_`` on the vaidation set after the
        final call to ``partial_fit``.
    best_index_ : int
        Index indicating which estimator in ``cv_results_`` corresponds to
        the highest score.
    best_params_ : dict
        Dictionary of best parameters found on the hold-out data.
    scorer_ :
        The function used to score models, which has a call signature of
        ``scorer_(estimator, X, y)``.
    Notes
    -----
    To set ``max_iter`` and the chunk size for ``X`` and ``y``, it is required
    to estimate
    * the number of examples at least one model will see
      (``n_examples``). If 10 passes through the data are needed for
      the longest trained model, ``n_examples = 10 * len(X)``.
    * how many hyper-parameter combinations to sample (``n_params``)
    These can be rough guesses. To determine the chunk size and ``max_iter``,
    1. Let the chunks size be ``chunk_size = n_examples / n_params``
    2. Let ``max_iter = n_params``
    Then, every estimator sees no
    more than ``max_iter * chunk_size = n_examples`` examples.
    Hyperband will actually sample some more hyper-parameter combinations than
    ``n_examples`` (which is why rough guesses are adequate). For example,
    let's say
    * about 200 or 300 hyper-parameters need to be tested to effectively
      search the possible hyper-parameters
    * models need more than ``50 * len(X)`` examples but less than
      ``100 * len(X)`` examples.
    Let's decide to provide ``81 * len(X)`` examples and to sample 243
    parameters. Then each chunk will be 1/3rd the dataset and ``max_iter=243``.
    If you use ``HyperbandSearchCV``, please use the citation for [2]_
    .. code-block:: tex
        @InProceedings{sievert2019better,
              author    = {Scott Sievert and Tom Augspurger and Matthew Rocklin},
              title     = {{B}etter and faster hyperparameter optimization with {D}ask},
              booktitle = {{P}roceedings of the 18th {P}ython in {S}cience {C}onference},
              pages     = {118 - 125},
              year      = {2019},
              editor    = {Chris Calloway and David Lippa and Dillon Niederhut and David Shupe}, # noqa
              doi       = {10.25080/Majora-7ddc1dd1-011}
        }
    References
    ----------
    .. [1] "Hyperband: A novel bandit-based approach to hyperparameter
           optimization", 2016 by L. Li, K. Jamieson, G. DeSalvo, A.
           Rostamizadeh, and A. Talwalkar. https://arxiv.org/abs/1603.06560
    .. [2] "Better and faster hyperparameter optimization with Dask", 2018 by
           S. Sievert, T. Augspurger, M. Rocklin.
           https://doi.org/10.25080/Majora-7ddc1dd1-011
    """
    def __init__(
        self,
        estimator,
        parameters,
        max_iter=81,
        aggressiveness=3,
        patience=False,
        tol=1e-3,
        test_size=None,
        random_state=None,
        scoring=None,
    ):
        self.aggressiveness = aggressiveness
        super(HyperbandSearchCV, self).__init__(
            estimator,
            parameters,
            max_iter=max_iter,
            patience=patience,
            tol=tol,
            test_size=test_size,
            random_state=random_state,
            scoring=scoring,
        )
    def _get_SHAs(self, brackets):
        """Build one SuccessiveHalvingSearchCV per Hyperband bracket."""
        patience = _get_patience(
            self.patience, self.max_iter, self.aggressiveness, self.tol
        )
        # This is the first time self.random_state is used after
        # HyperbandSearchCV.fit is called.
        seed_start = check_random_state(self.random_state).randint(2 ** 31)
        self._SHA_seed = seed_start
        # These brackets are ordered by adaptivity; bracket=0 is least adaptive
        SHAs = {
            b: SuccessiveHalvingSearchCV(
                self.estimator,
                self.parameters,
                n_initial_parameters=n,
                aggressiveness=self.aggressiveness,
                max_iter=self.max_iter,
                n_initial_iter=r,
                patience=patience,
                tol=self.tol,
                test_size=self.test_size,
                # Bracket 0 keeps the user-provided random_state; other
                # brackets get distinct derived seeds.
                random_state=seed_start + b if b != 0 else self.random_state,
                scoring=self.scoring,
            )
            for b, (n, r) in brackets.items()
        }
        return SHAs
    @gen.coroutine
    def _fit(self, X, y, **fit_params):
        # Run every bracket's SuccessiveHalvingSearchCV concurrently, then
        # merge their results/histories into the Hyperband-level attributes.
        X, y, scorer = self._validate_parameters(X, y)
        brackets = _get_hyperband_params(self.max_iter, eta=self.aggressiveness)
        SHAs = self._get_SHAs(brackets)
        # Which bracket to run first? Going to go with most adaptive;
        # that works best on one machine.
        # (though it doesn't matter a ton; _fit prioritizes high scores
        _brackets_ids = list(reversed(sorted(SHAs)))
        # _fit is run in parallel because it's also a tornado coroutine
        _SHAs = yield [SHAs[b]._fit(X, y, **fit_params) for b in _brackets_ids]
        SHAs = {b: SHA for b, SHA in zip(_brackets_ids, _SHAs)}
        # This for-loop rename estimator IDs and pulls out wall times
        key = "bracket={}-{}".format
        for b, SHA in SHAs.items():
            # Prefix every model ID with its bracket so IDs are globally
            # unique across brackets (e.g. "bracket=2-3").
            new_ids = {old: key(b, old) for old in SHA.cv_results_["model_id"]}
            SHA.cv_results_["model_id"] = np.array(
                [new_ids[old] for old in SHA.cv_results_["model_id"]]
            )
            SHA.model_history_ = {
                new_ids[old]: v for old, v in SHA.model_history_.items()
            }
            for hist in SHA.model_history_.values():
                for h in hist:
                    h["model_id"] = new_ids[h["model_id"]]
                    h["bracket"] = b
        for b, SHA in SHAs.items():
            n = len(SHA.cv_results_["model_id"])
            SHA.cv_results_["bracket"] = np.ones(n, dtype=int) * b
        cv_keys = {k for SHA in SHAs.values() for k in SHA.cv_results_.keys()}
        cv_results = {
            k: [v for b in _brackets_ids for v in SHAs[b].cv_results_[k]]
            for k in cv_keys
        }
        cv_results = {k: np.array(v) for k, v in cv_results.items()}
        scores = {b: SHA.best_score_ for b, SHA in SHAs.items()}
        best_bracket = max(scores, key=scores.get)
        best_estimator = SHAs[best_bracket].best_estimator_
        estimator_history = {
            ident: hist
            for SHA in SHAs.values()
            for ident, hist in SHA.model_history_.items()
        }
        # Order history by time
        history = sum([SHA.history_ for b, SHA in SHAs.items()], [])
        idx = np.argsort([v["elapsed_wall_time"] for v in history])
        history = [history[i] for i in idx]
        best_model_id = SHAs[best_bracket].cv_results_["model_id"][
            SHAs[best_bracket].best_index_
        ]
        best_index = np.argwhere(np.array(cv_results["model_id"]) == best_model_id)
        best_index = best_index.flat[0]
        meta, _ = _get_meta(
            {b: SHA.history_ for b, SHA in SHAs.items()}, brackets.keys(), SHAs, key
        )
        self.metadata_ = {
            "n_models": sum(m["n_models"] for m in meta),
            "partial_fit_calls": sum(m["partial_fit_calls"] for m in meta),
            "brackets": meta,
        }
        self.best_index_ = int(best_index)
        self.best_estimator_ = best_estimator
        self.best_score_ = scores[best_bracket]
        self.best_params_ = cv_results["params"][best_index]
        self.scorer_ = scorer
        self.model_history_ = estimator_history
        self.history_ = history
        self.cv_results_ = cv_results
        self.multimetric_ = SHAs[best_bracket].multimetric_
        self._SuccessiveHalvings_ = SHAs
        raise gen.Return(self)
    @property
    def metadata(self):
        # Predicted (pre-fit) computation plan, derived from the paper's
        # Algorithm 1 rather than from an actual run.
        bracket_info = _hyperband_paper_alg(self.max_iter, eta=self.aggressiveness)
        num_models = sum(b["n_models"] for b in bracket_info)
        for bracket in bracket_info:
            bracket["decisions"] = sorted(list(bracket["decisions"]))
        num_partial_fit = sum(b["partial_fit_calls"] for b in bracket_info)
        bracket_info = list(reversed(sorted(bracket_info, key=lambda x: x["bracket"])))
        brackets = _get_hyperband_params(self.max_iter, eta=self.aggressiveness)
        SHAs = self._get_SHAs(brackets)
        for bracket in bracket_info:
            b = bracket["bracket"]
            bracket["SuccessiveHalvingSearchCV params"] = _get_SHA_params(SHAs[b])
        bracket_info = sorted(bracket_info, key=lambda x: x["bracket"])
        info = {
            "partial_fit_calls": num_partial_fit,
            "n_models": num_models,
            "brackets": bracket_info,
        }
        return info
def _get_meta(hists, brackets, SHAs, key):
    """Summarize per-bracket training history after a Hyperband run.

    Parameters
    ----------
    hists : dict
        ``{bracket_id: history_list}`` where each history entry is a dict
        with at least ``model_id`` and ``partial_fit_calls`` keys.
    brackets : iterable
        The bracket IDs to summarize.
    SHAs : dict
        ``{bracket_id: SuccessiveHalvingSearchCV}`` used to record each
        bracket's creation parameters.
    key : callable
        ``key(bracket, model_id)`` producing the globally-unique model ID.

    Returns
    -------
    (meta_, history_) : tuple
        ``meta_`` is a list of per-bracket summary dicts sorted by bracket;
        ``history_`` maps globally-unique model IDs to their history lists.
    """
    meta_ = []
    history_ = {}
    for bracket in brackets:
        hist = hists[bracket]
        # Re-group the flat history list per (bracket-qualified) model ID.
        info_hist = {key(bracket, h["model_id"]): [] for h in hist}
        for h in hist:
            info_hist[key(bracket, h["model_id"])] += [h]
        hist = info_hist
        history_.update(hist)
        # Total partial_fit calls per model == max seen in its history.
        calls = {k: max(hi["partial_fit_calls"] for hi in h) for k, h in hist.items()}
        decisions = {hi["partial_fit_calls"] for h in hist.values() for hi in h}
        if bracket != max(brackets):
            # Only the most adaptive bracket makes a decision after a single
            # partial_fit call; drop the spurious "1" elsewhere.
            decisions.discard(1)
        meta_.append(
            {
                "decisions": sorted(list(decisions)),
                "n_models": len(hist),
                "bracket": bracket,
                "partial_fit_calls": sum(calls.values()),
                "SuccessiveHalvingSearchCV params": _get_SHA_params(SHAs[bracket]),
            }
        )
    meta_ = sorted(meta_, key=lambda x: x["bracket"])
    return meta_, history_
def _get_SHA_params(SHA):
"""
Parameters
----------
SHA : SuccessiveHalvingSearchCV
Returns
-------
params : dict
Dictionary to re-create a SuccessiveHalvingSearchCV without the
estimator or parameters
Example
-------
>>> from sklearn.linear_model import SGDClassifier
>>> model = SGDClassifier()
>>> params = {"alpha": np.logspace(-1, 1)}
>>> SHA = SuccessiveHalvingSearchCV(model, params, tol=0.1,
... patience=True, random_state=42)
>>> _get_SHA_params(SHA)
{'aggressiveness': 3,
'max_iter': 100,
'n_initial_iter': 9,
'n_initial_parameters': 10,
'patience': True,
'random_state': 42,
'scoring': None,
'test_size': None,
'tol': 0.1}
"""
return {
k: v
for k, v in SHA.get_params().items()
if "estimator_" not in k and k != "parameters" and k != "estimator"
}
def _hyperband_paper_alg(R, eta=3):
    """
    Algorithm 1 from the Hyperband paper [1]_.

    Simulates (without any training) how many models each bracket creates,
    how many ``partial_fit`` calls each receives, and at which call counts
    culling decisions are made. Used by ``HyperbandSearchCV.metadata``.

    References
    ----------
    1. "Hyperband: A novel bandit-based approach to hyperparameter
       optimization", 2016 by L. Li, K. Jamieson, G. DeSalvo, A. Rostamizadeh,
       and A. Talwalkar.  https://arxiv.org/abs/1603.06560
    """
    s_max = math.floor(math.log(R, eta))
    # Total per-bracket budget (the paper's "B").
    B = (s_max + 1) * R
    brackets = reversed(range(int(s_max + 1)))
    hists = {}
    for s in brackets:
        # Bracket s starts with n models, each trained r iterations.
        n = int(math.ceil(B / R * eta ** s / (s + 1)))
        r = R * eta ** -s
        r = int(r)
        T = set(range(n))
        hist = {
            "num_estimators": n,
            "estimators": {n: 0 for n in range(n)},
            "decisions": [],
        }
        for i in range(s + 1):
            # Successive halving round i: keep ~1/eta of survivors, train
            # each survivor eta times longer.
            n_i = math.floor(n * eta ** -i)
            r_i = np.round(r * eta ** i).astype(int)
            L = {model: r_i for model in T}
            hist["estimators"].update(L)
            hist["decisions"] += [r_i]
            to_keep = math.floor(n_i / eta)
            T = {model for i, model in enumerate(T) if i < to_keep}
        hists[s] = hist
    info = [
        {
            "bracket": k,
            "n_models": hist["num_estimators"],
            "partial_fit_calls": sum(hist["estimators"].values()),
            "decisions": {int(h) for h in hist["decisions"]},
        }
        for k, hist in hists.items()
    ]
    return info
def _get_patience(patience, max_iter, aggressiveness, tol):
if not isinstance(patience, bool) and patience < max(max_iter // aggressiveness, 1):
msg = (
"The goal of `patience` is to stop training estimators that have "
"already converged *when few estimators remain*. "
"Hyperband is already an (almost optimal) adaptive scheme, "
"and patience should be large to be a minimal layer on top "
"of Hyperband. \n\n"
"To clear this warning, set \n\n"
" * patience=True\n"
" * patience >= {}\n"
" * tol=None or tol=np.nan\n\n"
"instead of patience={} "
)
if (tol is not None) and not np.isnan(tol):
warn(msg.format(max_iter // aggressiveness, patience))
elif isinstance(patience, bool) and patience:
return max(max_iter // aggressiveness, 1)
elif isinstance(patience, bool) and not patience:
return False
return int(patience)
| 38.137255 | 102 | 0.613882 |
6a47e5128605d93cfd4b53993bf91f0f3ff3d723 | 3,167 | py | Python | autopsy_app/__init__.py | ep4sh/autopsy | 2bc7235657f94af5bad7975831e3709a363d125c | [
"MIT"
] | 1 | 2022-01-28T11:06:19.000Z | 2022-01-28T11:06:19.000Z | autopsy_app/__init__.py | ep4sh/autopsy | 2bc7235657f94af5bad7975831e3709a363d125c | [
"MIT"
] | 5 | 2021-04-28T10:53:07.000Z | 2022-02-06T01:08:09.000Z | autopsy_app/__init__.py | ep4sh/autopsy | 2bc7235657f94af5bad7975831e3709a363d125c | [
"MIT"
] | null | null | null | """
Create Autopsy Postmortem Web Application
Classes:
--- admin
AutopsyModelView(sqla.ModelView)
AutopsyAdminIndexView(admin.AdminIndexView)
--- form
RegistrationForm(FlaskForm)
LoginForm(FlaskForm)
ProfileForm(FlaskForm)
PostmortemForm(FlaskForm)
SupportForm(FlaskForm)
SearchForm(FlaskForm)
RequestResetForm(FlaskForm)
ResetForm(FlaskForm)
--- models
User(db.Model, UserMixin)
Role(db.Model)
UserRoles(db.Model)
Mortem(db.Model)
Support(db.Model)
Methods:
--- misc
def resize_screenshot(scr) -> BytesIO obj
def get_tags(tags_data) -> str
def auto_tag(content) -> list
def verify_password(true_pass, data) -> bool
def generate_password(data) -> str
def choose_random_mortem(max_id) -> int / None
def define_mortem_url() -> define_mortem_url / str
def send_admin_email(db, support_case) -> None
def send_email(email, token) -> None
--- routes
def dashboard() -> render_template
def reset() -> render_template / redirect
def reset_password(token) -> render_template / redirect
def login() -> render_template / redirect
def register() -> render_template / redirect
def logout() -> redirect
def profile() -> render_template
def postmortems() -> render_template
def add_postmortem() -> render_template / redirect
def get_postmortem(url) -> render_template
def update_postmortem(url) -> render_template / redirect
def search() -> render_template / redirect
def support() -> render_template / redirect
def page_not_found(e) -> render_template
def page_forbidden(e) -> render_template
"""
import os
from dotenv import load_dotenv
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
# Create the Flask application and load .env configuration.
app = Flask(__name__)
load_dotenv()
# security settings
app.secret_key = os.getenv("FLASK_SECRET")
# mail settings (NOTE(review): os.getenv returns strings or None — port and
# SSL flag are not converted to int/bool here; confirm Flask-Mail accepts them)
app.mail_server = os.getenv("MAIL_SERVER")
app.mail_port = os.getenv("MAIL_PORT")
app.mail_use_ssl = os.getenv("MAIL_USE_SSL")
app.mail_username = os.getenv("MAIL_USERNAME")
app.mail_password = os.getenv("MAIL_PASSWORD")
# database settings
app.db_host = os.getenv("DATABASE_HOST")
app.db_user = os.getenv("DATABASE_USER")
app.db_password = os.getenv("DATABASE_PASSWORD")
app.db_port = os.getenv("DATABASE_PORT")
app.db_name = os.getenv("DATABASE_NAME")
# SQLAlchemy connection URI assembled from the individual DATABASE_* vars
app.db_uri = (f"postgresql://{app.db_user}:{app.db_password}@"
              f"{app.db_host}:{app.db_port}/{app.db_name}")
app.db_track_modifications = os.getenv("SQLALCHEMY_TRACK_MODIFICATIONS")
# Flask config
app.config['SQLALCHEMY_DATABASE_URI'] = app.db_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = app.db_track_modifications
app.config['MAIL_SERVER'] = app.mail_server
app.config['MAIL_PORT'] = app.mail_port
app.config['MAIL_USE_SSL'] = app.mail_use_ssl
app.config['MAIL_USERNAME'] = app.mail_username
app.config['MAIL_PASSWORD'] = app.mail_password
# Extension instances shared by the rest of the package.
mail = Mail(app)
flask_bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
# Imported last (circular-import pattern): routes needs `app` defined above.
from autopsy_app import routes
e783eab59a815b7dbcaede013a4317d693a6ba8b | 1,142 | py | Python | simulation.py | 1orwell/yrs2013 | cc02ad037e9ee87be73519fe6be5c783e94d0ad8 | [
"MIT"
] | null | null | null | simulation.py | 1orwell/yrs2013 | cc02ad037e9ee87be73519fe6be5c783e94d0ad8 | [
"MIT"
] | 1 | 2017-07-15T11:31:06.000Z | 2017-07-15T12:34:10.000Z | simulation.py | 1orwell/yrs2013 | cc02ad037e9ee87be73519fe6be5c783e94d0ad8 | [
"MIT"
] | null | null | null |
from sets import Set
import pickle, random, os
from collections import defaultdict
import simclass
'''Virus spreading simulation using movement of people as given by fake.py
input format is
{id: {time : position}}
'''
#Options
#fin = './days/790-0.dat'
#fin = './groups/fake_groups.dat'
chance_of_infection = 1/36.0  # transmission probability used by simclass.Simulation
default = './days/100-0.dat'  # movement file used when run as a script
###
def simulate(fin):
    '''Run the virus-spreading simulation over the movement data in ``fin``.

    NOTE: this is Python 2 code (print statements).  Loads ``data/<fin>``
    (a pickle with 'coords' and 'movement' entries), seeds people 32-35 as
    infected, steps the simulation 2000 ticks, then pickles the display data
    next to the input as ``<name>-display<ext>``.
    '''
    fin = os.path.join('data',fin)
    data = pickle.load(open(fin))
    coords = data['coords']
    ms = data['movement']
    s = simclass.Simulation(ms, chance_of_infection)
    #set infected people
    s.people[32].infected = True
    s.people[33].infected = True
    s.people[34].infected = True
    s.people[35].infected = True
    #format: key = tick, value = list of infected people
    for tick in range(0, 2000):
        s.step()
    print 'Simulation finished'
    print 'Writing to file'
    fileName, fileExtension = os.path.splitext(fin)
    output = fileName+'-display'+fileExtension
    out = {'coords': coords, 'virus': s.infected_per_tick, 'moves': s.moves_per_tick}
    pickle.dump(out, open(output, 'wb'))
    print 'finished writing'

if __name__ == '__main__':
    simulate(default)
| 23.791667 | 83 | 0.69352 |
c5f555eba2cbe5bb01b06af27285af58e4925234 | 1,111 | py | Python | model/calendar.py | Zonglin-Li6565/EInkUI | 13e6ac430e67d7e1e37deb550aa511e5d01150c6 | [
"MIT"
] | 224 | 2019-01-25T06:52:23.000Z | 2022-01-04T11:57:29.000Z | model/calendar.py | Zonglin-Li6565/EInkUI | 13e6ac430e67d7e1e37deb550aa511e5d01150c6 | [
"MIT"
] | 11 | 2019-04-02T06:23:23.000Z | 2021-12-29T04:26:53.000Z | model/calendar.py | Zonglin-Li6565/EInkUI | 13e6ac430e67d7e1e37deb550aa511e5d01150c6 | [
"MIT"
] | 24 | 2019-01-25T10:55:16.000Z | 2022-03-09T01:38:12.000Z | import calendar
import datetime
from typing import List, Tuple
def get_month_str() -> str:
    """Return the name of the current month (via calendar.month_name)."""
    current_month = datetime.datetime.today().month
    return calendar.month_name[current_month]
def get_calendar_days() -> Tuple[List[int], Tuple[int, int]]:
    """
    Build a flat grid (up to 5 rows x 7 columns) of day-of-month numbers with
    weeks starting on Sunday, plus today's (row, column) position in the grid.

    If today falls in the 6th row of its month's grid, the next month's grid
    is returned instead and the row index resets to 0 (the column still marks
    today's weekday).
    """
    today = datetime.date.today()
    year, month = today.year, today.month
    # Convert Monday-based weekday() to a Sunday-first column index.
    weekday_idx = (today.weekday() + 1) % 7
    calendar_lib = calendar.Calendar(firstweekday=6)
    weeks: List[List[datetime.date]] = calendar_lib.monthdatescalendar(year, month)
    # Row containing today -- today always sits in column `weekday_idx`.
    week_idx: int = next(
        row for row, week in enumerate(weeks) if week[weekday_idx] == today
    )
    if week_idx >= 5:
        # Today is in the sixth row; roll over to the next month's grid.
        month = month % 12 + 1
        if month == 1:
            year += 1
        weeks = calendar_lib.monthdatescalendar(year, month)
        week_idx = 0
    # Show at most five rows (slicing a shorter list is a no-op).
    weeks = weeks[:5]
    flat_days = [day.day for week in weeks for day in week]
    return flat_days, (week_idx, weekday_idx)
| 28.487179 | 79 | 0.620162 |
49717ff05b0a8652e80d608424cb3c4dcf0fcc40 | 555 | py | Python | cv/cap.py | jt120/start-ml | 46a9ba31e4e9ce0d82cef0d28a15bd999c7b4147 | [
"Apache-2.0"
] | null | null | null | cv/cap.py | jt120/start-ml | 46a9ba31e4e9ce0d82cef0d28a15bd999c7b4147 | [
"Apache-2.0"
] | null | null | null | cv/cap.py | jt120/start-ml | 46a9ba31e4e9ce0d82cef0d28a15bd999c7b4147 | [
"Apache-2.0"
] | null | null | null | import cv2
# Live webcam preview: ESC quits, SPACE saves the current frame as a PNG.
cam = cv2.VideoCapture(0)
cv2.namedWindow("test")

img_counter = 0

while True:
    ret, frame = cam.read()
    # Bug fix: check the read result *before* using the frame -- on a failed
    # read `frame` is None and cv2.imshow would raise.
    if not ret:
        break
    cv2.imshow("test", frame)

    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed: save the current frame to a numbered PNG.
        img_name = "opencv_frame_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1

cam.release()
cv2.destroyAllWindows()
44a83220fa8c80b298f20969d4a982fbe2baa085 | 5,906 | py | Python | Predictor.py | dgirzadas/Pulse-of-the-City | d1bb113f18ab9f524211c612806d2c2908f0246f | [
"MIT"
] | 2 | 2020-07-20T10:11:36.000Z | 2020-07-20T10:13:11.000Z | Predictor.py | dgirzadas/Pulse-of-the-City | d1bb113f18ab9f524211c612806d2c2908f0246f | [
"MIT"
] | null | null | null | Predictor.py | dgirzadas/Pulse-of-the-City | d1bb113f18ab9f524211c612806d2c2908f0246f | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import datetime as dt
import pickle
import re
import Printer
def get_prediction_ids(dataframe):
    """
    Gives the matrix of ids for columns that contain the predictions
    [<low_ids>, <mid_ids>, <high_ids>] -- column 0 is the datetime, followed
    by repeating low/mid/high triples.

    :param dataframe: prediction dataframe from the predict_timeframe function.
    :return: numpy array of indices for specific prediction columns
    """
    n_columns = dataframe.shape[1]
    return np.array([np.arange(offset, n_columns, 3) for offset in (1, 2, 3)])
def dump_names():
    """
    Prints the list of location names and their IDs for predictions.
    """
    zones = pd.read_csv('Data/behavior_zones.csv')
    print(zones['Name'])
class Predictor(object):
    """Pedestrian-count predictor backed by precomputed statistics loaded
    from disk: average predictions and confidence stds indexed by
    (location, weekday, hour), plus per-calendar-day scale factors."""

    def __init__(self):
        """
        Constructor method for the predictor class.
        Loads the prediction statistics, scaling factors and the location
        name list from the Models/ and Data/ directories.
        """
        self.models = []  # unused placeholder; no models are loaded here
        # avg_preds[location, weekday, hour] -> average pedestrian count
        self.avg_preds = np.load('Models/average_predictions.npy')
        # scale factor per calendar day, indexed by '<month>-<day>'
        self.daily_scales = pd.read_csv('Data/daily_scales.csv').set_index('Date')
        # conf_stds[location, weekday, hour] -> std used for the confidence band
        self.conf_stds = np.load('Data/conf_stds.npy')
        self.location_names = pd.read_csv('Data/behavior_zones.csv')['Name']

    def predict_timeframe(self, start_time, end_time, request_ids=None, ci=True):
        """
        Generates a dataframe of hourly predictions for the given location IDs
        within the requested timeframe.

        :param ci: Boolean, include the confidence interval columns.
        :param request_ids: list of location IDs for prediction (defaults to
            every location in avg_preds)
        :param start_time: starting date string, '<Y>-<m>-<d>T<H>:<M>:<S>'
        :param end_time: ending date string, same format
        :return: dataframe with one row per hour, columns per location
        """
        print("Generating predictions...")
        if request_ids is None:
            request_ids = range(self.avg_preds.shape[0])
        requests = self.__generate_pred_requests(start_time, end_time)
        data = pd.DataFrame()
        for i, request in enumerate(requests):
            # Progress indicator as a percentage of processed requests.
            Printer.loading_bar(int((i+1)/(len(requests)/100)))
            prediction = self.predict(request[0], request[1], request[2], request[3], locations=request_ids, ci=ci)
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # would need pd.concat there -- confirm the pinned pandas version.
            data = data.append(prediction)
        return data

    def predict(self, year, month, day, hour, locations, ci):
        """
        Returns an estimated pedestrian count for a given date and hour.

        :param ci: Boolean, include low/mid/high confidence columns.
        :param year: year for prediction
        :param locations: list of location indices
        :param month: month for prediction
        :param day: day for prediction
        :param hour: hour for prediction. 0 means 00:00 - 01:00, etc.
        :return: single-row dataframe of predicted counts per location
        :raises IndexError: on an invalid location index (also raised for
            KeyError lookups, e.g. a date missing from daily_scales)
        """
        try:
            predict_date = dt.date(year, month, day)
            # Day-of-year scaling, keyed as '<month>-<day>' (no year).
            scale_factor = self.daily_scales.loc[str(month) + '-' + str(day)]['Scale']
            predictions = pd.DataFrame()
            predictions['Datetime'] = [str(dt.datetime(year, month, day, hour))]
            for i, location_id in enumerate(locations):
                # Base estimate from the (location, weekday, hour) average.
                pred = int(self.avg_preds[location_id, predict_date.weekday(), hour])
                if ci:
                    # low/high band = mean -/+ one stored std, floored at 0.
                    predictions[str(self.location_names[location_id]) + '_low'] = int(
                        np.max([(pred - self.conf_stds[location_id, predict_date.weekday(), hour]) * scale_factor, 0]))
                    predictions[str(self.location_names[location_id]) + '_mid'] = int(np.max([pred * scale_factor, 0]))
                    predictions[str(self.location_names[location_id]) + '_high'] = int(
                        np.max([(pred + self.conf_stds[location_id, predict_date.weekday(), hour]) * scale_factor, 0]))
                else:
                    predictions[str(self.location_names[location_id])] = int(np.max([pred * scale_factor, 0]))
            return predictions
        except IndexError:
            raise IndexError("\nInvalid location index")
        except KeyError:
            raise IndexError("\nInvalid location index")

    def __generate_pred_requests(self, start_time, end_time):
        """
        Expand [start_time, end_time) into hourly [year, month, day, hour]
        request lists; identical start and end yield a single request.

        Side effect: sets self.startDate to Jan 1st of the start year.

        :param start_time: starting date string, '<Y>-<m>-<d>T<H>:<M>:<S>'
        :param end_time: ending date string, same format
        :return: list of [year, month, day, hour] lists
        """
        requests = []
        year_start, month_start, day_start, hour_start = self.__parse_date(start_time)
        year_end, month_end, day_end, hour_end = self.__parse_date(end_time)
        start_date = dt.datetime(year_start, month_start, day_start, hour_start)
        self.startDate = dt.date(start_date.year, 1, 1)
        end_date = dt.datetime(year_end, month_end, day_end, hour_end)
        date = start_date
        if start_time == end_time:
            requests.append([date.year, date.month, date.day, date.hour])
            return requests
        else:
            while date < end_date:
                requests.append([date.year, date.month, date.day, date.hour])
                # One-hour step; relies on Python 3 true division
                # (under Python 2, 1/24 == 0 and this would never advance).
                date += dt.timedelta(1 / 24)
            return requests

    @staticmethod
    def __parse_date(date):
        """
        Parses the date from the received string.

        :param date: string in '<year>-<month>-<day>T<hours>:<minutes>:<seconds>' form
        :return: integers (year, month, day, hour); minutes/seconds are ignored
        :raises Exception: if the string does not match the expected format
        """
        try:
            m = re.match(r'(.*)-(.*)-(.*)T(.*):(.*):(.*)', date)
            return int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
        except Exception:
            raise Exception("\nDate parsing error: \nMake sure the date is in format '<year>-<month>-<day>T<hours>:<minutes>:<seconds>'")
fcb1114fab5efb365daa12d4ac4a845b55ea147b | 1,236 | py | Python | testing/functions/scipy_interpolation_linear.py | Navidhaddad/Interpolation-jeppesen | 4b408278ac7761169693483cea778b9425c60a80 | [
"MIT"
] | null | null | null | testing/functions/scipy_interpolation_linear.py | Navidhaddad/Interpolation-jeppesen | 4b408278ac7761169693483cea778b9425c60a80 | [
"MIT"
] | null | null | null | testing/functions/scipy_interpolation_linear.py | Navidhaddad/Interpolation-jeppesen | 4b408278ac7761169693483cea778b9425c60a80 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import json
from sklearn.model_selection import train_test_split
import scipy
def scipy_interpolation(data, interpolation_point):
    """
    Linearly interpolate FUELFLOW at `interpolation_point` from the 4000
    nearest samples in `data`.

    :param data: DataFrame with a FUELFLOW column plus the predictor columns
        (per closest_points: DISA, ALTITUDE, MASS, MACH)
    :param interpolation_point: query point, same column order as the predictors
    :return: interpolated FUELFLOW value from scipy's linear griddata
    """
    # Bug fix: `import scipy` alone does not expose scipy.interpolate; the
    # submodule must be imported explicitly.
    from scipy.interpolate import griddata
    y = data.FUELFLOW
    X = data.drop(['FUELFLOW'], axis=1)
    points, values = closest_points(X, y, interpolation_point, 4000)
    # rescale=True (bool, not the string 'TRUE') normalises each axis to the
    # unit cube before triangulation.
    return griddata(points, values, interpolation_point, method='linear',
                    rescale=True)
def closest_points(X, y, interpolation_point, n):
    """
    Return the n samples nearest (Euclidean distance) to `interpolation_point`.

    :param X: DataFrame of predictor columns
    :param y: Series of target values aligned with X's index
    :param interpolation_point: reference point, one value per X column
    :param n: number of nearest rows to keep
    :return: (points, values) -- the n nearest rows of X and the matching y values

    Fixes over the previous version: `n` was accepted but ignored (a
    hard-coded 2500 rows were kept instead), and the caller's X was mutated
    by inserting a temporary 'distance' column.
    """
    # Euclidean distance of every row from the reference point.
    distances = X.sub(interpolation_point).pow(2).sum(1).pow(0.5)
    nearest = distances.nsmallest(n).index
    return X.loc[nearest], y.loc[nearest]
0e52f03da6c2c6479b7594e73e60ba6a5058b21f | 11,413 | py | Python | Schema/XProject.py | ndevenish/xia2 | 51eb0911457119f80803d5d061d44dc5f19b5a6e | [
"BSD-3-Clause"
] | null | null | null | Schema/XProject.py | ndevenish/xia2 | 51eb0911457119f80803d5d061d44dc5f19b5a6e | [
"BSD-3-Clause"
] | null | null | null | Schema/XProject.py | ndevenish/xia2 | 51eb0911457119f80803d5d061d44dc5f19b5a6e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# XProject.py
# Copyright (C) 2006 CCLRC, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# This represents the "top level" of the .xinfo hierarchy, and should
# exactly correspond to the contents of the .xinfo file.
from __future__ import absolute_import, division
from xia2.Handlers.Phil import PhilIndex
# output stream
from xia2.Handlers.Streams import Debug
from xia2.Handlers.Syminfo import Syminfo
from xia2.Handlers.XInfo import XInfo
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XSample import XSample
from xia2.Schema.XWavelength import XWavelength
# hooks to all of the child objects
# .xinfo parser
class XProject(object):
  '''A representation of a complete project. This will contain a dictionary
  of crystals.  Python 2 code (iteritems/unicode).'''

  def __init__(self, xinfo_file=None, name=None):
    # Crystal name -> XCrystal; populated either from the .xinfo file or
    # incrementally via add_crystal().
    self._crystals = { }
    if xinfo_file:
      self.setup_from_xinfo_file(xinfo_file)
    else:
      self._name = name

  # serialization functions

  def to_dict(self):
    '''Serialise this project (and its crystals, recursively) to a plain
    dict tagged with __id__ for round-tripping via from_dict.'''
    obj = {}
    obj['__id__'] = 'XProject'
    import inspect
    # Every non-callable attribute is serialised; crystals recurse.
    attributes = inspect.getmembers(self, lambda m:not(inspect.isroutine(m)))
    for a in attributes:
      if a[0] == '_crystals':
        crystals = {}
        for cname, cryst in a[1].iteritems():
          crystals[cname] = cryst.to_dict()
        obj[a[0]] = crystals
      elif a[0].startswith('__'):
        # skip dunders picked up by getmembers
        continue
      else:
        obj[a[0]] = a[1]
    return obj

  @classmethod
  def from_dict(cls, obj):
    '''Rebuild an XProject from a to_dict()-style dictionary, restoring
    each crystal's back-reference to this project.'''
    assert obj['__id__'] == 'XProject'
    return_obj = cls()
    for k, v in obj.iteritems():
      if k == '_crystals':
        v_ = {}
        for cname, cdict in v.iteritems():
          cryst = XCrystal.from_dict(cdict)
          cryst._project = return_obj
          v_[cname] = cryst
        v = v_
      setattr(return_obj, k, v)
    return return_obj

  def as_json(self, filename=None, compact=True):
    '''Serialise to JSON; write to `filename` if given, otherwise return
    the JSON text.  `compact` switches between minimal separators and a
    2-space indented layout.'''
    import json
    obj = self.to_dict()
    if compact:
      text = json.dumps(obj, skipkeys=True, separators=(',',':'), ensure_ascii=True)
    else:
      text = json.dumps(obj, skipkeys=True, indent=2, ensure_ascii=True)
    # If a filename is set then dump to file otherwise return string
    if filename is not None:
      with open(filename, 'w') as outfile:
        outfile.write(text)
    else:
      return text

  @classmethod
  def from_json(cls, filename=None, string=None):
    '''Rebuild an XProject from JSON, given exactly one of `filename` or
    `string`.'''
    import json
    def _decode_dict(data):
      ''' Decode a dict to str from unicode, converting numeric-looking
      keys back to int/float (JSON object keys are always strings). '''
      from dxtbx.serialize.load import _decode_list
      rv = {}
      for key, value in data.iteritems():
        if isinstance(key, unicode):
          key = key.encode('utf-8')
        if isinstance(value, unicode):
          value = value.encode('utf-8')
        elif isinstance(value, list):
          value = _decode_list(value)
        elif isinstance(value, dict):
          value = _decode_dict(value)
        try:
          key = float(key)
          if int(key) == key: key = int(key)
        except ValueError:
          pass
        rv[key] = value
      return rv
    assert [filename, string].count(None) == 1
    if filename is not None:
      with open(filename, 'rb') as f:
        string = f.read()
    obj = json.loads(string, object_hook=_decode_dict)
    return cls.from_dict(obj)

  def get_output(self):
    '''Human-readable report for the project and all crystals (trailing
    newline stripped).'''
    result = 'Project: %s\n' % self._name
    for crystal in self._crystals.keys():
      result += self._crystals[crystal].get_output()
    return result[:-1]

  def summarise(self):
    '''Produce summary information as a list of text records.'''
    summary = ['Project: %s' % self._name]
    for crystal in self._crystals.keys():
      for record in self._crystals[crystal].summarise():
        summary.append(record)
    return summary

  def get_name(self):
    return self._name

  def add_crystal(self, xcrystal):
    '''Add a new xcrystal to the project; raises RuntimeError on type or
    duplicate-name violations.'''
    if not xcrystal.__class__.__name__ == 'XCrystal':
      raise RuntimeError('crystal must be class XCrystal.')
    if xcrystal.get_name() in self._crystals.keys():
      raise RuntimeError('XCrystal with name %s already exists' % \
            xcrystal.get_name())
    self._crystals[xcrystal.get_name()] = xcrystal

  def get_crystals(self):
    return self._crystals

  def setup_from_xinfo_file(self, xinfo_file):
    '''Set up this object & all subobjects (crystals, samples, wavelengths,
    sweeps) based on the .xinfo file contents, merged with command-line
    settings from PhilIndex.'''
    settings = PhilIndex.params.xia2.settings

    # Optional per-sweep id/range filters from the command line.
    sweep_ids = [sweep.id for sweep in settings.sweep]
    sweep_ranges = [sweep.range for sweep in settings.sweep]
    if not sweep_ids:
      sweep_ids = None
      sweep_ranges = None

    xinfo = XInfo(xinfo_file, sweep_ids=sweep_ids, sweep_ranges=sweep_ranges)
    self._name = xinfo.get_project()
    crystals = xinfo.get_crystals()
    for crystal in crystals.keys():
      xc = XCrystal(crystal, self)
      if 'sequence' in crystals[crystal]:
        xc.set_aa_sequence(crystals[crystal]['sequence'])
      if 'ha_info' in crystals[crystal]:
        if crystals[crystal]['ha_info'] != { }:
          xc.set_ha_info(crystals[crystal]['ha_info'])

      if 'scaled_merged_reflection_file' in crystals[crystal]:
        xc.set_scaled_merged_reflections(
          crystals[crystal]['scaled_merged_reflections'])
      if 'reference_reflection_file' in crystals[crystal]:
        xc.set_reference_reflection_file(
          crystals[crystal]['reference_reflection_file'])
      if 'freer_file' in crystals[crystal]:
        xc.set_freer_file(crystals[crystal]['freer_file'])

      # user assigned spacegroup: .xinfo wins over the command line
      if 'user_spacegroup' in crystals[crystal]:
        xc.set_user_spacegroup(crystals[crystal]['user_spacegroup'])
      elif settings.space_group is not None:
        # XXX do we ever actually get here?
        xc.set_user_spacegroup(settings.space_group.type().lookup_symbol())

      # add a default sample if none present in xinfo file
      if not crystals[crystal]['samples']:
        crystals[crystal]['samples']['X1'] = {}

      for sample in crystals[crystal]['samples'].keys():
        sample_info = crystals[crystal]['samples'][sample]
        xsample = XSample(sample, xc)
        xc.add_sample(xsample)

      if not crystals[crystal]['wavelengths']:
        raise RuntimeError('No wavelengths specified in xinfo file')

      for wavelength in crystals[crystal]['wavelengths'].keys():
        # FIXME 29/NOV/06 in here need to be able to cope with
        # no wavelength information - this should default to the
        # information in the image header (John Cowan pointed
        # out that this was untidy - requiring that it agrees
        # with the value in the header makes this almost
        # useless.)

        wave_info = crystals[crystal]['wavelengths'][wavelength]

        if 'wavelength' not in wave_info:
          Debug.write(
            'No wavelength value given for wavelength %s' % wavelength)
        else:
          Debug.write(
            'Overriding value for wavelength %s to %8.6f' % \
              (wavelength, float(wave_info['wavelength'])))

        # handle case where user writes f" in place of f''
        if 'f"' in wave_info and not \
            'f\'\'' in wave_info:
          wave_info['f\'\''] = wave_info['f"']

        xw = XWavelength(wavelength, xc,
                         wavelength = wave_info.get('wavelength', 0.0),
                         f_pr = wave_info.get('f\'', 0.0),
                         f_prpr = wave_info.get('f\'\'', 0.0),
                         dmin = wave_info.get('dmin', 0.0),
                         dmax = wave_info.get('dmax', 0.0))

        # in here I also need to look and see if we have
        # been given any scaled reflection files...

        # check to see if we have a user supplied lattice...
        if 'user_spacegroup' in crystals[crystal]:
          lattice = Syminfo.get_lattice(
            crystals[crystal]['user_spacegroup'])
        elif settings.space_group is not None:
          # XXX do we ever actually get here?
          lattice = Syminfo.get_lattice(
            settings.space_group.type().lookup_symbol())
        else:
          lattice = None

        # and also user supplied cell constants - from either
        # the xinfo file (the first port of call) or the
        # command-line.
        if 'user_cell' in crystals[crystal]:
          cell = crystals[crystal]['user_cell']
        elif settings.unit_cell is not None:
          # XXX do we ever actually get here?
          cell = settings.unit_cell.parameters()
        else:
          cell = None

        dmin = wave_info.get('dmin', 0.0)
        dmax = wave_info.get('dmax', 0.0)

        if dmin == 0.0 and dmax == 0.0:
          dmin = PhilIndex.params.xia2.settings.resolution.d_min
          dmax = PhilIndex.params.xia2.settings.resolution.d_max

        # want to be able to locally override the resolution limits
        # for this sweep while leaving the rest for the data set
        # intact...

        for sweep_name in crystals[crystal]['sweeps'].keys():
          sweep_info = crystals[crystal]['sweeps'][sweep_name]

          sample_name = sweep_info.get('sample')
          if sample_name is None:
            if len(crystals[crystal]['samples']) == 1:
              sample_name = crystals[crystal]['samples'].keys()[0]
            else:
              raise RuntimeError('No sample given for sweep %s' %sweep_name)

          xsample = xc.get_xsample(sample_name)
          assert xsample is not None

          # remember the data-set resolution so the per-sweep override
          # below can be undone after this sweep.
          dmin_old = dmin
          dmax_old = dmax
          replace = False

          if 'RESOLUTION' in sweep_info:
            values = map(float, sweep_info['RESOLUTION'].split())
            if len(values) == 1:
              dmin = values[0]
            elif len(values) == 2:
              dmin = min(values)
              dmax = max(values)
            else:
              raise RuntimeError('bad resolution for sweep %s' % sweep_name)
            replace = True

          if sweep_info['wavelength'] == wavelength:

            frames_to_process = sweep_info.get('start_end')

            xsweep = xw.add_sweep(
              sweep_name,
              sample=xsample,
              directory=sweep_info.get('DIRECTORY'),
              image=sweep_info.get('IMAGE'),
              beam=sweep_info.get('beam'),
              reversephi=sweep_info.get('reversephi', False),
              distance=sweep_info.get('distance'),
              gain=float(sweep_info.get('GAIN', 0.0)),
              dmin=dmin, dmax=dmax,
              polarization=float(sweep_info.get(
                'POLARIZATION', 0.0)),
              frames_to_process=frames_to_process,
              user_lattice=lattice,
              user_cell=cell,
              epoch=sweep_info.get('epoch', 0),
              ice=sweep_info.get('ice', False),
              excluded_regions=sweep_info.get(
                'excluded_regions', []),
              )

            xsample.add_sweep(xsweep)

          # restore the data-set level resolution limits
          dmin = dmin_old
          dmax = dmax_old

        xc.add_wavelength(xw)

      self.add_crystal(xc)

  def write_xifo(self):
    '''Write an updated .xinfo file which takes into account the input
    provided by the user on the command line and any input xinfo
    file: this is what xia2 understood to be the problem.'''
    raise NotImplementedError('FIXME this method must be implemented')
| 32.51567 | 84 | 0.613423 |
d14e202563b28e6ec0c470156441b3250d5e866b | 1,190 | py | Python | api.py | cqparts/cqparts_webapi | 67adc36f54dc5d486734133440a5b05908febcf8 | [
"Apache-2.0"
] | 3 | 2019-02-09T01:14:06.000Z | 2021-01-05T17:52:14.000Z | api.py | cqparts/cqparts_webapi | 67adc36f54dc5d486734133440a5b05908febcf8 | [
"Apache-2.0"
] | 4 | 2018-11-12T12:42:56.000Z | 2018-12-23T02:32:05.000Z | api.py | cqparts/cqparts_webapi | 67adc36f54dc5d486734133440a5b05908febcf8 | [
"Apache-2.0"
] | 1 | 2018-08-30T10:41:24.000Z | 2018-08-30T10:41:24.000Z | from flask import Blueprint, jsonify
bp = Blueprint("api", __name__, url_prefix="/api/v0/")
d = ""
@bp.route("/")
def index():
return "Index"
@bp.route("/list/<path:modelname>")
def subcat(modelname):
return jsonify(d.prefix(modelname))
@bp.route("/show/<path:modelname>")
def show(modelname):
return jsonify(d.params(modelname))
@bp.route("/stat/unbuilt")
def unbuilt():
un = []
l = d.treeiter("export")
for i in l:
if (i.built == False) & (i.is_leaf == True):
un.append(i.dir())
return jsonify(un)
@bp.route("/stat/built")
def built():
b = []
l = d.treeiter("export")
for i in l:
if (i.built == True) & (i.is_leaf == True):
b.append(i.info())
return jsonify(b)
@bp.route("/stat/all")
def all():
a = []
l = d.treeiter("export")
for i in l:
if i.is_leaf == True:
a.append(i.info())
else:
a.append(d.prefix(i.get_path()[1:]))
return jsonify(a)
@bp.route("/stat/showcase")
def showcase():
a = []
l = d.treeiter("export/showcase")
for i in l:
if i.is_leaf == True:
a.append(i.info())
return jsonify(a)
| 19.193548 | 54 | 0.547899 |
a2a1381eec723e20683a703b1584a92108852c39 | 5,073 | py | Python | infrastructure/storage/swift.py | Barometre-de-la-Science-Ouverte/bso3-harvest-publication | 06c729a1e44ed87e8f73b4c2bd456f5e09a73e34 | [
"MIT"
] | null | null | null | infrastructure/storage/swift.py | Barometre-de-la-Science-Ouverte/bso3-harvest-publication | 06c729a1e44ed87e8f73b4c2bd456f5e09a73e34 | [
"MIT"
] | null | null | null | infrastructure/storage/swift.py | Barometre-de-la-Science-Ouverte/bso3-harvest-publication | 06c729a1e44ed87e8f73b4c2bd456f5e09a73e34 | [
"MIT"
] | null | null | null | import shutil
from swiftclient.service import SwiftError, SwiftService, SwiftUploadObject
from typing import List
from config.harvester_config import config_harvester
from domain.ovh_path import OvhPath
from application.server.main.logger import get_logger
from config.logger_config import LOGGER_LEVEL
logger = get_logger(__name__, level=LOGGER_LEVEL)
METADATA_DUMP = config_harvester['metadata_dump']
PUBLICATIONS_DUMP = config_harvester['publications_dump']
class Swift(object):
def __init__(self, config):
self.config = config
options = self._init_swift_options()
options['object_uu_threads'] = 20
self.swift = SwiftService(options=options)
container_names = []
try:
list_account_part = self.swift.list()
for page in list_account_part:
if page["success"]:
for item in page["listing"]:
i_name = item["name"]
container_names.append(i_name)
if i_name == METADATA_DUMP:
print("using input SWIFT", METADATA_DUMP, "container:", item)
elif i_name == PUBLICATIONS_DUMP:
print("using output SWIFT", PUBLICATIONS_DUMP, "container:", item)
else:
logger.error("error listing SWIFT object storage containers")
except SwiftError as e:
logger.exception("error listing containers")
if PUBLICATIONS_DUMP not in container_names:
# create the container
try:
self.swift.post(container=PUBLICATIONS_DUMP)
except SwiftError:
logger.exception(
"error creating SWIFT object storage container " + PUBLICATIONS_DUMP)
else:
logger.debug("container already exists on SWIFT object storage: " + PUBLICATIONS_DUMP)
def _init_swift_options(self):
options = {}
for key in self.config["swift"]:
if self.config["swift"][key] and len(self.config["swift"][key].strip()) > 0:
options[key] = self.config["swift"][key]
return options
def upload_files_to_swift(self, container, file_path_dest_path_tuples: List):
"""
Bulk upload of a list of files to current SWIFT object storage container under the same destination path
"""
# Slightly modified to be able to upload to more than one dest_path
objs = [SwiftUploadObject(file_path, object_name=str(dest_path)) for file_path, dest_path in
file_path_dest_path_tuples if isinstance(dest_path, OvhPath)]
try:
for result in self.swift.upload(container, objs):
if not result['success']:
error = result['error']
if result['action'] == "upload_object":
logger.error("Failed to upload object %s to container %s: %s" % (
container, result['object'], error))
else:
logger.exception("%s" % error, exc_info=True)
else:
if result['action'] == "upload_object":
logger.debug(f'Result upload : {result["object"]} succesfully uploaded on {result["container"]} (from {result["path"]})')
except SwiftError:
logger.exception("error uploading file to SWIFT container", exc_info=True)
def download_files(self, container, file_path, dest_path):
"""
Download a file given a path and returns the download destination file path.
"""
if type(file_path) == str:
objs = [file_path]
elif type(file_path) == list:
objs = file_path
try:
for down_res in self.swift.download(container=container, objects=objs):
if down_res['success']:
local_path = down_res['path']
shutil.move(local_path, dest_path)
else:
logger.error("'%s' download failed" % down_res['object'])
except SwiftError:
logger.exception("error downloading file from SWIFT container")
def get_swift_list(self, container, dir_name=None):
"""
Return all contents of a given dir in SWIFT object storage.
Goes through the pagination to obtain all file names.
afaik, this is terribly inefficient, as we have to go through all the objects of the storage.
"""
result = []
try:
list_parts_gen = self.swift.list(container=container)
for page in list_parts_gen:
if page["success"]:
for item in page["listing"]:
if dir_name is None or item["name"].startswith(dir_name):
result.append(item["name"])
else:
logger.error(page["error"])
except SwiftError as e:
logger.error(e.value)
return result
| 42.991525 | 145 | 0.58565 |
397caf9e474112b395f6110bc7de7d39c072ef05 | 466 | py | Python | scripts/buzzer3.py | gblackwell/pimouse_ros | b746a22ec0c4ccc7e7281b4283e1ef32ead0b264 | [
"BSD-3-Clause"
] | null | null | null | scripts/buzzer3.py | gblackwell/pimouse_ros | b746a22ec0c4ccc7e7281b4283e1ef32ead0b264 | [
"BSD-3-Clause"
] | null | null | null | scripts/buzzer3.py | gblackwell/pimouse_ros | b746a22ec0c4ccc7e7281b4283e1ef32ead0b264 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import UInt16
def write_freq(hz=0):
    # Write a frequency value (Hz) to the rtbuzzer device file.  Errors
    # (e.g. the device driver is not loaded) are logged via rospy, not raised.
    device = "/dev/rtbuzzer0"
    try:
        with open(device, "w") as dev:
            dev.write("{}\n".format(hz))
    except IOError:
        rospy.logerr("can't write to " + device)
def recv_buzzer(data):
    # Subscriber callback: log the message, then forward its UInt16 payload
    # (data.data) to the buzzer device.
    rospy.loginfo(data)
    rospy.loginfo(data.data)
    write_freq(data.data)
if __name__ == "__main__":
    # ROS node: subscribe to the "buzzer" topic and forward each received
    # frequency to the device driver until shutdown.
    rospy.init_node("buzzer")
    rospy.Subscriber("buzzer", UInt16, recv_buzzer)
    rospy.spin()
| 21.181818 | 49 | 0.675966 |
f5870be706c295f0bbacdd42842735fa72968f9b | 745 | py | Python | thingity/tests/test_palette.py | ianhomer/thingity | 7be534f0776799326acc0aae39d92813af61fb39 | [
"MIT"
] | null | null | null | thingity/tests/test_palette.py | ianhomer/thingity | 7be534f0776799326acc0aae39d92813af61fb39 | [
"MIT"
] | null | null | null | thingity/tests/test_palette.py | ianhomer/thingity | 7be534f0776799326acc0aae39d92813af61fb39 | [
"MIT"
] | null | null | null | import pytest
from pytest_bdd import scenarios, when, then, parsers
from .. import Palette
scenarios("features/palette.feature")
@pytest.fixture
def context():
return dict()
@when(parsers.parse("I have the empty palette"))
def I_have_empty_palette(context):
context["palette"] = Palette()
@when(parsers.parse("I have the {theme} palette"))
def I_have_named_palette(context, theme):
context["palette"] = Palette(theme=theme)
@then(parsers.parse("the color for {name} is PURPLE"))
def color_should_be_purple(context, name):
assert context["palette"].color(name) == "\033[95m"
@then(parsers.parse("the color for {name} is empty"))
def color_should_be_empty(context, name):
assert context["palette"].color(name) == ""
| 24.032258 | 55 | 0.722148 |
18ddbd72c4c351c4a46de79156da4b020baa11fe | 5,760 | py | Python | get_together/views/__init__.py | hmschreck/GetTogether | acc645d065121cf34418dd70ebb96da3588f5acb | [
"BSD-2-Clause"
] | null | null | null | get_together/views/__init__.py | hmschreck/GetTogether | acc645d065121cf34418dd70ebb96da3588f5acb | [
"BSD-2-Clause"
] | null | null | null | get_together/views/__init__.py | hmschreck/GetTogether | acc645d065121cf34418dd70ebb96da3588f5acb | [
"BSD-2-Clause"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from django.contrib.auth import logout as logout_user
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from events.models.locale import City
from events.models.events import Event, Place, Attendee
from events.models.profiles import Team, UserProfile, Member
from events.models.search import Searchable
from events.forms import SearchForm
from events import location
from accounts.decorators import setup_wanted
from django.conf import settings
import simple_ga as ga
import datetime
import simplejson
import geocoder
import math
import traceback
from .teams import *
from .orgs import *
from .events import *
from .places import *
from .user import *
from .new_user import *
from .new_team import *
from .speakers import *
from .utils import *
KM_PER_DEGREE_LAT = 110.574
KM_PER_DEGREE_LNG = 111.320 # At the equator
DEFAULT_NEAR_DISTANCE = 100 # kilometres

# Create your views here.
@setup_wanted
def home(request, *args, **kwards):
    # NOTE(review): `kwards` is a typo for `kwargs` (harmless; never read).
    #
    # Landing page: resolves the visitor's location (explicit ?city= or GeoIP),
    # then fills `context` with nearby upcoming events and teams inside a
    # bounding box of `distance` km.
    context = {}
    if request.user.is_authenticated:
        user_teams = Team.objects.filter(owner_profile=request.user.profile)
        if len(user_teams) > 0:
            context['user_teams'] = user_teams

    near_distance = int(request.GET.get("distance", DEFAULT_NEAR_DISTANCE))
    context['distance'] = near_distance
    context['geoip_lookup'] = False
    city=None
    ll = None  # [latitude, longitude] once known
    if "city" in request.GET and request.GET.get("city"):
        # Explicit city chosen by the visitor.
        context['city_search'] = True
        city = City.objects.get(id=request.GET.get("city"))
        context['city'] = city
        ll = [city.latitude, city.longitude]
        ga.add_event(request, 'homepage_search', category='search', label=city.short_name)
    else :
        context['city_search'] = False
        try:
            # Fall back to a GeoIP lookup of the visitor's address.
            g = location.get_geoip(request)
            if g.latlng is not None and g.latlng[0] is not None and g.latlng[1] is not None:
                ll = g.latlng
                context['geoip_lookup'] = True
                try:
                    # Grow a bounding box 1 km at a time (up to 100 km) until
                    # it contains at least one known city, then pick the
                    # closest.
                    city_distance = 1 #km
                    while city is None and city_distance < 100:
                        minlat = ll[0]-(city_distance/KM_PER_DEGREE_LAT)
                        maxlat = ll[0]+(city_distance/KM_PER_DEGREE_LAT)
                        minlng = ll[1]-(city_distance/(KM_PER_DEGREE_LNG*math.cos(math.radians(ll[0]))))
                        maxlng = ll[1]+(city_distance/(KM_PER_DEGREE_LNG*math.cos(math.radians(ll[0]))))
                        nearby_cities = City.objects.filter(latitude__gte=minlat, latitude__lte=maxlat, longitude__gte=minlng, longitude__lte=maxlng)
                        if len(nearby_cities) == 0:
                            city_distance += 1
                        else:
                            city = sorted(nearby_cities, key=lambda city: location.city_distance_from(ll, city))[0]
                            # Remember the detected city on the profile.
                            if request.user.profile.city is None:
                                profile = request.user.profile
                                profile.city = city
                                profile.save()
                except:
                    # NOTE(review): bare except plus the typo 'filed'
                    # (-> 'failed') in the runtime message below.
                    raise Exception('City lookup filed')
            else:
                raise Exception('Geocoder result has no latlng')
        except Exception as err:
            # Any failure above degrades gracefully to "no location known".
            context['geoip_lookup'] = False
            print("Geocoder lookup failed for %s" % request.META.get('REMOTE_ADDR'), err)

    if ll is not None:
        context['latitude'] = ll[0]
        context['longitude'] = ll[1]
        try:
            # Bounding box of near_distance km around the resolved point.
            minlat = ll[0]-(near_distance/KM_PER_DEGREE_LAT)
            maxlat = ll[0]+(near_distance/KM_PER_DEGREE_LAT)
            minlng = ll[1]-(near_distance/(KM_PER_DEGREE_LNG*math.cos(math.radians(ll[0]))))
            maxlng = ll[1]+(near_distance/(KM_PER_DEGREE_LNG*math.cos(math.radians(ll[0]))))
            context['minlat'] = minlat
            context['maxlat'] = maxlat
            context['minlng'] = minlng
            context['maxlng'] = maxlng
            near_events = Searchable.objects.filter(latitude__gte=minlat, latitude__lte=maxlat, longitude__gte=minlng, longitude__lte=maxlng, end_time__gte=datetime.datetime.now())
            context['near_events'] = sorted(near_events, key=lambda searchable: location.searchable_distance_from(ll, searchable))
            # # If there aren't any teams in the user's geoip area, show them the closest ones
            if context['geoip_lookup'] and len(near_events) < 1:
                context['closest_events'] = sorted(Searchable.objects.filter(end_time__gte=datetime.datetime.now()),
                                                   key=lambda searchable: location.searchable_distance_from(ll, searchable))[:3]
            near_teams = Team.objects.filter(city__latitude__gte=minlat, city__latitude__lte=maxlat, city__longitude__gte=minlng, city__longitude__lte=maxlng)
            context['near_teams'] = sorted(near_teams, key=lambda team: location.team_distance_from(ll, team))
            # # If there aren't any teams in the user's geoip area, show them the closest ones
            if context['geoip_lookup'] and len(near_teams) < 1:
                context['closest_teams'] = sorted(Team.objects.all(), key=lambda team: location.team_distance_from(ll, team))[:3]
        except Exception as err:
            print("Error looking up nearby teams and events", err)
            traceback.print_exc()

    # Pre-fill the search form with the resolved distance/city.
    initial_search = {'distance': near_distance}
    if city is not None and city.id > 0:
        initial_search['city'] = city.id
    search_form = SearchForm(initial=initial_search)
    context['search_form'] = search_form

    return render(request, 'get_together/index.html', context)
| 43.308271 | 180 | 0.642882 |
000b21e18d29d5cacd2745d07a38da9ebb3ae50f | 5,645 | py | Python | graphene_django/rest_framework/mutation.py | bellini666/graphene-django | 558288afcefe49c0183e0ab16bb2c6ead4d8041d | [
"MIT"
] | 4,038 | 2016-09-18T01:45:22.000Z | 2022-03-31T01:06:57.000Z | graphene_django/rest_framework/mutation.py | bellini666/graphene-django | 558288afcefe49c0183e0ab16bb2c6ead4d8041d | [
"MIT"
] | 1,104 | 2016-09-19T20:10:22.000Z | 2022-03-30T17:37:46.000Z | graphene_django/rest_framework/mutation.py | bellini666/graphene-django | 558288afcefe49c0183e0ab16bb2c6ead4d8041d | [
"MIT"
] | 791 | 2016-09-18T13:48:11.000Z | 2022-03-29T08:32:06.000Z | from collections import OrderedDict
from django.shortcuts import get_object_or_404
from rest_framework import serializers
import graphene
from graphene.relay.mutation import ClientIDMutation
from graphene.types import Field, InputField
from graphene.types.mutation import MutationOptions
from graphene.types.objecttype import yank_fields_from_attrs
from ..types import ErrorType
from .serializer_converter import convert_serializer_field
class SerializerMutationOptions(MutationOptions):
    """Options container for :class:`SerializerMutation` subclasses.

    Attributes are populated by ``SerializerMutation.__init_subclass_with_meta__``.
    """
    # Name of the field used to look up an existing instance for updates.
    lookup_field = None
    # Django model backing the serializer, if any.
    model_class = None
    # Which operations the mutation supports ("create" and/or "update").
    model_operations = ["create", "update"]
    # DRF serializer class that drives the input/output field generation.
    serializer_class = None
def fields_for_serializer(
    serializer,
    only_fields,
    exclude_fields,
    is_input=False,
    convert_choices_to_enum=True,
    lookup_field=None,
):
    """Map a DRF serializer's fields to graphene fields.

    Fields outside ``only_fields`` or listed in ``exclude_fields`` are
    dropped.  Write-only fields are hidden from output (Query) types,
    and read-only fields are hidden from input types — except for the
    ``lookup_field``, which stays on input so updates can address an
    existing instance.
    """
    converted = OrderedDict()
    for field_name, drf_field in serializer.fields.items():
        if only_fields and field_name not in only_fields:
            continue
        if field_name in exclude_fields:
            continue
        # don't show write_only fields in Query
        if drf_field.write_only and not is_input:
            continue
        # don't show read_only fields in Input (the lookup field is kept)
        if drf_field.read_only and is_input and lookup_field != field_name:
            continue
        converted[field_name] = convert_serializer_field(
            drf_field,
            is_input=is_input,
            convert_choices_to_enum=convert_choices_to_enum,
        )
    return converted
class SerializerMutation(ClientIDMutation):
    """Relay mutation whose input/output fields are derived from a DRF serializer.

    Subclasses configure behaviour via ``Meta`` options (``serializer_class``,
    ``model_class``, ``model_operations``, ``lookup_field``, ...), which are
    consumed by ``__init_subclass_with_meta__`` below.
    """
    class Meta:
        abstract = True
    # Serializer validation errors; one entry per failing field.
    errors = graphene.List(
        ErrorType, description="May contain more than one error for same field."
    )
    @classmethod
    def __init_subclass_with_meta__(
        cls,
        lookup_field=None,
        serializer_class=None,
        model_class=None,
        model_operations=("create", "update"),
        only_fields=(),
        exclude_fields=(),
        convert_choices_to_enum=True,
        _meta=None,
        **options
    ):
        """Derive graphene input/output fields from the configured serializer."""
        if not serializer_class:
            raise Exception("serializer_class is required for the SerializerMutation")
        if "update" not in model_operations and "create" not in model_operations:
            raise Exception('model_operations must contain "create" and/or "update"')
        serializer = serializer_class()
        # Fall back to the serializer's own Meta.model when no model is given.
        if model_class is None:
            serializer_meta = getattr(serializer_class, "Meta", None)
            if serializer_meta:
                model_class = getattr(serializer_meta, "model", None)
        # Default the lookup field to the model's primary key name.
        if lookup_field is None and model_class:
            lookup_field = model_class._meta.pk.name
        # Input fields keep the (read-only) lookup field so updates can
        # address an existing instance; output fields hide write-only fields.
        input_fields = fields_for_serializer(
            serializer,
            only_fields,
            exclude_fields,
            is_input=True,
            convert_choices_to_enum=convert_choices_to_enum,
            lookup_field=lookup_field,
        )
        output_fields = fields_for_serializer(
            serializer,
            only_fields,
            exclude_fields,
            is_input=False,
            convert_choices_to_enum=convert_choices_to_enum,
            lookup_field=lookup_field,
        )
        if not _meta:
            _meta = SerializerMutationOptions(cls)
        _meta.lookup_field = lookup_field
        _meta.model_operations = model_operations
        _meta.serializer_class = serializer_class
        _meta.model_class = model_class
        _meta.fields = yank_fields_from_attrs(output_fields, _as=Field)
        input_fields = yank_fields_from_attrs(input_fields, _as=InputField)
        super(SerializerMutation, cls).__init_subclass_with_meta__(
            _meta=_meta, input_fields=input_fields, **options
        )
    @classmethod
    def get_serializer_kwargs(cls, root, info, **input):
        """Build the kwargs used to instantiate the serializer.

        For model-backed mutations this resolves the existing instance (404s
        when the lookup value matches nothing) and chooses partial vs. full
        validation; otherwise only data/context are passed through.
        """
        lookup_field = cls._meta.lookup_field
        model_class = cls._meta.model_class
        if model_class:
            # Presence of the lookup value in the input selects "update".
            if "update" in cls._meta.model_operations and lookup_field in input:
                instance = get_object_or_404(
                    model_class, **{lookup_field: input[lookup_field]}
                )
                partial = True
            elif "create" in cls._meta.model_operations:
                instance = None
                partial = False
            else:
                raise Exception(
                    'Invalid update operation. Input parameter "{}" required.'.format(
                        lookup_field
                    )
                )
            return {
                "instance": instance,
                "data": input,
                "context": {"request": info.context},
                "partial": partial,
            }
        return {"data": input, "context": {"request": info.context}}
    @classmethod
    def mutate_and_get_payload(cls, root, info, **input):
        """Validate the input via the serializer; mutate on success,
        return field-level errors on failure."""
        kwargs = cls.get_serializer_kwargs(root, info, **input)
        serializer = cls._meta.serializer_class(**kwargs)
        if serializer.is_valid():
            return cls.perform_mutate(serializer, info)
        else:
            errors = ErrorType.from_errors(serializer.errors)
            return cls(errors=errors)
    @classmethod
    def perform_mutate(cls, serializer, info):
        """Save the serializer and echo the saved object's readable fields
        back into the mutation payload."""
        obj = serializer.save()
        kwargs = {}
        for f, field in serializer.fields.items():
            if not field.write_only:
                # SerializerMethodField has no attribute source; render it.
                if isinstance(field, serializers.SerializerMethodField):
                    kwargs[f] = field.to_representation(obj)
                else:
                    kwargs[f] = field.get_attribute(obj)
        return cls(errors=None, **kwargs)
| 32.073864 | 86 | 0.619663 |
8388a3d202fffb061be2a50096c4a6d6082112be | 617 | py | Python | score-sets/GRCh37/RVIS/make.py | jimhavrilla/pathoscore | 4d0c2ada1c534bd672e106d253d089745e2f2417 | [
"MIT"
] | 24 | 2017-08-25T17:27:24.000Z | 2021-12-24T18:05:02.000Z | score-sets/GRCh37/RVIS/make.py | jimhavrilla/pathoscore | 4d0c2ada1c534bd672e106d253d089745e2f2417 | [
"MIT"
] | 36 | 2017-08-25T17:32:45.000Z | 2019-10-22T21:32:05.000Z | score-sets/GRCh37/RVIS/make.py | jimhavrilla/pathoscore | 4d0c2ada1c534bd672e106d253d089745e2f2417 | [
"MIT"
] | 9 | 2017-08-25T21:38:43.000Z | 2021-12-24T18:05:03.000Z | import sys
import toolshed as ts
from collections import defaultdict
genes = defaultdict(float)
for d in ts.reader(sys.argv[1]):
genes[d['CCDSr20']] = float(d['%RVIS[pop_maf_0.05%(any)]']) # gene, value at 0.05% MAF
for line in open(sys.argv[2], "r"): # gene list
fields = line.strip().split("\t")
chrom = fields[0]; start = fields[1]; end = fields[2]; gene = fields[3]
RVIS=genes[gene]
if RVIS == 0.0: # RVIS does not have this score, is dict default
continue
print("{chrom}\t{start}\t{end}\t{gene}\t{RVIS}".format(chrom=chrom, start=start,
end=end, gene=gene, RVIS=RVIS))
| 38.5625 | 90 | 0.641815 |
4d18a820f60927f68cd5581ab1aa3c607744ddfc | 6,997 | py | Python | svdbs/tests/factories.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | 14 | 2019-09-30T12:44:17.000Z | 2022-02-04T14:45:16.000Z | svdbs/tests/factories.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | 244 | 2021-03-26T15:13:15.000Z | 2022-03-31T15:48:04.000Z | svdbs/tests/factories.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | 8 | 2020-05-19T21:55:13.000Z | 2022-03-31T07:02:58.000Z | """Factory Boy factory classes for ``svdbs``."""
import binning
import factory
from ..models import (
DgvGoldStandardSvs,
DgvSvs,
ExacCnv,
ThousandGenomesSv,
DbVarSv,
GnomAdSv,
EXAC_POP_CHOICES,
)
class DgvGoldStandardSvsFactory(factory.django.DjangoModelFactory):
    """Factory for ``DgvGoldStandardSvs`` test records (DGV gold-standard SVs)."""
    class Meta:
        model = DgvGoldStandardSvs
    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    # Outer/inner confidence interval around the SV breakpoints; each record
    # is placed 100bp after the previous one via the sequence counter.
    start_outer = factory.Sequence(lambda n: (n + 1) * 100 - 10)
    start_inner = factory.Sequence(lambda n: (n + 1) * 100 + 10)
    end_inner = factory.Sequence(lambda n: (n + 1) * 100 + 90)
    end_outer = factory.Sequence(lambda n: (n + 1) * 100 + 110)
    # UCSC bin computed over the widest (outer) extent of the interval.
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100 - 11, (n + 1) * 100 + 110))
    accession = factory.Sequence(lambda n: "DGV-GS-%d" % n)
    sv_type = "DEL"
    sv_sub_type = "DEL"
    num_studies = 1
    studies = factory.Sequence(lambda n: ["DGV-GS-STUDY-%d" % n])
    num_platforms = 1
    platforms = factory.Sequence(lambda n: ["DGV-GS-PLATFORM-%d" % n])
    num_algorithms = 1
    algorithms = factory.Sequence(lambda n: ["DGV-GS-ALGO-%d" % n])
    num_variants = 1
    num_carriers = 1
    num_unique_samples = 1
    # Per-population carrier counts; the single carrier is Middle Eastern.
    num_carriers_african = 0
    num_carriers_asian = 0
    num_carriers_european = 0
    num_carriers_mexican = 0
    num_carriers_middle_east = 1
    num_carriers_native_american = 0
    num_carriers_north_american = 0
    num_carriers_oceania = 0
    num_carriers_south_american = 0
    num_carriers_admixed = 0
    num_carriers_unknown = 0
class DgvSvsFactory(factory.django.DjangoModelFactory):
    """Factory for ``DgvSvs`` test records (DGV structural variants)."""
    class Meta:
        model = DgvSvs
    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    # Non-overlapping 100bp intervals, advanced by the sequence counter.
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    accession = factory.Sequence(lambda n: "DGV-%d" % n)
    sv_type = "DEL"
    sv_sub_type = "DEL"
    study = factory.Sequence(lambda n: "DGV-STUDY-%d" % n)
    platform = factory.Sequence(lambda n: ["DGV-PLATFORM-%d" % n])
    num_samples = 1
    observed_gains = 0
    observed_losses = 1
class ExacCnvFactory(factory.django.DjangoModelFactory):
    """Factory for ``ExacCnv`` test records (ExAC copy-number variants)."""
    class Meta:
        model = ExacCnv
    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    sv_type = "DEL"
    # Cycle through the population codes declared in EXAC_POP_CHOICES.
    population = factory.Iterator([x[0] for x in EXAC_POP_CHOICES])
    phred_score = factory.Iterator(list(range(30)))
class ThousandGenomesSvFactory(factory.django.DjangoModelFactory):
    """Factory for ``ThousandGenomesSv`` test records (1000 Genomes SVs)."""
    class Meta:
        model = ThousandGenomesSv
    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    # Breakpoint confidence intervals (left offsets are negative).
    start_ci_left = -100
    start_ci_right = 100
    end_ci_left = -100
    end_ci_right = 100
    sv_type = "DEL"
    source_call_set = "DEL_delly"
    mobile_element_info = []
    # Overall allele counts: one het carrier out of one sample.
    num_samples = 1
    num_alleles = 2
    num_var_alleles = 1
    # Per super-population allele counts; only AFR carries the variant here.
    num_alleles_afr = 2
    num_var_alleles_afr = 1
    num_alleles_amr = 0
    num_var_alleles_amr = 0
    num_alleles_eas = 0
    num_var_alleles_eas = 0
    num_alleles_eur = 0
    num_var_alleles_eur = 0
    num_alleles_sas = 0
    num_var_alleles_sas = 0
class DbVarSvFactory(factory.django.DjangoModelFactory):
    """Factory for ``DbVarSv`` test records (dbVar structural variants)."""
    class Meta:
        model = DbVarSv
    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    num_carriers = 1
    sv_type = "DEL"
    method = "Sequencing"
    analysis = "Read_depth"
    platform = factory.Sequence(lambda n: "DBVAR-PLATFORM-%d" % n)
    study = factory.Sequence(lambda n: "DBVAR-STUDY-%d" % n)
    clinical_assertions = []
    clinvar_accessions = []
    bin_size = "large"
    # Insertion length bounds are irrelevant for deletions.
    min_ins_length = None
    max_ins_length = None
class GnomAdSvFactory(factory.django.DjangoModelFactory):
    """Factory for ``GnomAdSv`` test records (gnomAD-SV structural variants)."""
    class Meta:
        model = GnomAdSv
    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    ref = "N"
    alt = ["<DUP>"]
    name = [factory.Sequence(lambda n: "DBVAR-SV-%d" % n)]
    svtype = "DEL"
    svlen = 100
    filter = ["PASS"]
    evidence = ["BAF", "RD"]
    algorithms = ["depth"]
    chr2 = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    # Complex-variant annotations; unused for this simple DEL fixture.
    cpx_type = None
    cpx_intervals = []
    source = None
    strands = None
    unresolved_type = None
    pcrplus_depleted = False
    pesr_gt_overdispersion = False
    # Protein-coding consequence annotations (all empty for the fixture).
    protein_coding_lof = []
    protein_coding_dup_lof = []
    protein_coding_copy_gain = []
    protein_coding_dup_partial = []
    protein_coding_msv_exon_ovr = []
    protein_coding_intronic = []
    protein_coding_inv_span = []
    protein_coding_utr = []
    protein_coding_nearest_tss = []
    protein_coding_intergenic = False
    protein_coding_promoter = []
    # Overall frequencies: one het genotype out of one bi-allelic sample.
    an = 2
    ac = [1]
    af = [0.5]
    n_bi_genos = 1
    n_homref = 0
    n_het = 1
    n_homalt = 0
    freq_homref = 0.5
    freq_het = 0.5
    freq_homalt = 0.0
    popmax_af = 0.5
    # Per-population counts; only AFR carries an allele in this fixture.
    afr_an = 1
    afr_ac = [1]
    afr_af = [0.5]
    afr_n_bi_genos = 0
    afr_n_homref = 0
    afr_n_het = 0
    afr_n_homalt = 0
    afr_freq_homref = 0.0
    afr_freq_het = 0.0
    afr_freq_homalt = 0.0
    amr_an = 0
    amr_ac = [0]
    amr_af = [0.0]
    amr_n_bi_genos = 0
    amr_n_homref = 0
    amr_n_het = 0
    amr_n_homalt = 0
    amr_freq_homref = 0.0
    amr_freq_het = 0.0
    amr_freq_homalt = 0.0
    eas_an = 0
    eas_ac = [0]
    eas_af = [0.0]
    eas_n_bi_genos = 0
    eas_n_homref = 0
    eas_n_het = 0
    eas_n_homalt = 0
    eas_freq_homref = 0.0
    eas_freq_het = 0.0
    eas_freq_homalt = 0.0
    eur_an = 0
    eur_ac = [0]
    eur_af = [0.0]
    eur_n_bi_genos = 0
    eur_n_homref = 0
    eur_n_het = 0
    eur_n_homalt = 0
    eur_freq_homref = 0.0
    eur_freq_het = 0.0
    eur_freq_homalt = 0.0
    oth_an = 0
    oth_ac = [0]
    oth_af = [0.0]
    oth_n_bi_genos = 0
    oth_n_homref = 0
    oth_n_het = 0
    oth_n_homalt = 0
    oth_freq_homref = 0.0
    oth_freq_het = 0.0
    oth_freq_homalt = 0.0
| 27.439216 | 97 | 0.624125 |
186a3f1c7692173c034ff51a1ee3eec118aa4d7f | 776 | py | Python | debian/python-nova/usr/lib/python2.7/dist-packages/nova/tests/compute/__init__.py | bopopescu/stacklab-nova | 4ab1698659b663ef222255610d1a5c042706dd65 | [
"Apache-2.0"
] | null | null | null | debian/python-nova/usr/lib/python2.7/dist-packages/nova/tests/compute/__init__.py | bopopescu/stacklab-nova | 4ab1698659b663ef222255610d1a5c042706dd65 | [
"Apache-2.0"
] | null | null | null | debian/python-nova/usr/lib/python2.7/dist-packages/nova/tests/compute/__init__.py | bopopescu/stacklab-nova | 4ab1698659b663ef222255610d1a5c042706dd65 | [
"Apache-2.0"
] | 1 | 2020-07-24T08:31:57.000Z | 2020-07-24T08:31:57.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *
| 38.8 | 78 | 0.731959 |
f3f332f1ff4e8eed822a742cea8350ea68c4608b | 4,228 | py | Python | python/ray/tune/suggest/__init__.py | carlos-aguayo/ray | fedbdd5dc6a47aa9cba170816f8c0950193b4fd6 | [
"Apache-2.0"
] | 1 | 2020-11-09T04:14:58.000Z | 2020-11-09T04:14:58.000Z | python/ray/tune/suggest/__init__.py | carlos-aguayo/ray | fedbdd5dc6a47aa9cba170816f8c0950193b4fd6 | [
"Apache-2.0"
] | null | null | null | python/ray/tune/suggest/__init__.py | carlos-aguayo/ray | fedbdd5dc6a47aa9cba170816f8c0950193b4fd6 | [
"Apache-2.0"
] | null | null | null | from ray.tune.suggest.search import SearchAlgorithm
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest.suggestion import Searcher, ConcurrencyLimiter
from ray.tune.suggest.search_generator import SearchGenerator
from ray.tune.suggest.variant_generator import grid_search
from ray.tune.suggest.repeater import Repeater
def create_searcher(
    search_alg,
    metric=None,
    mode=None,
    **kwargs,
):
    """Build a search algorithm instance from its string identifier.

    Useful for swapping between different search algorithms without
    changing code.

    Args:
        search_alg (str): Name of the search algorithm (case-insensitive).
        metric (str): The training result objective value attribute.
            Stopping procedures will use this attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        **kwargs: Additional parameters, forwarded to the constructor of
            the chosen searcher class.

    Returns:
        ray.tune.suggest.Searcher: The search algorithm.

    Raises:
        ValueError: If ``search_alg`` does not name a known searcher.

    Example:
        >>> search_alg = tune.create_searcher('ax')
    """
    # Importers are nested and lazy so that only the selected searcher's
    # (optional) third-party dependencies need to be installed.
    def _ax():
        from ray.tune.suggest.ax import AxSearch
        return AxSearch

    def _dragonfly():
        from ray.tune.suggest.dragonfly import DragonflySearch
        return DragonflySearch

    def _skopt():
        from ray.tune.suggest.skopt import SkOptSearch
        return SkOptSearch

    def _hyperopt():
        from ray.tune.suggest.hyperopt import HyperOptSearch
        return HyperOptSearch

    def _bayesopt():
        from ray.tune.suggest.bayesopt import BayesOptSearch
        return BayesOptSearch

    def _bohb():
        from ray.tune.suggest.bohb import TuneBOHB
        return TuneBOHB

    def _nevergrad():
        from ray.tune.suggest.nevergrad import NevergradSearch
        return NevergradSearch

    def _optuna():
        from ray.tune.suggest.optuna import OptunaSearch
        return OptunaSearch

    def _zoopt():
        from ray.tune.suggest.zoopt import ZOOptSearch
        return ZOOptSearch

    def _sigopt():
        from ray.tune.suggest.sigopt import SigOptSearch
        return SigOptSearch

    SEARCH_ALG_IMPORT = {
        "ax": _ax,
        "dragonfly": _dragonfly,
        "skopt": _skopt,
        "hyperopt": _hyperopt,
        "bayesopt": _bayesopt,
        "bohb": _bohb,
        "nevergrad": _nevergrad,
        "optuna": _optuna,
        "zoopt": _zoopt,
        "sigopt": _sigopt,
    }

    search_alg = search_alg.lower()
    if search_alg not in SEARCH_ALG_IMPORT:
        raise ValueError(
            f"Search alg must be one of {list(SEARCH_ALG_IMPORT)}. "
            f"Got: {search_alg}")
    searcher_cls = SEARCH_ALG_IMPORT[search_alg]()
    return searcher_cls(metric=metric, mode=mode, **kwargs)
# Public API re-exported by this package.
__all__ = [
    "SearchAlgorithm", "Searcher", "BasicVariantGenerator", "SearchGenerator",
    "grid_search", "Repeater", "ConcurrencyLimiter"
]
def BayesOptSearch(*args, **kwargs):
    # Deprecated stub kept so old import paths fail loudly; the real class
    # now lives in ray.tune.suggest.bayesopt.
    moved_note = """This class has been moved. Please import via
    `from ray.tune.suggest.bayesopt import BayesOptSearch`"""
    raise DeprecationWarning(moved_note)
def HyperOptSearch(*args, **kwargs):
    # Deprecated stub; the real class now lives in ray.tune.suggest.hyperopt.
    moved_note = """This class has been moved. Please import via
    `from ray.tune.suggest.hyperopt import HyperOptSearch`"""
    raise DeprecationWarning(moved_note)
def NevergradSearch(*args, **kwargs):
    # Deprecated stub; the real class now lives in ray.tune.suggest.nevergrad.
    moved_note = """This class has been moved. Please import via
    `from ray.tune.suggest.nevergrad import NevergradSearch`"""
    raise DeprecationWarning(moved_note)
def SkOptSearch(*args, **kwargs):
    # Deprecated stub; the real class now lives in ray.tune.suggest.skopt.
    moved_note = """This class has been moved. Please import via
    `from ray.tune.suggest.skopt import SkOptSearch`"""
    raise DeprecationWarning(moved_note)
def SigOptSearch(*args, **kwargs):
    # Deprecated stub; the real class now lives in ray.tune.suggest.sigopt.
    moved_note = """This class has been moved. Please import via
    `from ray.tune.suggest.sigopt import SigOptSearch`"""
    raise DeprecationWarning(moved_note)
| 33.824 | 78 | 0.701514 |
fe18acf4bfb2e7d9d46def6c72b8063a583711a9 | 566 | py | Python | PP4E-Examples-1.4/Examples/PP4E/Gui/Tour/dialogTable.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/Gui/Tour/dialogTable.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/Gui/Tour/dialogTable.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | # define a name:callback demos table
from tkinter.filedialog import askopenfilename # get standard dialogs
from tkinter.colorchooser import askcolor # they live in Lib\tkinter
from tkinter.messagebox import askquestion, showerror
from tkinter.simpledialog import askfloat
demos = {
'Open': askopenfilename,
'Color': askcolor,
'Query': lambda: askquestion('Warning', 'You typed "rm *"\nConfirm?'),
'Error': lambda: showerror('Error!', "He's dead, Jim"),
'Input': lambda: askfloat('Entry', 'Enter credit card number')
}
| 37.733333 | 82 | 0.696113 |
9c35c1f545132156bdda89331f7ca11282215e16 | 6,543 | py | Python | secretScript_german.py | grapealope/secrets | c26b99824d004c26e76c5672d293abd48cc1052e | [
"MIT"
] | 1 | 2018-02-22T03:42:45.000Z | 2018-02-22T03:42:45.000Z | secretScript_german.py | grapealope/secrets | c26b99824d004c26e76c5672d293abd48cc1052e | [
"MIT"
] | null | null | null | secretScript_german.py | grapealope/secrets | c26b99824d004c26e76c5672d293abd48cc1052e | [
"MIT"
] | null | null | null | import os, sys
import json
import csv
import collections
import random
from pprint import pprint
from playsound import playsound
import datetime
from six import string_types
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
from pollySpeak import newSession, pollySpeech
from concatMp3 import concatMp3
from nltk import tokenize
from google.cloud import translate
import utils
from utils import createTimestampedDir
from pollySpeak import newSession, pollySpeech, speakSecrets
# Do this in Python 2 to get around unicode issue
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# Defaults (integrate this into main, and with keyword args)
from params import *
# datapath = '/Users/kalmar/Documents/code/secrets/secrets_data/secrets_edit_italian.json'
# datapath = '/Users/kalmar/Documents/code/secrets/secrets_data/secrets_english.json'
# datapath = '/Users/kalmar/Documents/code/secrets/secrets_data/secrets_edit_german_new.json'
datapath = '/Users/kalmar/Documents/code/secrets/secrets_data/secrets_german_berlin.json'
# Load the secrets corpus: a list of dicts with at least 'language' and 'publish' keys.
secrets = []
with open(datapath) as data_file:
    secrets = json.load(data_file)
# Tuning knobs: share of German secrets, whisper probability, secrets per merged mp3.
frequency_german = 0.7
whisperFreq = 0.15
secrets_per_file = 70
# Assign a language to unlabeled secrets by a weighted coin flip; tally both languages.
tally_de = 0
tally_en = 0
for secret in secrets:
    if secret['language'] == '':
        die_roll = random.random()
        if die_roll < frequency_german:
            secret['language'] = 'de'
            tally_de += 1
        else:
            secret['language'] = 'en'
            tally_en += 1
    elif secret['language'] == 'de':
        tally_de += 1
    elif secret['language'] == 'en':
        tally_en += 1
    else:
        print("language '{}' is invalid!".format(secret['language']))
print(tally_de, tally_en)
# Keep only publishable secrets, shuffle, and cap at 140 German / 60 English.
german_secrets = [secret for secret in secrets if secret['language'] == 'de' and secret['publish']]
english_secrets = [secret for secret in secrets if secret['language'] == 'en' and secret['publish']]
random.shuffle(german_secrets)
random.shuffle(english_secrets)
german_secrets = german_secrets[0:140]
english_secrets = english_secrets[0:60]
# make list of german secrets and english secrets
# each pass through the loop, randomly speak and remove one secret from either queue
# continue until all secrets have been used
mp3path = createTimestampedDir(mp3path_base)
fdx = 0
idx = 0
done = False
secrets_done = False
while not done:
    # roll a die, if value is less than the threshold, speak a german secret
    die_roll = random.random()
    if die_roll < frequency_german and len(german_secrets) > 0:
        print(fdx, idx, 'de')
        # choose a random secret from the list of german secrets
        secret = german_secrets.pop()
        speakSecrets([secret], germanVoiceIds, mp3path, language='de', whisperFreq=whisperFreq, concatSecretMp3s=False, outputFileIdx=idx)
        idx+=1
    elif die_roll >= frequency_german and len(english_secrets) > 0:
        print(fdx, idx, 'en')
        secret = english_secrets.pop()
        speakSecrets([secret], englishVoiceIds, mp3path, language='en', whisperFreq=whisperFreq, concatSecretMp3s=False, outputFileIdx=idx)
        idx+=1
    # NOTE(review): if neither branch fired (the rolled queue was empty), idx
    # does not advance and the modulo condition below can re-trigger a merge
    # of the same range with an incremented fdx -- confirm this is intended.
    secrets_done = (len(german_secrets) == 0) and (len(english_secrets) == 0)
    if (idx % secrets_per_file == 0) or secrets_done:
        range_start = fdx * secrets_per_file
        range_stop = idx-1
        print('range: {}-{}'.format(range_start, range_stop))
        print('mergedSecrets-{}'.format(fdx))
        # concatMp3(mp3path + '/', file_name='mergedSecrets-{}'.format(fdx), file_padding='random', range_start=range_start, range_stop=range_stop, verbose=True)
        concatMp3(mp3path + '/', file_name='mergedSecrets-{}'.format(fdx), file_padding='random', random_min=3000, random_max=40000,
              range_start=range_start, range_stop=range_stop, verbose=True)
        fdx+=1
    if secrets_done:
        done = True
# Re-merge the per-secret mp3s into 8 files with shorter random padding.
for fdx in range(0,8):
    range_start = fdx * secrets_per_file
    range_stop = range_start + secrets_per_file
    print('{} range: {}-{}'.format(fdx, range_start, range_stop))
    concatMp3(mp3path + '/', file_name='mergedSecrets-padded-10-{}'.format(fdx), file_padding='random', random_min=1500, random_max=10000,
          range_start=range_start, range_stop=range_stop, verbose=True)
# ------------------------------------------ #
# Scratch / one-off rendering and re-padding runs kept from interactive sessions.
# English
voiceIds = ['Joanna', 'Kendra', 'Amy', 'Joey', 'Brian']
speakSecrets(english_secrets[0:70], voiceIds, createTimestampedDir(mp3path_base), randVoice=True)
# ------------------------------------------ #
# German
# make list of german secrets and english secrets
# each pass through the loop, randomly speak and remove one secret from either queue
# continue until all secrets have been used
voiceIds = ['Vicki', 'Marlene', 'Hans']
speakSecrets(secrets[0:15], voiceIds, createTimestampedDir(mp3path_base),
             randVoice=True,
             language='de',
             target_lang='de')
# ------------------------------------------ #
# Italian
# NOTE(review): italian_secrets is not defined anywhere in this script --
# presumably left over from an earlier session; verify before running this section.
voiceIds = ['Carla', 'Giorgio']
speakSecrets(italian_secrets[0:50], voiceIds, createTimestampedDir(mp3path_base),
             randVoice=True,
             language='it',
             target_lang='it')
speakSecrets(italian_secrets[50:100], voiceIds, createTimestampedDir(mp3path_base),
             randVoice=True,
             language='it',
             target_lang='it')
speakSecrets(italian_secrets[100:], voiceIds, createTimestampedDir(mp3path_base),
             randVoice=True,
             language='it',
             target_lang='it')
# ------------------------------------------ #
# Fix timing
# Re-pad previously rendered sessions with fixed or random inter-secret gaps.
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-11-10-test'
concatMp3(mp3path + '/', file_padding=45000)
concatMp3(mp3path + '/', file_padding='random')
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-11-10-1'
concatMp3(mp3path + '/', file_padding='random')
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-11-10-2'
concatMp3(mp3path + '/', file_padding='random')
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-11-10-3'
concatMp3(mp3path + '/', file_padding='random')
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-11-10-4'
concatMp3(mp3path + '/', file_padding='random')
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-11-10-5'
concatMp3(mp3path + '/', file_padding='random')
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-05-20-03-25'
concatMp3(mp3path + '/', file_padding=mp3_padding)
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-05-20-03-43'
concatMp3(mp3path + '/', file_padding=mp3_padding)
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-05-20-03-48'
concatMp3(mp3path + '/', file_padding=mp3_padding)
mp3path = '/Users/kalmar/Documents/code/secrets/audio/2017-05-20-03-52'
concatMp3(mp3path + '/', file_padding=mp3_padding)
703410bf2adaf81ca8ad0c6fcb70457aaa8c5793 | 1,254 | py | Python | src/bobbit/modules/suggest.py | camicarballo/camelot-bobbit | d0fbb1e2080ff533b0f5ae5194cc5d5b934b6b5f | [
"MIT"
] | null | null | null | src/bobbit/modules/suggest.py | camicarballo/camelot-bobbit | d0fbb1e2080ff533b0f5ae5194cc5d5b934b6b5f | [
"MIT"
] | null | null | null | src/bobbit/modules/suggest.py | camicarballo/camelot-bobbit | d0fbb1e2080ff533b0f5ae5194cc5d5b934b6b5f | [
"MIT"
] | null | null | null | # suggest.py
import logging
# Metadata
NAME = 'suggest'
ENABLE = True
PATTERN = r'^!suggest (?P<target>[-\w#]*) (?P<suggestion>.*)'
USAGE = '''Usage: !suggest <channel> <message>
This anonymously sends a message to the specified channel.
Example:
> !suggest #cse-40175-fa18 what about bob?
'''
# Constants
WHITELIST = []
TEMPLATE = '{color}{red}Anonymous coward suggests{color}: {suggestion}'
# Command
async def suggest(bot, message, target, suggestion):
    """Anonymously relay *suggestion* to the *target* channel.

    Returns None for highlighted (directly-addressed) messages, a rejection
    reply for non-whitelisted channels, and otherwise a copy of the message
    re-addressed to the target channel with an anonymized nick.
    """
    if message.highlighted:
        return
    # Normalize the channel name so "!suggest chan ..." also works.
    if not target.startswith('#'):
        target = '#' + target
    # Idiom fix: `target not in` instead of `not target in` (same behavior).
    if target not in WHITELIST:
        return message.with_body(f'Channel {target} not allowed')
    # Log the real sender so abuse can still be traced server-side.
    logging.info('Anonymous message from %s: %s', message.nick, message.body)
    return message.copy(
        body = bot.client.format_text(TEMPLATE, suggestion=suggestion),
        nick = 'anonymous',
        channel = target,
    )
# Register
def register(bot):
    """Load module configuration overrides and expose the command hook."""
    global WHITELIST, TEMPLATE
    settings = bot.config.load_module_config('suggest')
    WHITELIST = settings.get('whitelist', WHITELIST)
    TEMPLATE = settings.get('template', TEMPLATE)
    return (('command', PATTERN, suggest),)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
| 23.222222 | 77 | 0.649123 |
d79b7743a7d529fdd79614f79ef096611a92c468 | 20,864 | py | Python | Cython/Build/IpythonMagic.py | aiudirog/cython | 37a13a5563a1108dca15ffc45e2b614bbf7db94f | [
"Apache-2.0"
] | 1 | 2019-06-05T00:10:34.000Z | 2019-06-05T00:10:34.000Z | Cython/Build/IpythonMagic.py | aiudirog/cython | 37a13a5563a1108dca15ffc45e2b614bbf7db94f | [
"Apache-2.0"
] | null | null | null | Cython/Build/IpythonMagic.py | aiudirog/cython | 37a13a5563a1108dca15ffc45e2b614bbf7db94f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
=====================
Cython related magics
=====================
Magic command interface for interactive work with Cython
.. note::
The ``Cython`` package needs to be installed separately. It
can be obtained using ``easy_install`` or ``pip``.
Usage
=====
To enable the magics below, execute ``%load_ext cython``.
``%%cython``
{CYTHON_DOC}
``%%cython_inline``
{CYTHON_INLINE_DOC}
``%%cython_pyximport``
{CYTHON_PYXIMPORT_DOC}
Author:
* Brian Granger
Code moved from IPython and adapted by:
* Martín Gaitán
Parts of this code were taken from Cython.inline.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file ipython-COPYING.rst, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import imp
import io
import os
import re
import sys
import time
import copy
import distutils.log
import textwrap
try:
reload
except NameError: # Python 3
from imp import reload
import hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from IPython.core import display
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.utils import py3compat
try:
from IPython.paths import get_ipython_cache_dir
except ImportError:
# older IPython version
from IPython.utils.path import get_ipython_cache_dir
from IPython.utils.text import dedent
from ..Shadow import __version__ as cython_version
from ..Compiler.Errors import CompileError
from .Inline import cython_inline
from .Dependencies import cythonize
# Compiler flags for profile-guided optimisation, keyed by compiler name.
# 'gen' flags build an instrumented binary; 'use' flags rebuild with the
# collected profile. {TEMPDIR} is presumably substituted with the profiling
# work directory where these flags are applied -- confirm at the call site.
PGO_CONFIG = {
    'gcc': {
        'gen': ['-fprofile-generate', '-fprofile-dir={TEMPDIR}'],
        'use': ['-fprofile-use', '-fprofile-correction', '-fprofile-dir={TEMPDIR}'],
    },
    # blind copy from 'configure' script in CPython 3.7
    'icc': {
        'gen': ['-prof-gen'],
        'use': ['-prof-use'],
    }
}
# MinGW uses the gcc toolchain, so it shares gcc's flag set.
PGO_CONFIG['mingw32'] = PGO_CONFIG['gcc']
@magics_class
class CythonMagics(Magics):
    def __init__(self, shell):
        """Initialise the magics and per-session bookkeeping state."""
        super(CythonMagics, self).__init__(shell)
        # module name -> previously imported module, used by %%cython_pyximport
        self._reloads = {}
        # cache of compiled cell code; populated elsewhere in this class
        self._code_cache = {}
        # whether pyximport.install() has already run in this session
        self._pyximport_installed = False
def _import_all(self, module):
mdict = module.__dict__
if '__all__' in mdict:
keys = mdict['__all__']
else:
keys = [k for k in mdict if not k.startswith('_')]
for k in keys:
try:
self.shell.push({k: mdict[k]})
except KeyError:
msg = "'module' object has no attribute '%s'" % k
raise AttributeError(msg)
@cell_magic
def cython_inline(self, line, cell):
"""Compile and run a Cython code cell using Cython.inline.
This magic simply passes the body of the cell to Cython.inline
and returns the result. If the variables `a` and `b` are defined
in the user's namespace, here is a simple example that returns
their sum::
%%cython_inline
return a+b
For most purposes, we recommend the usage of the `%%cython` magic.
"""
locs = self.shell.user_global_ns
globs = self.shell.user_ns
return cython_inline(cell, locals=locs, globals=globs)
    @cell_magic
    def cython_pyximport(self, line, cell):
        """Compile and import a Cython code cell using pyximport.
        The contents of the cell are written to a `.pyx` file in the current
        working directory, which is then imported using `pyximport`. This
        magic requires a module name to be passed::
            %%cython_pyximport modulename
            def f(x):
                return 2.0*x
        The compiled module is then imported and all of its symbols are
        injected into the user's namespace. For most purposes, we recommend
        the usage of the `%%cython` magic.
        """
        module_name = line.strip()
        if not module_name:
            raise ValueError('module name must be given')
        # Write the cell body to <module_name>.pyx so pyximport can find it.
        fname = module_name + '.pyx'
        with io.open(fname, 'w', encoding='utf-8') as f:
            f.write(cell)
        # Install the pyximport import hook once per session.
        if 'pyximport' not in sys.modules or not self._pyximport_installed:
            import pyximport
            pyximport.install()
            self._pyximport_installed = True
        if module_name in self._reloads:
            module = self._reloads[module_name]
            # Note: reloading extension modules is not actually supported
            # (requires PEP-489 reinitialisation support).
            # Don't know why this should ever have worked as it reads here.
            # All we really need to do is to update the globals below.
            #reload(module)
        else:
            __import__(module_name)
            module = sys.modules[module_name]
            self._reloads[module_name] = module
        # Inject the module's public names into the user's namespace.
        self._import_all(module)
    @magic_arguments.magic_arguments()
    @magic_arguments.argument(
        '-a', '--annotate', nargs='?', const="default", type=str,
        choices={"default","fullc"},
        help="Produce a colorized HTML version of the source. "
             "Use --annotate=fullc to include entire "
             "generated C/C++-code."
    )
    @magic_arguments.argument(
        '-+', '--cplus', action='store_true', default=False,
        help="Output a C++ rather than C file."
    )
    @magic_arguments.argument(
        '-3', dest='language_level', action='store_const', const=3, default=None,
        help="Select Python 3 syntax."
    )
    @magic_arguments.argument(
        '-2', dest='language_level', action='store_const', const=2, default=None,
        help="Select Python 2 syntax."
    )
    @magic_arguments.argument(
        '-f', '--force', action='store_true', default=False,
        help="Force the compilation of a new module, even if the source has been "
             "previously compiled."
    )
    @magic_arguments.argument(
        '-c', '--compile-args', action='append', default=[],
        help="Extra flags to pass to compiler via the `extra_compile_args` "
             "Extension flag (can be specified multiple times)."
    )
    @magic_arguments.argument(
        '--link-args', action='append', default=[],
        help="Extra flags to pass to linker via the `extra_link_args` "
             "Extension flag (can be specified multiple times)."
    )
    @magic_arguments.argument(
        '-l', '--lib', action='append', default=[],
        help="Add a library to link the extension against (can be specified "
             "multiple times)."
    )
    @magic_arguments.argument(
        '-n', '--name',
        help="Specify a name for the Cython module."
    )
    @magic_arguments.argument(
        '-L', dest='library_dirs', metavar='dir', action='append', default=[],
        help="Add a path to the list of library directories (can be specified "
             "multiple times)."
    )
    @magic_arguments.argument(
        '-I', '--include', action='append', default=[],
        help="Add a path to the list of include directories (can be specified "
             "multiple times)."
    )
    @magic_arguments.argument(
        '-S', '--src', action='append', default=[],
        help="Add a path to the list of src files (can be specified "
             "multiple times)."
    )
    @magic_arguments.argument(
        '--pgo', dest='pgo', action='store_true', default=False,
        help=("Enable profile guided optimisation in the C compiler. "
              "Compiles the cell twice and executes it in between to generate a runtime profile.")
    )
    @magic_arguments.argument(
        '--verbose', dest='quiet', action='store_false', default=True,
        help=("Print debug information like generated .c/.cpp file location "
              "and exact gcc/g++ command invoked.")
    )
    @cell_magic
    def cython(self, line, cell):
        """Compile and import everything from a Cython code cell.

        The contents of the cell are written to a `.pyx` file in the
        directory `IPYTHONDIR/cython` using a filename with the hash of the
        code. This file is then cythonized and compiled. The resulting module
        is imported and all of its symbols are injected into the user's
        namespace. The usage is similar to that of `%%cython_pyximport` but
        you don't have to pass a module name::

            %%cython
            def f(x):
                return 2.0*x

        To compile OpenMP codes, pass the required `--compile-args`
        and `--link-args`. For example with gcc::

            %%cython --compile-args=-fopenmp --link-args=-fopenmp
            ...

        To enable profile guided optimisation, pass the ``--pgo`` option.
        Note that the cell itself needs to take care of establishing a suitable
        profile when executed. This can be done by implementing the functions to
        optimise, and then calling them directly in the same cell on some realistic
        training data like this::

            %%cython --pgo
            def critical_function(data):
                for item in data:
                    ...

            # execute function several times to build profile
            from somewhere import some_typical_data
            for _ in range(100):
                critical_function(some_typical_data)

        In Python 3.5 and later, you can distinguish between the profile and
        non-profile runs as follows::

            if "_pgo_" in __name__:
                ... # execute critical code here
        """
        args = magic_arguments.parse_argstring(self.cython, line)
        # Ensure the source ends with a newline so Cython's parser is happy.
        code = cell if cell.endswith('\n') else cell + '\n'
        lib_dir = os.path.join(get_ipython_cache_dir(), 'cython')
        # The cache key captures everything that can affect the built binary:
        # source text, magic options, interpreter and Cython version.
        key = (code, line, sys.version_info, sys.executable, cython_version)
        if not os.path.exists(lib_dir):
            os.makedirs(lib_dir)
        if args.pgo:
            key += ('pgo',)
        if args.force:
            # Force a new module name by adding the current time to the
            # key which is hashed to determine the module name.
            key += (time.time(),)
        if args.name:
            module_name = py3compat.unicode_to_str(args.name)
        else:
            # Hash the key into a stable, importable module name.
            module_name = "_cython_magic_" + hashlib.sha1(str(key).encode('utf-8')).hexdigest()
        html_file = os.path.join(lib_dir, module_name + '.html')
        module_path = os.path.join(lib_dir, module_name + self.so_ext)
        have_module = os.path.isfile(module_path)
        # PGO always rebuilds (it needs a fresh profile); otherwise reuse the
        # cached shared library when it already exists.
        need_cythonize = args.pgo or not have_module
        if args.annotate:
            # Re-cythonize when annotation was requested but no HTML exists yet.
            if not os.path.isfile(html_file):
                need_cythonize = True
        extension = None
        if need_cythonize:
            extensions = self._cythonize(module_name, code, lib_dir, args, quiet=args.quiet)
            # NOTE(review): _cythonize returns None on CompileError, which
            # makes this assert fail with a TypeError rather than a message.
            assert len(extensions) == 1
            extension = extensions[0]
            self._code_cache[key] = module_name
        if args.pgo:
            self._profile_pgo_wrapper(extension, lib_dir)
        # With extension=None (cached module, no rebuild) the build command
        # has no extensions configured and is effectively a no-op.
        self._build_extension(extension, lib_dir, pgo_step_name='use' if args.pgo else None,
                              quiet=args.quiet)
        # NOTE(review): imp.load_dynamic is deprecated (removed in Python
        # 3.12); an importlib-based replacement would be needed there.
        module = imp.load_dynamic(module_name, module_path)
        self._import_all(module)
        if args.annotate:
            try:
                with io.open(html_file, encoding='utf-8') as f:
                    annotated_html = f.read()
            except IOError as e:
                # File could not be opened. Most likely the user has a version
                # of Cython before 0.15.1 (when `cythonize` learned the
                # `force` keyword argument) and has already compiled this
                # exact source without annotation.
                print('Cython completed successfully but the annotated '
                      'source could not be read.', file=sys.stderr)
                print(e, file=sys.stderr)
            else:
                return display.HTML(self.clean_annotated_html(annotated_html))
    def _profile_pgo_wrapper(self, extension, lib_dir):
        """
        Generate a .c file for a separate extension module that calls the
        module init function of the original module. This makes sure that the
        PGO profiler sees the correct .o file of the final module, but it still
        allows us to import the module under a different name for profiling,
        before recompiling it into the PGO optimised module. Overwriting and
        reimporting the same shared library is not portable.
        """
        extension = copy.copy(extension)  # shallow copy, do not modify sources in place!
        module_name = extension.name
        pgo_module_name = '_pgo_' + module_name
        pgo_wrapper_c_file = os.path.join(lib_dir, pgo_module_name + '.c')
        # The wrapper's init function simply forwards to the real module's
        # init; on Python 2 it additionally aliases the module under the
        # '_pgo_' name in sys.modules.
        with io.open(pgo_wrapper_c_file, 'w', encoding='utf-8') as f:
            f.write(textwrap.dedent(u"""
            #include "Python.h"
            #if PY_MAJOR_VERSION < 3
            extern PyMODINIT_FUNC init%(module_name)s(void);
            PyMODINIT_FUNC init%(pgo_module_name)s(void); /*proto*/
            PyMODINIT_FUNC init%(pgo_module_name)s(void) {
                PyObject *sys_modules;
                init%(module_name)s(); if (PyErr_Occurred()) return;
                sys_modules = PyImport_GetModuleDict(); /* borrowed, no exception, "never" fails */
                if (sys_modules) {
                    PyObject *module = PyDict_GetItemString(sys_modules, "%(module_name)s"); if (!module) return;
                    PyDict_SetItemString(sys_modules, "%(pgo_module_name)s", module);
                    Py_DECREF(module);
                }
            }
            #else
            extern PyMODINIT_FUNC PyInit_%(module_name)s(void);
            PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void); /*proto*/
            PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void) {
                return PyInit_%(module_name)s();
            }
            #endif
            """ % {'module_name': module_name, 'pgo_module_name': pgo_module_name}))
        extension.sources = extension.sources + [pgo_wrapper_c_file]  # do not modify in place!
        extension.name = pgo_module_name
        # Build with profile-*generation* flags, then import so that running
        # the cell produces the runtime profile for the final 'use' build.
        self._build_extension(extension, lib_dir, pgo_step_name='gen')
        # import and execute module code to generate profile
        so_module_path = os.path.join(lib_dir, pgo_module_name + self.so_ext)
        imp.load_dynamic(pgo_module_name, so_module_path)
    def _cythonize(self, module_name, code, lib_dir, args, quiet=True):
        """
        Write *code* to ``<lib_dir>/<module_name>.pyx`` and run Cython's
        ``cythonize`` over it.

        :return: the list of generated ``Extension`` objects (callers assume
            exactly one), or ``None`` if Cython raised a ``CompileError``.
        """
        pyx_file = os.path.join(lib_dir, module_name + '.pyx')
        pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
        c_include_dirs = args.include
        c_src_files = list(map(str, args.src))
        # Heuristic: a plain substring match on 'numpy' in the cell source
        # triggers adding the NumPy headers -- may over- or under-match.
        if 'numpy' in code:
            import numpy
            c_include_dirs.append(numpy.get_include())
        with io.open(pyx_file, 'w', encoding='utf-8') as f:
            f.write(code)
        extension = Extension(
            name=module_name,
            sources=[pyx_file] + c_src_files,
            include_dirs=c_include_dirs,
            library_dirs=args.library_dirs,
            extra_compile_args=args.compile_args,
            extra_link_args=args.link_args,
            libraries=args.lib,
            language='c++' if args.cplus else 'c',
        )
        try:
            opts = dict(
                quiet=quiet,
                annotate=args.annotate,
                force=True,
                # Default the language level to the running interpreter's
                # major version, capped at 3.
                language_level=min(3, sys.version_info[0]),
            )
            # An explicit -2/-3 flag overrides the derived default.
            if args.language_level is not None:
                assert args.language_level in (2, 3)
                opts['language_level'] = args.language_level
            return cythonize([extension], **opts)
        except CompileError:
            # NOTE(review): the caller asserts len(result) == 1, so this None
            # surfaces there as a TypeError rather than a clean error.
            return None
def _build_extension(self, extension, lib_dir, temp_dir=None, pgo_step_name=None, quiet=True):
build_extension = self._get_build_extension(
extension, lib_dir=lib_dir, temp_dir=temp_dir, pgo_step_name=pgo_step_name)
old_threshold = None
try:
if not quiet:
old_threshold = distutils.log.set_threshold(distutils.log.DEBUG)
build_extension.run()
finally:
if not quiet and old_threshold is not None:
distutils.log.set_threshold(old_threshold)
    def _add_pgo_flags(self, build_extension, step_name, temp_dir):
        """
        Append profile-guided-optimisation compiler/linker flags for
        *step_name* (e.g. 'gen' or 'use') to every extension of
        *build_extension*.

        :return: a list of ``(extra_compile_args, extra_link_args)`` tuples
            holding the pre-modification values, one entry per extension.
        """
        compiler_type = build_extension.compiler.compiler_type
        if compiler_type == 'unix':
            # On 'unix' compilers, refine the type by inspecting the actual
            # command line (e.g. clang masquerading as cc).
            compiler_cmd = build_extension.compiler.compiler_so
            # TODO: we could try to call "[cmd] --version" for better insights
            if not compiler_cmd:
                pass
            # Each test checks both exact list membership and a substring of
            # the first command token (e.g. 'clang' in '/usr/bin/clang-12').
            elif 'clang' in compiler_cmd or 'clang' in compiler_cmd[0]:
                compiler_type = 'clang'
            elif 'icc' in compiler_cmd or 'icc' in compiler_cmd[0]:
                compiler_type = 'icc'
            elif 'gcc' in compiler_cmd or 'gcc' in compiler_cmd[0]:
                compiler_type = 'gcc'
            elif 'g++' in compiler_cmd or 'g++' in compiler_cmd[0]:
                compiler_type = 'gcc'
        config = PGO_CONFIG.get(compiler_type)
        orig_flags = []
        if config and step_name in config:
            flags = [f.format(TEMPDIR=temp_dir) for f in config[step_name]]
            for extension in build_extension.extensions:
                orig_flags.append((extension.extra_compile_args, extension.extra_link_args))
                # Build *new* lists so the saved orig_flags keep the old ones.
                extension.extra_compile_args = extension.extra_compile_args + flags
                extension.extra_link_args = extension.extra_link_args + flags
        else:
            print("No PGO %s configuration known for C compiler type '%s'" % (step_name, compiler_type),
                  file=sys.stderr)
        return orig_flags
@property
def so_ext(self):
"""The extension suffix for compiled modules."""
try:
return self._so_ext
except AttributeError:
self._so_ext = self._get_build_extension().get_ext_filename('')
return self._so_ext
def _clear_distutils_mkpath_cache(self):
"""clear distutils mkpath cache
prevents distutils from skipping re-creation of dirs that have been removed
"""
try:
from distutils.dir_util import _path_created
except ImportError:
pass
else:
_path_created.clear()
    def _get_build_extension(self, extension=None, lib_dir=None, temp_dir=None,
                             pgo_step_name=None, _build_ext=build_ext):
        """
        Create a configured distutils ``build_ext`` command object.

        :param extension: optional ``Extension``; when given it becomes the
            only extension the command will build.
        :param lib_dir: output directory for the built shared library.
        :param temp_dir: build-temporaries directory (defaults to *lib_dir*).
        :param pgo_step_name: when set, ``build_extensions()`` is wrapped so
            PGO flags are injected just before compilation.
        :param _build_ext: base build_ext class (default argument so the PGO
            subclass below can shadow the name without losing the base).
        """
        self._clear_distutils_mkpath_cache()
        dist = Distribution()
        config_files = dist.find_config_files()
        # Deliberately ignore the local setup.cfg -- it belongs to whatever
        # project the user happens to be in, not to this build.
        try:
            config_files.remove('setup.cfg')
        except ValueError:
            pass
        dist.parse_config_files(config_files)
        if not temp_dir:
            temp_dir = lib_dir
        add_pgo_flags = self._add_pgo_flags
        if pgo_step_name:
            base_build_ext = _build_ext
            class _build_ext(_build_ext):
                def build_extensions(self):
                    # Inject PGO flags immediately before compiling.
                    add_pgo_flags(self, pgo_step_name, temp_dir)
                    base_build_ext.build_extensions(self)
        build_extension = _build_ext(dist)
        build_extension.finalize_options()
        if temp_dir:
            temp_dir = py3compat.cast_bytes_py2(temp_dir, encoding=sys.getfilesystemencoding())
            build_extension.build_temp = temp_dir
        if lib_dir:
            lib_dir = py3compat.cast_bytes_py2(lib_dir, encoding=sys.getfilesystemencoding())
            build_extension.build_lib = lib_dir
        if extension is not None:
            build_extension.extensions = [extension]
        return build_extension
@staticmethod
def clean_annotated_html(html):
"""Clean up the annotated HTML source.
Strips the link to the generated C or C++ file, which we do not
present to the user.
"""
r = re.compile('<p>Raw output: <a href="(.*)">(.*)</a>')
html = '\n'.join(l for l in html.splitlines() if not r.match(l))
return html
# Expand the {CYTHON_DOC}/{CYTHON_INLINE_DOC}/{CYTHON_PYXIMPORT_DOC}
# placeholders in the module docstring with the magics' own docstrings.
__doc__ = __doc__.format(
    # rST doesn't see the -+ flag as part of an option list, so we
    # hide it from the module-level docstring.
    CYTHON_DOC=dedent(CythonMagics.cython.__doc__\
        .replace('-+, --cplus', '--cplus ')),
    CYTHON_INLINE_DOC=dedent(CythonMagics.cython_inline.__doc__),
    CYTHON_PYXIMPORT_DOC=dedent(CythonMagics.cython_pyximport.__doc__),
)
| 38.072993 | 114 | 0.610142 |
2db0dad9890943fdf0ae8704d6ddb293ad11ce7e | 1,200 | py | Python | rps.py | bovinezro/rock-paper-scissors | 9ae17827e243a6b5e2f383ad20e76988b11bbbd8 | [
"Unlicense"
] | null | null | null | rps.py | bovinezro/rock-paper-scissors | 9ae17827e243a6b5e2f383ad20e76988b11bbbd8 | [
"Unlicense"
] | null | null | null | rps.py | bovinezro/rock-paper-scissors | 9ae17827e243a6b5e2f383ad20e76988b11bbbd8 | [
"Unlicense"
] | null | null | null | from random import *
picks = ["rock", "paper", "scissors"]


def determine_winner(user, cpu):
    """Return 1 if *user* beats *cpu*, -1 if *cpu* wins, 0 for a tie.

    Choices are encoded 0=rock, 1=paper, 2=scissors; each choice beats the
    one immediately before it modulo 3 (paper>rock, scissors>paper,
    rock>scissors).  The original if-chain compared raw magnitudes
    (``user > cpu``), which wrongly scored scissors (2) against rock (0)
    as a user win; the modular rule fixes that case.
    """
    if user == cpu:
        return 0
    # (user - cpu) % 3 == 1 exactly when the user's pick beats the cpu's.
    return 1 if (user - cpu) % 3 == 1 else -1


def read_user_choice():
    """Prompt until a valid choice is entered; return its index (0-2).

    Accepts full words or single-letter abbreviations, case-insensitively.
    """
    valid = {"rock": 0, "r": 0, "paper": 1, "p": 1, "scissors": 2, "s": 2}
    user = "a"
    while user.lower() not in valid:
        user = input("enter [r]ock, [p]aper, or [s]cissors: ")
    return valid[user.lower()]


def main():
    """Play rounds of rock-paper-scissors until the user declines a replay."""
    replay = "y"
    tally = [0, 0]  # [user wins, user losses]; ties are not tallied
    while replay.lower() == "y":
        cpu = randint(0, 2)
        user = read_user_choice()
        outcome = determine_winner(user, cpu)
        if outcome > 0:
            print(picks[user], "beats", picks[cpu], "so you win!")
            tally[0] = tally[0] + 1
        elif outcome < 0:
            print(picks[user], "loses to", picks[cpu], "so you lose!")
            tally[1] = tally[1] + 1
        else:
            print("you both picked", picks[cpu], "so it's a tie...")
        print(tally[0], "wins,", tally[1], "losses")
        replay = "x"
        while replay.lower() != "y" and replay.lower() != "n":
            replay = input("play again? [y/n]")


if __name__ == "__main__":
    main()
99639dd2c2d3006222ab036ba3f6caf99450b3dc | 2,841 | py | Python | bluezero/central.py | ubiquiti/python-bluezero-REMOVE | 3c60ed33075488343c2ef968d2a965420410125f | [
"MIT"
] | 1 | 2020-02-27T07:23:57.000Z | 2020-02-27T07:23:57.000Z | bluezero/central.py | ubiquiti/python-bluezero-REMOVE | 3c60ed33075488343c2ef968d2a965420410125f | [
"MIT"
] | null | null | null | bluezero/central.py | ubiquiti/python-bluezero-REMOVE | 3c60ed33075488343c2ef968d2a965420410125f | [
"MIT"
] | null | null | null | """Classes that represent the GATT features of a remote device."""
from time import sleep
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Python < 2.7 has no logging.NullHandler; provide a no-op stand-in.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
from bluezero import adapter
from bluezero import device
from bluezero import GATT
# Library logger: WARNING by default, with a NullHandler attached so that
# importing applications that configure no logging see no spurious output.
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
logger.addHandler(NullHandler())
class Central:
    """Create a BLE instance taking the Central role."""
    def __init__(self, device_addr, adapter_addr=None):
        """
        :param device_addr: MAC address of the remote (peripheral) device.
        :param adapter_addr: (optional) MAC address of the local adapter to
            use; when omitted the default adapter is used.
        """
        if adapter_addr is None:
            self.dongle = adapter.Adapter()
            logger.debug('Adapter is: {}'.format(self.dongle.address))
        else:
            self.dongle = adapter.Adapter(adapter_addr)
        # The adapter must be powered before any connection can be made.
        if not self.dongle.powered:
            self.dongle.powered = True
            logger.debug('Adapter was off, now powered on')
        self.rmt_device = device.Device(self.dongle.address, device_addr)
        # Characteristic handles registered via add_characteristic(); they
        # are resolved against the remote GATT database in load_gatt().
        self._characteristics = []
    def add_characteristic(self, srv_uuid, chrc_uuid):
        """
        Specify a characteristic of interest on the remote device by using
        the GATT Service UUID and Characteristic UUID
        :param srv_uuid: 128 bit UUID
        :param chrc_uuid: 128 bit UUID
        :return: the new :obj:`GATT.Characteristic` handle (also retained
            internally so :meth:`load_gatt` can resolve it later)
        """
        chrc_hndl = GATT.Characteristic(self.dongle.address,
                                        self.rmt_device.address,
                                        srv_uuid,
                                        chrc_uuid)
        self._characteristics.append(chrc_hndl)
        return chrc_hndl
    def load_gatt(self):
        """
        Once the remote device has been connected to and the GATT database
        has been resolved then it needs to be loaded.
        :return:
        """
        for chrc in self._characteristics:
            chrc.resolve_gatt()
    @property
    def services_resolved(self):
        """Whether BlueZ has finished resolving the remote GATT database."""
        return self.rmt_device.services_resolved
    @property
    def connected(self):
        """Indicate whether the remote device is currently connected."""
        return self.rmt_device.connected
    def connect(self, profile=None):
        """
        Initiate a connection to the remote device and load
        GATT database once resolved
        :param profile: (optional) profile to use for the connection.
        """
        if profile is None:
            self.rmt_device.connect()
        else:
            self.rmt_device.connect(profile)
        # Busy-poll until BlueZ reports the GATT database as resolved.
        while not self.rmt_device.services_resolved:
            sleep(0.5)
        self.load_gatt()
    def disconnect(self):
        """Disconnect from the remote device."""
        self.rmt_device.disconnect()
    def run(self):
        """Enter the adapter's event loop (blocks; required for notifications)."""
        self.dongle.run()
    def quit(self):
        """Stop the adapter's event loop started by :meth:`run`."""
        self.dongle.quit()
| 29.905263 | 74 | 0.617388 |
567b8a95867aa6056c2679669595d2b9aaf14398 | 181 | py | Python | examples/elixir_rockstar.py | johniek/meteor-rock | 8a6eb3d0966f9a4717293d1d7ee9954aa340c304 | [
"MIT"
] | 1 | 2021-07-31T23:33:42.000Z | 2021-07-31T23:33:42.000Z | examples/elixir_rockstar.py | gombirot/rockstar | 3b8956bbe931a0ac12b6c435f490af524e59bcc4 | [
"MIT"
] | 1 | 2015-07-22T12:26:06.000Z | 2015-07-22T12:26:06.000Z | examples/elixir_rockstar.py | johniek/meteor-rock | 8a6eb3d0966f9a4717293d1d7ee9954aa340c304 | [
"MIT"
] | null | null | null | from RockStar import RockStar
elixir_code = 'IO.puts "Hello world"'
rock_it_bro = RockStar(days=400, file_name='helloWorld.exs', code=elixir_code)
rock_it_bro.make_me_a_rockstar()
| 30.166667 | 78 | 0.801105 |
724ec38204fbc4b181dd3911dbfe768f0af16b07 | 1,541 | py | Python | examples/bio_based/run_test_BBO.py | ashishpatel26/mealpy | 62160e61b8bd4b084e44b80fda720e6bd6332e03 | [
"MIT"
] | 1 | 2021-05-20T06:53:08.000Z | 2021-05-20T06:53:08.000Z | examples/bio_based/run_test_BBO.py | chenyuxiang0425/mealpy | 69e8dc727e15527e31ac5ace1debe92a0bc7d828 | [
"MIT"
] | null | null | null | examples/bio_based/run_test_BBO.py | chenyuxiang0425/mealpy | 69e8dc727e15527e31ac5ace1debe92a0bc7d828 | [
"MIT"
] | 1 | 2020-09-30T21:14:33.000Z | 2020-09-30T21:14:33.000Z | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 03:39, 07/06/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.bio_based.BBO import BaseBBO, OriginalBBO
## Setting parameters
obj_func = F3
# lb = [-15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3]
# ub = [15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3]
lb = [-100]
ub = [100]
problem_size = 200
batch_size = 25
verbose = True
epoch = 1000
pop_size = 50
md1 = BaseBBO(obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)
md1 = OriginalBBO(obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)
| 42.805556 | 105 | 0.452953 |
ab3ea302cb1b54bcb11cc17abdfcafe16e7a3056 | 5,153 | py | Python | textattack/commands/attack/run_attack_single_threaded.py | chong-z/TextAttack | 9842160b558db2118365770029be70782327a40a | [
"MIT"
] | null | null | null | textattack/commands/attack/run_attack_single_threaded.py | chong-z/TextAttack | 9842160b558db2118365770029be70782327a40a | [
"MIT"
] | null | null | null | textattack/commands/attack/run_attack_single_threaded.py | chong-z/TextAttack | 9842160b558db2118365770029be70782327a40a | [
"MIT"
] | null | null | null | """A command line parser to run an attack from user specifications."""
from collections import deque, OrderedDict
import os
import time
import tqdm
import textattack
from .attack_args_helpers import (
parse_attack_from_args,
parse_dataset_from_args,
parse_logger_from_args,
parse_model_from_args,
)
logger = textattack.shared.logger
def run(args, checkpoint=None):
    """Run a single-threaded adversarial attack as specified by *args*.

    :param args: parsed command-line namespace controlling model, attack
        recipe, dataset, logging, interactivity and checkpointing.
    :param checkpoint: optional ``textattack.shared.Checkpoint`` to resume
        from (expected when ``args.checkpoint_resume`` is set).
    :return: the accumulated attack results in dataset mode; interactive
        mode returns ``None`` implicitly.
    """
    # Only use one GPU, if we have one.
    # TODO: Running Universal Sentence Encoder uses multiple GPUs
    if "CUDA_VISIBLE_DEVICES" not in os.environ:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Disable tensorflow logs, except in the case of an error.
    if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    if args.checkpoint_resume:
        # Resume bookkeeping (remaining work + worklist) from the checkpoint.
        num_remaining_attacks = checkpoint.num_remaining_attacks
        worklist = checkpoint.worklist
        worklist_tail = checkpoint.worklist_tail
        logger.info(
            "Recovered from checkpoint previously saved at {}".format(
                checkpoint.datetime
            )
        )
        print(checkpoint, "\n")
    else:
        # Fresh run: attack the first num_examples dataset indices.
        num_remaining_attacks = args.num_examples
        worklist = deque(range(0, args.num_examples))
        worklist_tail = worklist[-1]
    start_time = time.time()
    # Attack
    model = parse_model_from_args(args)
    attack = parse_attack_from_args(args=args, model=model)
    print(attack, "\n")
    # Logger
    if args.checkpoint_resume:
        attack_log_manager = checkpoint.log_manager
    else:
        attack_log_manager = parse_logger_from_args(args)
    load_time = time.time()
    textattack.shared.logger.info(f"Load time: {load_time - start_time}s")
    if args.interactive:
        # Interactive mode: read sentences from stdin and attack one at a time.
        print("Running in interactive mode")
        print("----------------------------")
        while True:
            print('Enter a sentence to attack or "q" to quit:')
            text = input()
            if text == "q":
                break
            if not text:
                continue
            print("Attacking...")
            # '#'-separated input denotes a multi-field example (e.g. premise
            # and hypothesis); fields become an ordered text0/text1/... dict.
            if '#' in text:
                text = text.split('#')
                text = OrderedDict([(f'text{i}', text[i].strip()) for i in range(len(text))])
            attacked_text = textattack.shared.attacked_text.AttackedText(text)
            # Ground-truth label 0 is assumed for ad-hoc interactive input.
            initial_result, _ = attack.goal_function.init_attack_example(attacked_text, 0)
            result = next(attack.attack_dataset([(text, initial_result)]))
            print(result.__str__(color_method="ansi") + "\n")
    else:
        # Not interactive? Use default dataset.
        dataset = parse_dataset_from_args(args)
        pbar = tqdm.tqdm(total=num_remaining_attacks, smoothing=0)
        if args.checkpoint_resume:
            num_results = checkpoint.results_count
            num_failures = checkpoint.num_failed_attacks
            num_successes = checkpoint.num_successful_attacks
        else:
            num_results = 0
            num_failures = 0
            num_successes = 0
        for result in attack.attack_dataset(dataset, indices=worklist):
            attack_log_manager.log_result(result)
            if not args.disable_stdout:
                print("\n")
            # With --attack-n, skipped examples don't count toward the quota:
            # instead the next dataset index is appended to the worklist.
            if (not args.attack_n) or (
                not isinstance(result, textattack.attack_results.SkippedAttackResult)
            ):
                pbar.update(1)
            else:
                # worklist_tail keeps track of highest idx that has been part of worklist
                # Used to get the next dataset element when attacking with `attack_n` = True.
                worklist_tail += 1
                worklist.append(worklist_tail)
            num_results += 1
            if (
                type(result) == textattack.attack_results.SuccessfulAttackResult
                or type(result) == textattack.attack_results.MaximizedAttackResult
            ):
                num_successes += 1
            if type(result) == textattack.attack_results.FailedAttackResult:
                num_failures += 1
            pbar.set_description(
                "[Succeeded / Failed / Total] {} / {} / {}".format(
                    num_successes, num_failures, num_results
                )
            )
            # Periodically persist progress so the run can be resumed.
            if (
                args.checkpoint_interval
                and len(attack_log_manager.results) % args.checkpoint_interval == 0
            ):
                new_checkpoint = textattack.shared.Checkpoint(
                    args, attack_log_manager, worklist, worklist_tail
                )
                new_checkpoint.save()
                attack_log_manager.flush()
        pbar.close()
        print()
        # Enable summary stdout
        if args.disable_stdout:
            attack_log_manager.enable_stdout()
        attack_log_manager.log_attack_details(attack=attack, model=model)
        attack_log_manager.log_extra_stats()
        attack_log_manager.log_summary()
        attack_log_manager.flush()
        print()
        # finish_time = time.time()
        textattack.shared.logger.info(f"Attack time: {time.time() - load_time}s")
        return attack_log_manager.results
| 33.679739 | 93 | 0.603532 |
15099ddea1a4040796b9932bd97468d5315cea40 | 11,716 | py | Python | mpi_array/globale_creation_test.py | mpi-array/mpi_array | 6a6c707300f7c65d6be5e7e3ef196d7abea10a06 | [
"MIT"
] | 2 | 2018-06-05T14:05:09.000Z | 2021-05-08T14:16:33.000Z | mpi_array/globale_creation_test.py | mpi-array/mpi_array | 6a6c707300f7c65d6be5e7e3ef196d7abea10a06 | [
"MIT"
] | null | null | null | mpi_array/globale_creation_test.py | mpi-array/mpi_array | 6a6c707300f7c65d6be5e7e3ef196d7abea10a06 | [
"MIT"
] | 3 | 2018-01-01T17:52:31.000Z | 2021-06-08T15:48:29.000Z | """
=================================================
The :mod:`mpi_array.globale_creation_test` Module
=================================================
Module for testing creation/factory functions which
generate instances of :mod:`mpi_array.globale.gndarray`.
Execute as::
python -m mpi_array.globale_creation_test
and with parallelism::
mpirun -n 2 python -m mpi_array.globale_creation_test
mpirun -n 4 python -m mpi_array.globale_creation_test
mpirun -n 27 python -m mpi_array.globale_creation_test
Classes
=======
.. autosummary::
:toctree: generated/
:template: autosummary/inherits_TestCase_class.rst
GndarrayCreationTest - Tests for :func:`mpi_array.globale.gndarray` creation functions.
"""
from __future__ import absolute_import
import numpy as _np
import mpi4py.MPI as _mpi
from .license import license as _license, copyright as _copyright, version as _version
from . import unittest as _unittest
from . import logging as _logging # noqa: E402,F401
from .globale import gndarray as _gndarray
from .globale_creation import asarray as _asarray, asanyarray as _asanyarray
from .globale_creation import empty as _empty, zeros as _zeros, ones as _ones, copy as _copy
from .globale_creation import empty_like as _empty_like, zeros_like as _zeros_like
from .globale_creation import ones_like as _ones_like
from . import locale as _locale
from .comms import create_distribution as _create_distribution, LT_PROCESS, LT_NODE, DT_CLONED
from .indexing import IndexingExtent as _IndexingExtent
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class GndarrayCreationTest(_unittest.TestCase):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale.gndarray` instance generation.
"""
    def test_asarray_with_scalar(self):
        """
        :func:`mpi_array.globale_creation.asarray` on a Python scalar yields
        a 0-d float64 :obj:`gndarray` on the world peer communicator;
        re-wrapping an existing :obj:`gndarray` returns the same object.
        """
        sary0 = _asarray(5.0)
        self.assertTrue(sary0.__class__ is _gndarray)
        # NOTE(review): compares a dtype against the array object itself;
        # presumably relies on gndarray equality semantics -- confirm intent.
        self.assertEqual(_np.dtype("float64"), sary0)
        self.assertEqual(0, sary0.ndim)
        self.assertSequenceEqual((), sary0.shape)
        self.assertTrue(sary0.locale_comms.peer_comm is _mpi.COMM_WORLD)
        # asarray must be a no-op on something that is already a gndarray.
        sary1 = _asarray(sary0)
        self.assertTrue(sary1 is sary0)
def test_asarray_with_tuple(self):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_creation.asarray`.
"""
tary0 = _asarray(_np.linspace(100.0, 200.0, 101).tolist())
tary0.rank_logger.debug("tary0.num_locales = %s" % (tary0.num_locales,))
self.assertTrue(tary0.__class__ is _gndarray)
self.assertEqual(_np.dtype("float64"), tary0)
self.assertTrue(tary0.locale_comms.peer_comm is _mpi.COMM_WORLD)
tary1 = _asarray(tary0)
self.assertTrue(tary1 is tary0)
    def test_asarray_with_subclass(self):
        """
        :func:`mpi_array.globale_creation.asarray` on a :obj:`gndarray`
        subclass instance: the result is down-cast to the base
        :obj:`gndarray` class.
        """
        class GndarraySubclass(_gndarray):
            pass
        candd = _create_distribution(shape=(8, 32, 32, 32))
        # Build the locale-level pieces needed to construct a gndarray directly.
        lndarray_proxy, rma_window_buffer = \
            _locale.empty(
                comms_and_distrib=candd,
                dtype="int8",
                order='C',
                return_rma_window_buffer=True
            )
        ary_subclass = GndarraySubclass(candd, rma_window_buffer, lndarray_proxy)
        self.assertTrue(ary_subclass.__class__ is not _gndarray)
        self.assertTrue(isinstance(ary_subclass, _gndarray))
        # asarray converts subclasses back to the plain gndarray class.
        asary0 = _asarray(ary_subclass)
        self.assertTrue(asary0.__class__ is _gndarray)
def test_asanyarray_with_tuple(self):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_creation.asanyarray`.
"""
tary0 = _asanyarray(_np.linspace(100.0, 200.0, 101).tolist())
tary0.rank_logger.debug("tary0.num_locales = %s" % (tary0.num_locales,))
self.assertTrue(tary0.__class__ is _gndarray)
self.assertEqual(_np.dtype("float64"), tary0)
self.assertTrue(tary0.locale_comms.peer_comm is _mpi.COMM_WORLD)
tary1 = _asanyarray(tary0)
self.assertTrue(tary1 is tary0)
    def test_asanyarray_with_subclass(self):
        """
        :func:`mpi_array.globale_creation.asanyarray` on a :obj:`gndarray`
        subclass instance: unlike ``asarray``, the subclass is preserved and
        the very same object is returned.
        """
        class GndarraySubclass(_gndarray):
            pass
        candd = _create_distribution(shape=(8, 32, 32, 32))
        # Build the locale-level pieces needed to construct a gndarray directly.
        lndarray_proxy, rma_window_buffer = \
            _locale.empty(
                comms_and_distrib=candd,
                dtype="int8",
                order='C',
                return_rma_window_buffer=True
            )
        ary_subclass = GndarraySubclass(candd, rma_window_buffer, lndarray_proxy)
        self.assertTrue(ary_subclass.__class__ is not _gndarray)
        self.assertTrue(isinstance(ary_subclass, _gndarray))
        # asanyarray passes subclass instances through unchanged.
        asanyary0 = _asanyarray(ary_subclass)
        self.assertTrue(asanyary0.__class__ is GndarraySubclass)
        self.assertTrue(asanyary0 is ary_subclass)
    def test_empty_scalar(self):
        """
        Test for :func:`mpi_array.globale_creation.empty` with an empty
        (0-d) shape under a per-process, cloned distribution: the scalar
        locale array is writable and readable.
        """
        gary = \
            _empty(
                shape=(),
                dtype="float64",
                locale_type=LT_PROCESS,
                distrib_type=DT_CLONED
            )
        # Write via the locale proxy and read the underlying locale ndarray.
        gary.lndarray_proxy[...] = 4
        self.assertEqual(4, gary.lndarray_proxy.lndarray)
    def test_empty_shared_1d(self):
        """
        Test for :func:`mpi_array.globale_creation.empty` and
        :func:`mpi_array.globale_creation.empty_like` with the default
        (node/shared-memory) locale type: each rank's view covers ``lshape``
        elements of the global array.
        """
        lshape = (10,)
        # Global shape scales with the number of peer-communicator ranks.
        gshape = (_mpi.COMM_WORLD.size * lshape[0],)
        cand = _create_distribution(shape=gshape)
        gary = _empty(comms_and_distrib=cand, dtype="int64")
        self.assertEqual(_np.dtype("int64"), gary.dtype)
        self.assertSequenceEqual(
            list(lshape),
            list(_IndexingExtent(gary.lndarray_proxy.rank_view_slice_n).shape)
        )
        gary1 = _empty_like(gary)
        self.assertEqual(_np.dtype("int64"), gary1.dtype)
        self.assertSequenceEqual(
            list(lshape),
            list(_IndexingExtent(gary1.lndarray_proxy.rank_view_slice_n).shape)
        )
        # empty_like on a plain numpy array presumably falls back to a
        # numpy-style result with a plain .shape -- confirm against the
        # globale_creation.empty_like implementation.
        ary = _empty_like(_np.zeros(lshape, dtype="int64"))
        self.assertEqual(_np.dtype("int64"), ary.dtype)
        self.assertSequenceEqual(
            list(lshape),
            list(ary.shape)
        )
    def test_empty_non_shared_1d(self):
        """
        Test for :func:`mpi_array.globale_creation.empty`
        and :func:`mpi_array.globale_creation.empty_like`
        with one locale per process (no shared memory): the locale array
        shape equals the per-rank view shape.
        """
        lshape = (10,)
        # Global shape scales with the number of peer-communicator ranks.
        gshape = (_mpi.COMM_WORLD.size * lshape[0],)
        cand = _create_distribution(shape=gshape, locale_type=LT_PROCESS)
        gary = _empty(comms_and_distrib=cand, dtype="int64")
        self.assertEqual(_np.dtype("int64"), gary.dtype)
        self.assertSequenceEqual(list(lshape), list(gary.lndarray_proxy.shape))
        self.assertSequenceEqual(
            list(lshape),
            list(_IndexingExtent(gary.lndarray_proxy.rank_view_slice_n).shape)
        )
        gary1 = _empty_like(gary)
        self.assertEqual(_np.dtype("int64"), gary1.dtype)
        self.assertSequenceEqual(list(lshape), list(gary1.lndarray_proxy.shape))
        self.assertSequenceEqual(
            list(lshape),
            list(_IndexingExtent(gary1.lndarray_proxy.rank_view_slice_n).shape)
        )
    def test_zeros_shared_1d(self):
        """
        Test for :func:`mpi_array.globale_creation.zeros`
        and :func:`mpi_array.globale_creation.zeros_like`
        with node (shared-memory) locales: all elements are zero-initialised.
        """
        lshape = (10,)
        gshape = (_mpi.COMM_WORLD.size * lshape[0],)
        cand = _create_distribution(shape=gshape, locale_type=LT_NODE)
        gary = _zeros(comms_and_distrib=cand, dtype="int64")
        self.assertEqual(_np.dtype("int64"), gary.dtype)
        # Barrier: ensure every rank has finished initialising before the
        # collective (gary == 0).all() check reads remote data.
        gary.locale_comms.peer_comm.barrier()
        self.assertTrue((gary == 0).all())
        gary1 = _zeros_like(gary)
        self.assertEqual(_np.dtype("int64"), gary1.dtype)
        gary.locale_comms.peer_comm.barrier()
        self.assertTrue((gary1 == 0).all())
    def test_zeros_non_shared_1d(self):
        """
        Test for :func:`mpi_array.globale_creation.zeros`
        and :func:`mpi_array.globale_creation.zeros_like`
        with one locale per process: all elements are zero-initialised.
        """
        lshape = (10,)
        gshape = (_mpi.COMM_WORLD.size * lshape[0],)
        cand = _create_distribution(shape=gshape, locale_type=LT_PROCESS)
        gary = _zeros(comms_and_distrib=cand, dtype="int64")
        self.assertEqual(_np.dtype("int64"), gary.dtype)
        # Barrier before the collective equality check (see shared variant).
        gary.locale_comms.peer_comm.barrier()
        self.assertTrue((gary == 0).all())
        gary1 = _zeros_like(gary)
        self.assertEqual(_np.dtype("int64"), gary1.dtype)
        gary.locale_comms.peer_comm.barrier()
        self.assertTrue((gary1 == 0).all())
def test_ones_shared_1d(self):
    """
    Test for :func:`mpi_array.globale_creation.ones`
    and :func:`mpi_array.globale_creation.ones_like`.
    """
    locale_shape = (10,)
    global_shape = (_mpi.COMM_WORLD.size * locale_shape[0],)
    # Default locale type is used here (shared-memory node locales).
    dist = _create_distribution(shape=global_shape)

    ary = _ones(comms_and_distrib=dist, dtype="int64")
    self.assertEqual(_np.dtype("int64"), ary.dtype)
    ary.locale_comms.peer_comm.barrier()
    self.assertTrue((ary == 1).all())

    ary_like = _ones_like(ary)
    self.assertEqual(_np.dtype("int64"), ary_like.dtype)
    ary.locale_comms.peer_comm.barrier()
    self.assertTrue((ary_like == 1).all())
def test_ones_non_shared_1d(self):
    """
    Test for :func:`mpi_array.globale_creation.ones`
    and :func:`mpi_array.globale_creation.ones_like`.
    """
    locale_shape = (10,)
    global_shape = (_mpi.COMM_WORLD.size * locale_shape[0],)
    dist = _create_distribution(shape=global_shape, locale_type=LT_PROCESS)

    ary = _ones(comms_and_distrib=dist, dtype="int64")
    self.assertEqual(_np.dtype("int64"), ary.dtype)
    ary.locale_comms.peer_comm.barrier()
    self.assertTrue((ary == 1).all())

    ary_like = _ones_like(ary)
    self.assertEqual(_np.dtype("int64"), ary_like.dtype)
    ary.locale_comms.peer_comm.barrier()
    self.assertTrue((ary_like == 1).all())
def test_copy_shared_1d(self):
    """
    Test for :func:`mpi_array.globale_creation.copy`.
    """
    locale_shape = (10,)
    global_shape = (_mpi.COMM_WORLD.size * locale_shape[0],)
    dist = _create_distribution(global_shape)

    ary = _ones(comms_and_distrib=dist, dtype="int64")
    self.assertEqual(_np.dtype("int64"), ary.dtype)
    # Give each rank's view a distinct value so the copy test is non-trivial.
    ary.rank_view_n[...] = ary.locale_comms.peer_comm.rank

    ary_copy = _copy(ary)
    self.assertEqual(_np.dtype("int64"), ary_copy.dtype)
    ary.locale_comms.peer_comm.barrier()
    self.assertTrue((ary_copy == ary).all())
def test_copy_non_shared_1d(self):
    """
    Test for :func:`mpi_array.globale_creation.copy`.
    """
    locale_shape = (10,)
    global_shape = (_mpi.COMM_WORLD.size * locale_shape[0],)
    dist = _create_distribution(global_shape, locale_type=LT_PROCESS)

    ary = _ones(comms_and_distrib=dist, dtype="int64")
    self.assertEqual(_np.dtype("int64"), ary.dtype)
    # Give each rank's view a distinct value so the copy test is non-trivial.
    ary.rank_view_n[...] = ary.locale_comms.peer_comm.rank

    ary_copy = _copy(ary)
    self.assertEqual(_np.dtype("int64"), ary_copy.dtype)
    ary.locale_comms.peer_comm.barrier()
    self.assertTrue((ary_copy == ary).all())
# Run this module's test cases when executed as a script.
# NOTE(review): `_unittest` appears to be a project wrapper whose `main`
# takes the module name directly (unlike stdlib unittest.main) — verify.
_unittest.main(__name__)

# Export only public (non-underscore) names from this module.
__all__ = [s for s in dir() if not s.startswith('_')]
| 34.662722 | 94 | 0.641174 |
e5d0c271e224a7ae943346712961b53fcc28e213 | 64,071 | py | Python | src/transformers/modeling_lxmert.py | katarinaslama/transformers-1 | a5a8eeb772b185b0746f3ce9be6ae43181d2ca71 | [
"Apache-2.0"
] | 4 | 2021-01-15T20:20:47.000Z | 2021-11-14T18:33:42.000Z | src/transformers/modeling_lxmert.py | yym6472/transformers | abd01205561e5caec167c1fbb20bccea24d7ba46 | [
"Apache-2.0"
] | 1 | 2021-09-15T09:20:01.000Z | 2022-03-02T17:16:01.000Z | src/transformers/modeling_lxmert.py | yym6472/transformers | abd01205561e5caec167c1fbb20bccea24d7ba46 | [
"Apache-2.0"
] | 1 | 2020-11-11T14:29:22.000Z | 2020-11-11T14:29:22.000Z | # coding=utf-8
# Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LXMERT model. """
import logging
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from .activations import ACT2FN, gelu
from .configuration_lxmert import LxmertConfig
from .file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_callable,
replace_return_docstrings,
)
from .modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
_CONFIG_FOR_DOC = "LxmertConfig"
_TOKENIZER_FOR_DOC = "LxmertTokenizer"
LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"unc-nlp/lxmert-base-uncased",
]
class GeLU(nn.Module):
    """Module wrapper around the functional GELU activation imported from ``.activations``."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Stateless: simply delegates to the functional `gelu`.
        return gelu(x)
@dataclass
class LxmertModelOutput(ModelOutput):
    """
    Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for
    the language, visual, and, cross-modality encoders.
    (note: the visual encoder in Lxmert is referred to as the "relation-ship" encoder)


    Args:
        language_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the language encoder.
        vision_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the visual encoder.
        pooled_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification, CLS, token)
            further processed by a Linear layer and a Tanh activation function. The Linear
        language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # All fields default to None so partial outputs (e.g. attentions disabled) are valid.
    language_output: Optional[torch.FloatTensor] = None
    vision_output: Optional[torch.FloatTensor] = None
    pooled_output: Optional[torch.FloatTensor] = None
    language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    language_attentions: Optional[Tuple[torch.FloatTensor]] = None
    vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LxmertForQuestionAnsweringOutput(ModelOutput):
    """
    Output type of :class:`~transformers.LxmertForQuestionAnswering`.

    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        question_answering_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, n_qa_answers)`, `optional`):
            Prediction scores of question answering objective (classification).
        language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # All fields optional: loss is only populated when labels are supplied.
    loss: Optional[torch.FloatTensor] = None
    question_answering_score: Optional[torch.FloatTensor] = None
    language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    language_attentions: Optional[Tuple[torch.FloatTensor]] = None
    vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LxmertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.LxmertForPreTrainingModel`.

    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        cross_relationship_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the textual matching objective (classification) head (scores of True/False
            continuation before SoftMax).
        question_answering_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, n_qa_answers)`):
            Prediction scores of question answering objective (classification).
        language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Bug fix: `loss` was annotated as `[torch.FloatTensor]` (a runtime list
    # literal, not a type), inconsistent with every sibling output dataclass.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: Optional[torch.FloatTensor] = None
    cross_relationship_score: Optional[torch.FloatTensor] = None
    question_answering_score: Optional[torch.FloatTensor] = None
    language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    language_attentions: Optional[Tuple[torch.FloatTensor]] = None
    vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TensorFlow checkpoint at ``tf_checkpoint_path``,
    resolves the matching attribute path on ``model`` and copies the numpy data
    into the corresponding PyTorch parameter. Returns ``model`` mutated in place.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        # Variable names are slash-separated scope paths, e.g. "bert/encoder/layer_0/...".
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n
            in [
                "adam_v",
                "adam_m",
                "AdamWeightDecayOptimizer",
                "AdamWeightDecayOptimizer_1",
                "global_step",
            ]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        # Descend through the model attribute hierarchy, one scope component at a time.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_3" -> attribute "layer" indexed at 3.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Map TF naming conventions onto PyTorch attribute names.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` advances the *inner* scope loop
                    # (skipping only this path component), not the outer variable
                    # loop — inherited from the BERT loader; verify intent.
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            # Embedding variables map to the module's weight tensor.
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class LxmertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        Exactly one of ``input_ids`` / ``inputs_embeds`` must be provided;
        ``token_type_ids`` defaults to all zeros.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        seq_length = input_shape[1]

        position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
        position_ids = position_ids.unsqueeze(0).expand(input_shape)

        if token_type_ids is None:
            # Bug fix: the original used ``device=self.position_ids.device`` but this
            # module never defines a ``position_ids`` attribute/buffer, raising
            # AttributeError whenever token_type_ids was omitted. Use the device
            # derived from the inputs instead.
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class LxmertAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Queries come from ``hidden_states``; keys and values come from ``context``
    (which may be the same tensor for self-attention, or the other modality for
    cross-attention, with ``ctx_dim`` giving its feature width).
    """

    def __init__(self, config, ctx_dim=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.head_size = self.num_attention_heads * self.attention_head_size

        # visual_dim = 2048
        if ctx_dim is None:
            ctx_dim = config.hidden_size
        self.query = nn.Linear(config.hidden_size, self.head_size)
        self.key = nn.Linear(ctx_dim, self.head_size)
        self.value = nn.Linear(ctx_dim, self.head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(context))
        value_layer = self.transpose_for_scores(self.value(context))

        # Raw attention scores: query·keyᵀ, scaled by sqrt(head size).
        scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Mask is additive (precomputed; large negative values on masked positions).
            scores = scores + attention_mask

        probs = nn.Softmax(dim=-1)(scores)
        # Dropping out attention weights removes whole tokens to attend to,
        # as in the original Transformer paper.
        probs = self.dropout(probs)

        ctx_layer = torch.matmul(probs, value_layer).permute(0, 2, 1, 3).contiguous()
        merged_shape = ctx_layer.size()[:-2] + (self.head_size,)
        ctx_layer = ctx_layer.view(*merged_shape)

        return (ctx_layer, probs) if output_attentions else (ctx_layer,)
class LxmertAttentionOutput(nn.Module):
    """Post-attention projection with residual connection and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Project, regularise, then add the residual and normalise.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class LxmertCrossAttentionLayer(nn.Module):
    """Cross-modality attention: queries from one modality, keys/values from the other."""

    def __init__(self, config):
        super().__init__()
        self.att = LxmertAttention(config)
        self.output = LxmertAttentionOutput(config)

    def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
        att_outputs = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
        attention_output = self.output(att_outputs[0], input_tensor)
        if output_attentions:
            return (attention_output, att_outputs[1])
        return (attention_output,)
class LxmertSelfAttentionLayer(nn.Module):
    """Self-attention block: attention over the input itself, plus output projection."""

    def __init__(self, config):
        super().__init__()
        self.self = LxmertAttention(config)
        self.output = LxmertAttentionOutput(config)

    def forward(self, input_tensor, attention_mask, output_attentions=False):
        # Keys/queries/values all come from the same tensor for self-attention.
        att_outputs = self.self(
            input_tensor,
            input_tensor,
            attention_mask,
            output_attentions=output_attentions,
        )
        attention_output = self.output(att_outputs[0], input_tensor)
        if output_attentions:
            return (attention_output, att_outputs[1])
        return (attention_output,)
class LxmertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size with nonlinearity."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.intermediate_act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class LxmertOutput(nn.Module):
    """Feed-forward contraction back to hidden_size with residual + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class LxmertLayer(nn.Module):
    """Single transformer layer: self-attention followed by a feed-forward block."""

    def __init__(self, config):
        super().__init__()
        self.attention = LxmertSelfAttentionLayer(config)
        self.intermediate = LxmertIntermediate(config)
        self.output = LxmertOutput(config)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        attn_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
        attention_output = attn_outputs[0]
        layer_output = self.output(self.intermediate(attention_output), attention_output)
        # Re-attach the attention probabilities when they were requested.
        return (layer_output,) + attn_outputs[1:]
class LxmertXLayer(nn.Module):
    """Cross-modality layer: bidirectional cross-attention, per-modality
    self-attention, then per-modality feed-forward blocks."""

    def __init__(self, config):
        super().__init__()
        # The cross-attention Layer
        self.visual_attention = LxmertCrossAttentionLayer(config)

        # Self-attention Layers
        self.lang_self_att = LxmertSelfAttentionLayer(config)
        self.visn_self_att = LxmertSelfAttentionLayer(config)

        # Intermediate and Output Layers (FFNs)
        self.lang_inter = LxmertIntermediate(config)
        self.lang_output = LxmertOutput(config)
        self.visn_inter = LxmertIntermediate(config)
        self.visn_output = LxmertOutput(config)

    def cross_att(
        self,
        lang_input,
        lang_attention_mask,
        visual_input,
        visual_attention_mask,
        output_x_attentions=False,
    ):
        """Attend each modality over the other; the same cross-attention module
        (shared weights) is applied in both directions."""
        # Cross Attention: language queries over visual context...
        lang_att_output = self.visual_attention(
            lang_input,
            visual_input,
            ctx_att_mask=visual_attention_mask,
            output_attentions=output_x_attentions,
        )
        # ...and visual queries over language context. Only the language->vision
        # direction's attention map is ever surfaced (output_attentions=False here).
        visual_att_output = self.visual_attention(
            visual_input,
            lang_input,
            ctx_att_mask=lang_attention_mask,
            output_attentions=False,
        )
        return lang_att_output, visual_att_output

    def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
        """Per-modality self-attention; returns bare tensors (index [0])."""
        # Self Attention
        lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
        visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
        return lang_att_output[0], visual_att_output[0]

    def output_fc(self, lang_input, visual_input):
        """Per-modality feed-forward (intermediate + output w/ residual)."""
        # FC layers
        lang_inter_output = self.lang_inter(lang_input)
        visual_inter_output = self.visn_inter(visual_input)

        # Layer output
        lang_output = self.lang_output(lang_inter_output, lang_input)
        visual_output = self.visn_output(visual_inter_output, visual_input)
        return lang_output, visual_output

    def forward(
        self,
        lang_feats,
        lang_attention_mask,
        visual_feats,
        visual_attention_mask,
        output_attentions=False,
    ):
        # Returns (lang_output, visual_output[, cross_attention_probs]).
        lang_att_output, visual_att_output = self.cross_att(
            lang_input=lang_feats,
            lang_attention_mask=lang_attention_mask,
            visual_input=visual_feats,
            visual_attention_mask=visual_attention_mask,
            output_x_attentions=output_attentions,
        )
        # Empty tuple when attentions were not requested.
        attention_probs = lang_att_output[1:]
        lang_att_output, visual_att_output = self.self_att(
            lang_att_output[0],
            lang_attention_mask,
            visual_att_output[0],
            visual_attention_mask,
        )

        lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
        return (
            (
                lang_output,
                visual_output,
                attention_probs[0],
            )
            if output_attentions
            else (lang_output, visual_output)
        )
class LxmertVisualFeatureEncoder(nn.Module):
    """Embeds ROI features and box positions, averaging the two projections."""

    def __init__(self, config):
        super().__init__()
        # Object feature encoding
        self.visn_fc = nn.Linear(config.visual_feat_dim, config.hidden_size)
        self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)

        # Box position encoding
        self.box_fc = nn.Linear(config.visual_pos_dim, config.hidden_size)
        self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, visual_feats, visual_pos):
        feat_embedding = self.visn_layer_norm(self.visn_fc(visual_feats))
        pos_embedding = self.box_layer_norm(self.box_fc(visual_pos))
        # Average the two normalised embeddings, then apply dropout.
        return self.dropout((feat_embedding + pos_embedding) / 2)
class LxmertEncoder(nn.Module):
    """Full LXMERT encoder stack: visual feature embedding, language-only layers,
    vision-only ("relational") layers, and cross-modality layers."""

    def __init__(self, config):
        super().__init__()

        # Obj-level image embedding layer
        self.visn_fc = LxmertVisualFeatureEncoder(config)
        self.config = config

        # Number of layers
        self.num_l_layers = config.l_layers
        self.num_x_layers = config.x_layers
        self.num_r_layers = config.r_layers

        # Layers
        # Using self.layer instead of self.l_layer to support loading BERT weights.
        self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
        self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
        self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])

    def forward(
        self,
        lang_feats,
        lang_attention_mask,
        visual_feats,
        visual_pos,
        visual_attention_mask=None,
        output_attentions=None,
    ):
        """Returns ((vision_hidden_states, vision_attentions),
        (language_hidden_states, language_attentions), cross_encoder_attentions)."""

        vision_hidden_states = ()
        language_hidden_states = ()
        # NOTE(review): collection is gated on `output_attentions or
        # self.config.output_attentions`, but the return below is gated on
        # `output_attentions` alone — when only the config flag is set, the
        # attentions are collected then dropped; verify this is intended.
        vision_attentions = () if output_attentions or self.config.output_attentions else None
        language_attentions = () if output_attentions or self.config.output_attentions else None
        cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None

        # Project ROI features + box positions into the hidden space.
        visual_feats = self.visn_fc(visual_feats, visual_pos)

        # Run language layers
        for layer_module in self.layer:
            l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
            lang_feats = l_outputs[0]
            language_hidden_states = language_hidden_states + (lang_feats,)
            if language_attentions is not None:
                language_attentions = language_attentions + (l_outputs[1],)

        # Run relational layers
        for layer_module in self.r_layers:
            v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
            visual_feats = v_outputs[0]
            vision_hidden_states = vision_hidden_states + (visual_feats,)
            if vision_attentions is not None:
                vision_attentions = vision_attentions + (v_outputs[1],)

        # Run cross-modality layers
        for layer_module in self.x_layers:
            x_outputs = layer_module(
                lang_feats,
                lang_attention_mask,
                visual_feats,
                visual_attention_mask,
                output_attentions=output_attentions,
            )
            lang_feats, visual_feats = x_outputs[:2]
            vision_hidden_states = vision_hidden_states + (visual_feats,)
            language_hidden_states = language_hidden_states + (lang_feats,)
            if cross_encoder_attentions is not None:
                cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
        visual_encoder_outputs = (
            vision_hidden_states,
            vision_attentions if output_attentions else None,
        )
        lang_encoder_outputs = (
            language_hidden_states,
            language_attentions if output_attentions else None,
        )
        return (
            visual_encoder_outputs,
            lang_encoder_outputs,
            cross_encoder_attentions if output_attentions else None,
        )
class LxmertPooler(nn.Module):
    """Pools the sequence by transforming the first ([CLS]) token's hidden state."""

    def __init__(self, config):
        super(LxmertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pool" by taking only the first token's hidden state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class LxmertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied before prediction heads."""

    def __init__(self, config):
        super(LxmertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = ACT2FN[config.hidden_act]
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        activated = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(activated)
class LxmertLMPredictionHead(nn.Module):
    """Masked-LM head: transform, then decode to vocabulary logits.

    The decoder weight matrix is tied to the input embedding weights; only a
    per-token output bias is learned here.
    """

    def __init__(self, config, lxmert_model_embedding_weights):
        super(LxmertLMPredictionHead, self).__init__()
        self.transform = LxmertPredictionHeadTransform(config)

        # Tie decoder weights to the input embeddings (hidden -> vocab).
        self.decoder = nn.Linear(
            lxmert_model_embedding_weights.size(1),
            lxmert_model_embedding_weights.size(0),
            bias=False,
        )
        self.decoder.weight = lxmert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class LxmertVisualAnswerHead(nn.Module):
    """Two-layer MLP classification head for visual question answering."""

    def __init__(self, config, num_labels):
        super().__init__()
        hid_dim = config.hidden_size
        # hidden -> 2*hidden -> GELU -> LayerNorm -> num_labels
        self.logit_fc = nn.Sequential(
            nn.Linear(hid_dim, hid_dim * 2),
            GeLU(),
            nn.LayerNorm(hid_dim * 2, eps=1e-12),
            nn.Linear(hid_dim * 2, num_labels),
        )

    def forward(self, hidden_states):
        return self.logit_fc(hidden_states)
class LxmertVisualObjHead(nn.Module):
    """Visual pre-training heads: one linear decoder per enabled visual loss
    (object-label classification, attribute classification, feature regression)."""

    def __init__(self, config):
        super().__init__()
        self.transform = LxmertPredictionHeadTransform(config)

        # Decide the use of visual losses
        visual_losses = {}
        if config.visual_obj_loss:
            visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
        if config.visual_attr_loss:
            visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
        # Bug fix: the feature-regression head was gated on ``visual_obj_loss``
        # (copy-paste), which made ``config.visual_feat_loss`` a dead flag.
        # Gate it on the correct config attribute.
        if config.visual_feat_loss:
            visual_losses["feat"] = {
                "shape": (-1, config.visual_feat_dim),
                "num": config.visual_feat_dim,
            }
        self.visual_losses = visual_losses

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder_dict = nn.ModuleDict(
            {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
        )

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        # One set of logits / regressed features per enabled loss.
        output = {}
        for key in self.visual_losses:
            output[key] = self.decoder_dict[key](hidden_states)
        return output
class LxmertPreTrainingHeads(nn.Module):
    """Pre-training heads: masked-LM logits plus cross-modality matching score."""

    def __init__(self, config, lxmert_model_embedding_weights):
        super(LxmertPreTrainingHeads, self).__init__()
        self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # LM logits from per-token states; matching score from the pooled [CLS] state.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class LxmertPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """

    config_class = LxmertConfig
    load_tf_weights = load_tf_weights_in_lxmert
    base_model_prefix = "lxmert"

    def _init_weights(self, module):
        """Initialize the weights of one submodule."""
        if isinstance(module, nn.LayerNorm):
            # LayerNorm: identity scale, zero shift.
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
            return
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses
            # truncated_normal for initialization.
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
LXMERT_START_DOCSTRING = r"""
The LXMERT model was proposed in `LXMERT: Learning Cross-Modality Encoder Representations from Transformers
<https://arxiv.org/abs/1908.07490>`__ by Hao Tan and Mohit Bansal. It's a vision and language transformer model,
pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome,
using a combination of masked language modeling, region of interest feature regression,
cross entropy loss for question answering attribute prediction, and object tag prediction.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.LxmertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
LXMERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.LxmertTokenizer`.
See :meth:`transformers.PreTrainedTokenizer.encode` and
:meth:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
visual_feats: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_feat_dim)`):
This input represents visual features. These are ROI-pooled object features from bounding boxes using a
faster-RCNN model.
These are currently not provided by the transformers library.
visual_pos: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_pos_dim)`):
This input represents spatial features corresponding to their relative (via index) visual features.
The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of
0 to 1.
These are currently not provided by the transformers library.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
visual_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
    LXMERT_START_DOCSTRING,
)
class LxmertModel(LxmertPreTrainedModel):
    """Bare LXMERT encoder: language and vision streams plus cross-modality
    layers, returning (language output, vision output, pooled output)."""

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = LxmertEmbeddings(config)
        self.encoder = LxmertEncoder(config)
        self.pooler = LxmertPooler(config)
        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    @add_start_docstrings_to_callable(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="unc-nlp/lxmert-base-uncased",
        output_type=LxmertModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        visual_feats=None,
        visual_pos=None,
        attention_mask=None,
        visual_attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        # BUGFIX/robustness: these checks used `assert`, which is stripped under
        # `python -O`; input validation must raise unconditionally.
        if visual_feats is None:
            raise ValueError("`visual_feats` cannot be `None`")
        if visual_pos is None:
            raise ValueError("`visual_pos` cannot be `None`")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # Process the visual attention mask the same way (additive mask).
        if visual_attention_mask is not None:
            extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
            extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
            extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * -10000.0
        else:
            extended_visual_attention_mask = None
        # Positional Word Embeddings
        embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
        # Run Lxmert encoder
        encoder_outputs = self.encoder(
            embedding_output,
            extended_attention_mask,
            visual_feats=visual_feats,
            visual_pos=visual_pos,
            visual_attention_mask=extended_visual_attention_mask,
            output_attentions=output_attentions,
        )
        visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
        vision_hidden_states = visual_encoder_outputs[0]
        language_hidden_states = lang_encoder_outputs[0]
        all_attentions = ()
        if output_attentions:
            language_attentions = lang_encoder_outputs[1]
            vision_attentions = visual_encoder_outputs[1]
            cross_encoder_attentions = encoder_outputs[2]
            all_attentions = (
                language_attentions,
                vision_attentions,
                cross_encoder_attentions,
            )
        hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
        # The last hidden state of each stream feeds the heads; the pooler
        # consumes only the language stream.
        visual_output = vision_hidden_states[-1]
        lang_output = language_hidden_states[-1]
        pooled_output = self.pooler(lang_output)
        if not return_dict:
            return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
        return LxmertModelOutput(
            pooled_output=pooled_output,
            language_output=lang_output,
            vision_output=visual_output,
            language_hidden_states=language_hidden_states if output_hidden_states else None,
            vision_hidden_states=vision_hidden_states if output_hidden_states else None,
            language_attentions=language_attentions if output_attentions else None,
            vision_attentions=vision_attentions if output_attentions else None,
            cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
        )
@add_start_docstrings(
    """Lxmert Model with a specified pre-training head on top. """,
    LXMERT_START_DOCSTRING,
)
class LxmertForPreTraining(LxmertPreTrainedModel):
    """LXMERT with the pre-training heads: masked LM, cross-modality matching,
    visual-feature prediction, and (optionally) question answering."""

    def __init__(self, config):
        super().__init__(config)
        # Configuration
        self.config = config
        self.num_qa_labels = config.num_qa_labels
        self.visual_loss_normalizer = config.visual_loss_normalizer
        # Use of pre-training tasks
        self.task_mask_lm = config.task_mask_lm
        self.task_obj_predict = config.task_obj_predict
        self.task_matched = config.task_matched
        self.task_qa = config.task_qa
        # Lxmert backbone
        self.lxmert = LxmertModel(config)
        # Pre-training heads (LM head ties to the word-embedding matrix)
        self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
        if self.task_obj_predict:
            self.obj_predict_head = LxmertVisualObjHead(config)
        if self.task_qa:
            self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
        # Weight initialization
        self.init_weights()
        # Loss functions
        self.loss_fcts = {
            "l2": SmoothL1Loss(reduction="none"),
            "visual_ce": CrossEntropyLoss(reduction="none"),
            "ce": CrossEntropyLoss(),
        }
        visual_losses = {}
        if config.visual_obj_loss:
            visual_losses["obj"] = {
                "shape": (-1,),
                "num": config.num_object_labels,
                "loss": "visual_ce",
            }
        if config.visual_attr_loss:
            visual_losses["attr"] = {
                "shape": (-1,),
                "num": config.num_attr_labels,
                "loss": "visual_ce",
            }
        # BUGFIX: the feature-regression loss was gated on config.visual_obj_loss
        # (copy/paste error); it must follow config.visual_feat_loss, matching
        # LxmertVisualObjHead.
        if config.visual_feat_loss:
            visual_losses["feat"] = {
                "shape": (-1, config.visual_feat_dim),
                "num": config.visual_feat_dim,
                "loss": "l2",
            }
        self.visual_losses = visual_losses

    def resize_num_qa_labels(self, num_labels):
        """
        Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size will add newly
        initialized weights. Reducing the size will remove weights from the end
        Args:
            num_labels (:obj:`int`, `optional`):
                New number of labels in the linear layer weight matrix.
                Increasing the size will add newly initialized weights at the end. Reducing the size will remove
                weights from the end. If not provided or :obj:`None`, just returns a pointer to the qa labels
                :obj:`torch.nn.Linear` module of the model without doing anything.
        Return:
            :obj:`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
        """
        cur_qa_logit_layer = self.get_qa_logit_layer()
        if num_labels is None or cur_qa_logit_layer is None:
            return
        new_qa_logit_layer = self._resize_qa_labels(num_labels)
        self.config.num_qa_labels = num_labels
        self.num_qa_labels = num_labels
        return new_qa_logit_layer

    def _resize_qa_labels(self, num_labels):
        # Swap the final classifier of the answer head for a resized copy.
        cur_qa_logit_layer = self.get_qa_logit_layer()
        new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
        self._set_qa_logit_layer(new_qa_logit_layer)
        return self.get_qa_logit_layer()

    def get_qa_logit_layer(self) -> nn.Module:
        """
        Returns the linear layer that produces question answering logits.
        Returns:
            :obj:`nn.Module`: A torch module mapping the question answering prediction hidden states or :obj:`None` if
            LXMERT does not have a visual answering head.
        """
        if hasattr(self, "answer_head"):
            return self.answer_head.logit_fc[-1]

    def _set_qa_logit_layer(self, qa_logit_layer):
        self.answer_head.logit_fc[-1] = qa_logit_layer

    def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
        if num_labels is None:
            return cur_qa_logit_layer
        cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
        if cur_qa_labels == num_labels:
            return cur_qa_logit_layer
        # Build new linear output (preserve presence/absence of a bias term)
        if getattr(cur_qa_logit_layer, "bias", None) is not None:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
        else:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
        new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
        # initialize all new labels
        self._init_weights(new_qa_logit_layer)
        # Copy labels from the previous weights
        num_labels_to_copy = min(cur_qa_labels, num_labels)
        new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
        if getattr(cur_qa_logit_layer, "bias", None) is not None:
            new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
        return new_qa_logit_layer

    @add_start_docstrings_to_callable(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        visual_feats=None,
        visual_pos=None,
        attention_mask=None,
        visual_attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        masked_lm_labels=None,
        obj_labels=None,
        matched_label=None,
        ans=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        masked_lm_labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``
        obj_labels: (``Dict[Str: Tuple[Torch.FloatTensor, Torch.FloatTensor]]``, `optional`):
            each key is named after each one of the visual losses and each element of the tuple is of the shape
            ``(batch_size, num_features)`` and ``(batch_size, num_features, visual_feature_dim)``
            for each the label id and the label score respectively
        matched_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing whether or not the text input matches the image (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
            Indices should be in ``[0, 1]``:
            - 0 indicates that the sentence does not match the image,
            - 1 indicates that the sentence does match the image.
        ans: (``Torch.Tensor`` of shape ``(batch_size)``, `optional`):
            a one hot representation of the correct answer `optional`
        Returns:
        """
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        lxmert_output = self.lxmert(
            input_ids=input_ids,
            visual_feats=visual_feats,
            visual_pos=visual_pos,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            visual_attention_mask=visual_attention_mask,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        lang_output, visual_output, pooled_output = (
            lxmert_output[0],
            lxmert_output[1],
            lxmert_output[2],
        )
        lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
        if self.task_qa:
            answer_score = self.answer_head(pooled_output)
        else:
            # Placeholder so the output tuple keeps a fixed arity when QA is off.
            answer_score = pooled_output[0][0]
        total_loss = (
            None
            if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None)
            else torch.tensor(0.0, device=device)
        )
        if masked_lm_labels is not None and self.task_mask_lm:
            masked_lm_loss = self.loss_fcts["ce"](
                lang_prediction_scores.view(-1, self.config.vocab_size),
                masked_lm_labels.view(-1),
            )
            total_loss += masked_lm_loss
        if matched_label is not None and self.task_matched:
            matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
            total_loss += matched_loss
        if obj_labels is not None and self.task_obj_predict:
            # BUGFIX: this used `input_ids.device`, which crashes with an
            # AttributeError when the caller passes `inputs_embeds` instead of
            # `input_ids`; use the `device` computed above.
            total_visual_loss = torch.tensor(0.0, device=device)
            visual_prediction_scores_dict = self.obj_predict_head(visual_output)
            for key, key_info in self.visual_losses.items():
                label, mask_conf = obj_labels[key]
                output_dim = key_info["num"]
                loss_fct_name = key_info["loss"]
                label_shape = key_info["shape"]
                weight = self.visual_loss_normalizer
                visual_loss_fct = self.loss_fcts[loss_fct_name]
                visual_prediction_scores = visual_prediction_scores_dict[key]
                visual_loss = visual_loss_fct(
                    visual_prediction_scores.view(-1, output_dim),
                    label.view(*label_shape),
                )
                if visual_loss.dim() > 1:  # Regression Losses
                    visual_loss = visual_loss.mean(1)
                # Weight per-feature losses by their confidence mask, then
                # scale by the configured normalizer.
                visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
                total_visual_loss += visual_loss
            total_loss += total_visual_loss
        if ans is not None and self.task_qa:
            answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
            total_loss += answer_loss
        if not return_dict:
            output = (
                lang_prediction_scores,
                cross_relationship_score,
                answer_score,
            ) + lxmert_output[3:]
            return ((total_loss,) + output) if total_loss is not None else output
        return LxmertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=lang_prediction_scores,
            cross_relationship_score=cross_relationship_score,
            question_answering_score=answer_score,
            language_hidden_states=lxmert_output.language_hidden_states,
            vision_hidden_states=lxmert_output.vision_hidden_states,
            language_attentions=lxmert_output.language_attentions,
            vision_attentions=lxmert_output.vision_attentions,
            cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
        )
@add_start_docstrings(
    """Lxmert Model with a visual-answering head on top for downstream QA tasks""",
    LXMERT_START_DOCSTRING,
)
class LxmertForQuestionAnswering(LxmertPreTrainedModel):
    # LXMERT backbone plus a single answer-classification head; the loss is a
    # plain cross-entropy over `num_qa_labels` classes.
    def __init__(self, config):
        super().__init__(config)
        # Configuration
        self.config = config
        self.num_qa_labels = config.num_qa_labels
        self.visual_loss_normalizer = config.visual_loss_normalizer
        # Lxmert backbone
        self.lxmert = LxmertModel(config)
        self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
        # Weight initialization
        self.init_weights()
        # Loss function
        self.loss = CrossEntropyLoss()

    def resize_num_qa_labels(self, num_labels):
        """
        Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size will add newly
        initialized weights. Reducing the size will remove weights from the end
        Args:
            num_labels (:obj:`int`, `optional`):
                New number of labels in the linear layer weight matrix.
                Increasing the size will add newly initialized weights at the end. Reducing the size will remove
                weights from the end. If not provided or :obj:`None`, just returns a pointer to the qa labels
                :obj:`torch.nn.Linear` module of the model without doing anything.
        Return:
            :obj:`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
        """
        cur_qa_logit_layer = self.get_qa_logit_layer()
        if num_labels is None or cur_qa_logit_layer is None:
            return
        new_qa_logit_layer = self._resize_qa_labels(num_labels)
        # Keep the config in sync so save/load round-trips correctly.
        self.config.num_qa_labels = num_labels
        self.num_qa_labels = num_labels
        return new_qa_logit_layer

    def _resize_qa_labels(self, num_labels):
        # Replace the final classifier of the answer head with a resized copy.
        cur_qa_logit_layer = self.get_qa_logit_layer()
        new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
        self._set_qa_logit_layer(new_qa_logit_layer)
        return self.get_qa_logit_layer()

    def get_qa_logit_layer(self) -> nn.Module:
        """
        Returns the linear layer that produces question answering logits
        Returns:
            :obj:`nn.Module`: A torch module mapping the question answering prediction hidden states.
            :obj:`None`: A NoneType object if Lxmert does not have the visual answering head.
        """
        if hasattr(self, "answer_head"):
            return self.answer_head.logit_fc[-1]

    def _set_qa_logit_layer(self, qa_logit_layer):
        self.answer_head.logit_fc[-1] = qa_logit_layer

    def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
        # Returns a new Linear layer with `num_labels` rows, copying as many of
        # the old label weights (and biases, if present) as fit.
        if num_labels is None:
            return cur_qa_logit_layer
        cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
        if cur_qa_labels == num_labels:
            return cur_qa_logit_layer
        # Build new linear output
        if getattr(cur_qa_logit_layer, "bias", None) is not None:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
        else:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
        new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
        # initialize all new labels
        self._init_weights(new_qa_logit_layer)
        # Copy labels from the previous weights
        num_labels_to_copy = min(cur_qa_labels, num_labels)
        new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
        if getattr(cur_qa_logit_layer, "bias", None) is not None:
            new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
        return new_qa_logit_layer

    @add_start_docstrings_to_callable(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="unc-nlp/lxmert-base-uncased",
        output_type=LxmertForQuestionAnsweringOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        visual_feats=None,
        visual_pos=None,
        attention_mask=None,
        visual_attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels: (``Torch.Tensor`` of shape ``(batch_size)``, `optional`):
            A one-hot representation of the correct answer
        Returns:
        """
        lxmert_output = self.lxmert(
            input_ids=input_ids,
            visual_feats=visual_feats,
            visual_pos=visual_pos,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            visual_attention_mask=visual_attention_mask,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        # Only the pooled (cross-modal) output feeds the answer head.
        pooled_output = lxmert_output[2]
        answer_score = self.answer_head(pooled_output)
        loss = None
        if labels is not None:
            loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
        if not return_dict:
            output = (answer_score,) + lxmert_output[3:]
            return (loss,) + output if loss is not None else output
        return LxmertForQuestionAnsweringOutput(
            loss=loss,
            question_answering_score=answer_score,
            language_hidden_states=lxmert_output.language_hidden_states,
            vision_hidden_states=lxmert_output.vision_hidden_states,
            language_attentions=lxmert_output.language_attentions,
            vision_attentions=lxmert_output.vision_attentions,
            cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
        )
| 44.52467 | 177 | 0.672114 |
4f7ce3d87ad08d4e15272441352031b84b18bdd9 | 10,688 | py | Python | depfixer.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | depfixer.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | depfixer.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, struct
# ELF constants used below; values come from the ELF specification (elf(5)).
SHT_STRTAB = 3   # section type: string table
DT_NEEDED = 1    # .dynamic tag: name of a needed shared library
DT_RPATH = 15    # .dynamic tag: library search path (rpath)
DT_STRTAB = 5    # .dynamic tag: address of the dynamic string table
DT_SONAME = 14   # .dynamic tag: shared object name
class DataSizes():
    """struct format strings and byte sizes for the ELF primitive types.

    The endianness prefix is picked from ``is_le``; the pointer-width
    dependent types (Addr/Off, plus XWord/Sxword on 64-bit) depend on
    ``ptrsize`` (32 or 64).
    """

    def __init__(self, ptrsize, is_le):
        prefix = '<' if is_le else '>'
        self.Half = prefix + 'h'
        self.HalfSize = 2
        self.Word = prefix + 'I'
        self.WordSize = 4
        self.Sword = prefix + 'i'
        self.SwordSize = 4
        if ptrsize == 64:
            self.Addr = prefix + 'Q'
            self.AddrSize = 8
            self.Off = prefix + 'Q'
            self.OffSize = 8
            self.XWord = prefix + 'Q'
            self.XWordSize = 8
            self.Sxword = prefix + 'q'
            self.SxwordSize = 8
        else:
            # On 32-bit ELF, Addr and Off collapse to 4-byte words;
            # XWord/Sxword do not exist.
            self.Addr = prefix + 'I'
            self.AddrSize = 4
            self.Off = prefix + 'I'
            self.OffSize = 4
class DynamicEntry(DataSizes):
    """One entry of the .dynamic section: a (d_tag, value) pair.

    Reads the pair from ``ifile`` at construction time, using the layout
    dictated by pointer size and endianness.
    """

    def __init__(self, ifile, ptrsize, is_le):
        super().__init__(ptrsize, is_le)
        self.ptrsize = ptrsize
        if ptrsize == 64:
            tag_fmt, tag_size = self.Sxword, self.SxwordSize
            val_fmt, val_size = self.XWord, self.XWordSize
        else:
            tag_fmt, tag_size = self.Sword, self.SwordSize
            val_fmt, val_size = self.Word, self.WordSize
        self.d_tag = struct.unpack(tag_fmt, ifile.read(tag_size))[0]
        self.val = struct.unpack(val_fmt, ifile.read(val_size))[0]

    def write(self, ofile):
        """Serialize this entry back out in the same layout it was read with."""
        if self.ptrsize == 64:
            ofile.write(struct.pack(self.Sxword, self.d_tag))
            ofile.write(struct.pack(self.XWord, self.val))
        else:
            ofile.write(struct.pack(self.Sword, self.d_tag))
            ofile.write(struct.pack(self.Word, self.val))
class SectionHeader(DataSizes):
    """An ELF section header (Elf32_Shdr / Elf64_Shdr), read field by field
    from the current position of ``ifile``."""

    def __init__(self, ifile, ptrsize, is_le):
        super().__init__(ptrsize, is_le)
        is_64 = ptrsize == 64

        def read_word():
            return struct.unpack(self.Word, ifile.read(self.WordSize))[0]

        def read_xword():
            # Xword fields collapse to 4-byte Words on 32-bit ELF.
            if is_64:
                return struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
            return read_word()

        # Field order must match the on-disk Elf_Shdr layout exactly.
        self.sh_name = read_word()                                                 # Elf64_Word
        self.sh_type = read_word()                                                 # Elf64_Word
        self.sh_flags = read_xword()                                               # Elf64_Xword
        self.sh_addr = struct.unpack(self.Addr, ifile.read(self.AddrSize))[0]      # Elf64_Addr
        self.sh_offset = struct.unpack(self.Off, ifile.read(self.OffSize))[0]      # Elf64_Off
        self.sh_size = read_xword()                                                # Elf64_Xword
        self.sh_link = read_word()                                                 # Elf64_Word
        self.sh_info = read_word()                                                 # Elf64_Word
        self.sh_addralign = read_xword()                                           # Elf64_Xword
        self.sh_entsize = read_xword()                                             # Elf64_Xword
class Elf(DataSizes):
    """Minimal ELF reader/patcher.

    Opens the file read-write and parses the header, section headers and the
    .dynamic section, so DT_NEEDED and DT_RPATH string entries can be printed
    or rewritten in place.
    """

    def __init__(self, bfile):
        self.bfile = bfile
        self.bf = open(bfile, 'r+b')
        (self.ptrsize, self.is_le) = self.detect_elf_type()
        super().__init__(self.ptrsize, self.is_le)
        self.parse_header()
        self.parse_sections()
        self.parse_dynamic()

    def close(self):
        """Close the underlying file handle (previously it was leaked and only
        released at process exit)."""
        self.bf.close()

    def detect_elf_type(self):
        """Return (pointer size in bits, is-little-endian) from e_ident."""
        data = self.bf.read(6)
        if data[1:4] != b'ELF':
            # This script gets called to non-elf targets too
            # so just ignore them.
            print('File "%s" is not an ELF file.' % self.bfile)
            sys.exit(0)
        if data[4] == 1:
            ptrsize = 32
        elif data[4] == 2:
            ptrsize = 64
        else:
            print('File "%s" has unknown ELF class.' % self.bfile)
            sys.exit(1)
        if data[5] == 1:
            is_le = True
        elif data[5] == 2:
            is_le = False
        else:
            print('File "%s" has unknown ELF endianness.' % self.bfile)
            sys.exit(1)
        return (ptrsize, is_le)

    def parse_header(self):
        """Read the fixed-size ELF file header into e_* attributes."""
        self.bf.seek(0)
        self.e_ident = struct.unpack('16s', self.bf.read(16))[0]
        self.e_type = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_machine = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_version = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
        self.e_entry = struct.unpack(self.Addr, self.bf.read(self.AddrSize))[0]
        self.e_phoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
        self.e_shoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
        self.e_flags = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
        self.e_ehsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_phentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_phnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shstrndx = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]

    def parse_sections(self):
        """Read all section headers starting at e_shoff."""
        self.bf.seek(self.e_shoff)
        self.sections = []
        for i in range(self.e_shnum):
            self.sections.append(SectionHeader(self.bf, self.ptrsize, self.is_le))

    def read_str(self):
        """Read a NUL-terminated byte string at the current file offset."""
        arr = []
        x = self.bf.read(1)
        while x != b'\0':
            arr.append(x)
            x = self.bf.read(1)
            if x == b'':
                raise RuntimeError('Tried to read past the end of the file')
        return b''.join(arr)

    def find_section(self, target_name):
        """Return the section header named target_name (bytes), or None."""
        section_names = self.sections[self.e_shstrndx]
        for i in self.sections:
            self.bf.seek(section_names.sh_offset + i.sh_name)
            name = self.read_str()
            if name == target_name:
                return i
        return None

    def parse_dynamic(self):
        """Read the .dynamic entries up to and including the DT_NULL terminator."""
        sec = self.find_section(b'.dynamic')
        self.dynamic = []
        self.bf.seek(sec.sh_offset)
        while True:
            e = DynamicEntry(self.bf, self.ptrsize, self.is_le)
            self.dynamic.append(e)
            if e.d_tag == 0:
                break

    def print_section_names(self):
        section_names = self.sections[self.e_shstrndx]
        for i in self.sections:
            self.bf.seek(section_names.sh_offset + i.sh_name)
            print(self.read_str().decode())

    def print_soname(self):
        # NOTE(review): assumes both DT_SONAME and DT_STRTAB are present;
        # a binary without a soname raises AttributeError here — confirm
        # callers only use this on shared libraries.
        soname = None
        strtab = None
        for i in self.dynamic:
            if i.d_tag == DT_SONAME:
                soname = i
            if i.d_tag == DT_STRTAB:
                strtab = i
        self.bf.seek(strtab.val + soname.val)
        print(self.read_str())

    def get_rpath_offset(self):
        """Return the file offset of the rpath string, or None if absent."""
        sec = self.find_section(b'.dynstr')
        for i in self.dynamic:
            if i.d_tag == DT_RPATH:
                return sec.sh_offset + i.val
        return None

    def print_rpath(self):
        offset = self.get_rpath_offset()
        if offset is None:
            print("This file does not have an rpath.")
        else:
            self.bf.seek(offset)
            print(self.read_str())

    def print_deps(self):
        sec = self.find_section(b'.dynstr')
        for i in self.dynamic:
            if i.d_tag == DT_NEEDED:
                self.bf.seek(sec.sh_offset + i.val)
                print(self.read_str())

    def fix_deps(self, prefix):
        """Strip the directory part from every DT_NEEDED entry starting with
        prefix, NUL-padding the new name so string-table offsets stay valid."""
        sec = self.find_section(b'.dynstr')
        deps = []
        for i in self.dynamic:
            if i.d_tag == DT_NEEDED:
                deps.append(i)
        for i in deps:
            offset = sec.sh_offset + i.val
            self.bf.seek(offset)
            name = self.read_str()
            if name.startswith(prefix):
                basename = name.split(b'/')[-1]
                padding = b'\0' * (len(name) - len(basename))
                newname = basename + padding
                assert len(newname) == len(name)
                self.bf.seek(offset)
                self.bf.write(newname)

    def fix_rpath(self, new_rpath):
        """Overwrite the rpath in place; new_rpath must fit in the old slot."""
        rp_off = self.get_rpath_offset()
        if rp_off is None:
            print('File does not have rpath. It should be a fully static executable.')
            return
        self.bf.seek(rp_off)
        old_rpath = self.read_str()
        if len(old_rpath) < len(new_rpath):
            # BUGFIX: previously this only printed a warning and then wrote the
            # longer rpath anyway, overflowing into the adjacent .dynstr data
            # and corrupting the binary. Abort before writing instead.
            sys.exit("New rpath must not be longer than the old one.")
        self.bf.seek(rp_off)
        self.bf.write(new_rpath)
        # NUL-fill the remainder of the old string plus its terminator.
        self.bf.write(b'\0' * (len(old_rpath) - len(new_rpath) + 1))
        if len(new_rpath) == 0:
            self.remove_rpath_entry()

    def remove_rpath_entry(self):
        """Neutralize the DT_RPATH entry (tag -> DT_NULL) and move it behind
        the terminator, then rewrite the whole .dynamic section."""
        sec = self.find_section(b'.dynamic')
        for (i, entry) in enumerate(self.dynamic):
            if entry.d_tag == DT_RPATH:
                rpentry = self.dynamic[i]
                rpentry.d_tag = 0
                self.dynamic = self.dynamic[:i] + self.dynamic[i + 1:] + [rpentry]
                break
        self.bf.seek(sec.sh_offset)
        for entry in self.dynamic:
            entry.write(self.bf)
# Command-line entry point: print an ELF binary's rpath, or replace it.
if __name__ == '__main__':
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        print('This application resets target rpath.')
        print('Don\'t run this unless you know what you are doing.')
        print('%s: <binary file> <prefix>' % sys.argv[0])
        exit(1)
    e = Elf(sys.argv[1])
    if len(sys.argv) == 2:
        # No replacement given: just display the current rpath.
        e.print_rpath()
    else:
        # Rewrite the rpath in place (must not be longer than the old one).
        new_rpath = sys.argv[2]
        e.fix_rpath(new_rpath.encode('utf8'))
    #e.fix_deps(prefix.encode())
| 35.626667 | 88 | 0.573915 |
b9f29201bc74a094ba9b7b47f9c64b07c1faf29a | 8,110 | py | Python | srctools/compiler/mdl_compiler.py | Smaedd/srctools | 7d2660aca84b3f8b70bc00478216b608d47c719b | [
"MIT"
] | null | null | null | srctools/compiler/mdl_compiler.py | Smaedd/srctools | 7d2660aca84b3f8b70bc00478216b608d47c719b | [
"MIT"
] | null | null | null | srctools/compiler/mdl_compiler.py | Smaedd/srctools | 7d2660aca84b3f8b70bc00478216b608d47c719b | [
"MIT"
] | null | null | null | """Manages potential models that are being generated.
Each comes with a key, used to identify a previously compiled version.
We can then reuse already compiled versions.
"""
import os
import pickle
import subprocess
import tempfile
import random
from typing import (
Optional, TypeVar,
Dict, Set, List, Hashable,
Callable, Tuple,
)
from pathlib import Path
from srctools import AtomicWriter
from srctools.bsp_transform import Context
from srctools.game import Game
from srctools.mdl import MDL_EXTS
from srctools.packlist import PackList, LOGGER
# Key identifying a previously compiled model. It must be hashable (and in
# practice pickleable) so it can round-trip through the manifest cache.
ModelKey = TypeVar('ModelKey', bound=Hashable)
AnyModelKey = Hashable
# Input/output types of the user-supplied compile callback.
InT = TypeVar('InT')
OutT = TypeVar('OutT')
class GenModel:
    """Tracks information about this model."""

    def __init__(self, mdl_name: str, result: OutT=None) -> None:
        self.name = mdl_name  # This is just the filename.
        # Set to True once the model is actually referenced; unused models
        # are culled from disk when the compiler context exits.
        self.used = False
        self.result = result  # Return value from compile function.

    def __repr__(self) -> str:
        # NOTE(review): the opening quote around the name is never closed
        # in this format string — cosmetic, but worth confirming.
        return f'<Model "{self.name}, used={self.used}>'
class ModelCompiler:
    """Manages the set of merged models that have been generated.

    The version number can be incremented to invalidate previous compilations.
    """

    def __init__(
        self,
        game: Game,
        studiomdl_loc: Path,
        pack: PackList,
        map_name: str,
        folder_name: str,
        version: int=0,
    ) -> None:
        # The models already constructed.
        self._built_models: Dict[AnyModelKey, GenModel] = {}
        # The random indexes we use to produce filenames.
        self._mdl_names: Set[str] = set()
        self.game: Game = game
        # Relative folder (under models/) where compiled models are placed.
        self.model_folder = 'maps/{}/{}/'.format(map_name, folder_name)
        self.model_folder_abs = game.path / 'models' / self.model_folder
        self.pack: PackList = pack
        self.version = version
        self.studiomdl_loc = studiomdl_loc

    @classmethod
    def from_ctx(cls, ctx: Context, folder_name: str, version: int=0) -> 'ModelCompiler':
        """Convenience method to construct from the context's data."""
        return cls(
            ctx.game,
            ctx.studiomdl,
            ctx.pack,
            ctx.bsp_path.stem,
            folder_name,
            version,
        )

    def use_count(self) -> int:
        """Return the number of used models."""
        return sum(1 for mdl in self._built_models.values() if mdl.used)

    def __enter__(self) -> 'ModelCompiler':
        """Load the cached manifest of previously compiled models, if any.

        Any parse failure or version mismatch simply means starting fresh;
        it is never fatal.
        """
        # Ensure the folder exists.
        # NOTE(review): this creates the *relative* path `self.model_folder`,
        # not `self.model_folder_abs` — confirm that is intended.
        os.makedirs(self.model_folder, exist_ok=True)
        data: List[Tuple[AnyModelKey, str, object]]
        version = 0
        try:
            with (self.model_folder_abs / 'manifest.bin').open('rb') as f:
                result = pickle.load(f)
                if isinstance(result, tuple):
                    data, version = result
                else:  # V0, no number.
                    data = result
        except FileNotFoundError:
            # No manifest yet; nothing to restore.
            return self
        except Exception:
            LOGGER.warning(
                'Could not parse existing models file '
                'models/{}/manifest.bin:',
                self.model_folder,
                exc_info=True,
            )
            return self
        if version != self.version:
            # Different version, ignore the data.
            return self
        # Remember which filenames already exist on disk (casefolded).
        for mdl_name in self.model_folder_abs.glob('*.mdl'):
            self._mdl_names.add(str(mdl_name.stem).casefold())
        for tup in data:
            try:
                key, name, mdl_result = tup
                if not isinstance(name, str):
                    continue
            except ValueError:
                continue  # Malformed, ignore.
            if name in self._mdl_names:
                self._built_models[key] = GenModel(name, mdl_result)
            else:
                LOGGER.warning('Model in manifest but not present: {}', name)
        LOGGER.info('Found {} existing models/{}*', len(self._built_models), self.model_folder)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Write the constructed models to the cache file and remove unused models."""
        # On error, leave the manifest and files untouched.
        if exc_type is not None or exc_val is not None:
            return
        data = []
        used_mdls = set()
        for key, mdl in self._built_models.items():
            if mdl.used:
                data.append((key, mdl.name, mdl.result))
                used_mdls.add(mdl.name.casefold())
        # Atomic write so a crash cannot leave a half-written manifest.
        with AtomicWriter(self.model_folder_abs / 'manifest.bin', is_bytes=True) as f:
            pickle.dump((data, self.version), f, pickle.HIGHEST_PROTOCOL)
        # Cull model files that were not referenced in this compile.
        for mdl_file in self.model_folder_abs.glob('*'):
            if mdl_file.suffix not in {'.mdl', '.phy', '.vtx', '.vvd'}:
                continue
            # Strip all suffixes.
            if mdl_file.name[:mdl_file.name.find('.')].casefold() in used_mdls:
                continue
            LOGGER.info('Culling {}...', mdl_file)
            try:
                mdl_file.unlink()
            except FileNotFoundError:
                pass

    def get_model(
        self,
        key: ModelKey,
        compile_func: Callable[[ModelKey, Path, str, InT], OutT],
        args: InT,
    ) -> Tuple[str, OutT]:
        """Given a model key, either return the existing model, or compile it.

        Either way the result is the new model name, which also has been packed.
        The provided function will be called if it needs to be compiled, passing
        in the following arguments:
        * The key
        * The temporary folder to write to
        * The name of the model to generate.
        * The args parameter, which can be anything.
        It should create "mdl.qc" in the folder, and then
        StudioMDL will be called on the model to comile it. The return value will
        be passed back from this function.
        If the model key is None, a new model will always be compiled.
        The model key and return value must be pickleable, so they can be saved
        for use in subsequent compiles.

        NOTE(review): the docstring says "mdl.qc" but studiomdl is invoked on
        'model.qc' below — confirm which name compile_func must produce.
        """
        try:
            model = self._built_models[key]
        except KeyError:
            # Need to build the model.
            # Figure out a name to use.
            while True:
                # Random 16-bit hex suffix; retry on (rare) collision.
                mdl_name = 'mdl_{:04x}'.format(random.getrandbits(16))
                if mdl_name not in self._mdl_names:
                    self._mdl_names.add(mdl_name)
                    break
            model = self._built_models[key] = GenModel(mdl_name)
            with tempfile.TemporaryDirectory(prefix='mdl_compile') as folder:
                path = Path(folder)
                model.result = compile_func(key, path, f'{self.model_folder}{mdl_name}.mdl', args)
                studio_args = [
                    str(self.studiomdl_loc),
                    '-nop4',
                    '-game', str(self.game.path),
                    str(path / 'model.qc'),
                ]
                res = subprocess.run(studio_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                LOGGER.debug(
                    'Executing {}:\n{}',
                    studio_args,
                    res.stdout.replace(b'\r\n', b'\n').decode('ascii', 'replace'),
                )
                res.check_returncode()  # Or raise.
        if not model.used:
            # Pack it in.
            model.used = True
            full_model_path = self.model_folder_abs / model.name
            LOGGER.debug('Packing model {}.mdl:', full_model_path)
            for ext in MDL_EXTS:
                try:
                    # Not all model formats produce every companion file.
                    with open(str(full_model_path.with_suffix(ext)), 'rb') as fb:
                        self.pack.pack_file(
                            'models/{}{}{}'.format(
                                self.model_folder, model.name, ext,
                            ),
                            data=fb.read(),
                        )
                except FileNotFoundError:
                    pass
        return f'models/{self.model_folder}{model.name}.mdl', model.result
| 35.414847 | 99 | 0.561406 |
66470e1dc1f32b46928ce51f7ac985da455f90c3 | 3,767 | py | Python | contrib/macdeploy/custom_dsstore.py | dongri-project/dongri | 2fd8d8d0ef039b0bd814edebb00c92b7f25f06b3 | [
"MIT"
] | 10 | 2018-05-15T16:13:44.000Z | 2018-05-15T16:18:20.000Z | contrib/macdeploy/custom_dsstore.py | dongri-project/dongri | 2fd8d8d0ef039b0bd814edebb00c92b7f25f06b3 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | dongri-project/dongri | 2fd8d8d0ef039b0bd814edebb00c92b7f25f06b3 | [
"MIT"
] | 2 | 2018-05-22T11:52:52.000Z | 2018-06-15T06:10:35.000Z | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00dongriuser:\x00Documents:\x00dongri:\x00dongri:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/dongriuser/Documents/dongri/dongri/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Dongri-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.754098 | 1,817 | 0.726838 |
4268565fc82d9c5147f6a073d4275b9463171553 | 2,861 | py | Python | synapse/replication/slave/storage/account_data.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | 1 | 2021-04-27T19:04:56.000Z | 2021-04-27T19:04:56.000Z | synapse/replication/slave/storage/account_data.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | null | null | null | synapse/replication/slave/storage/account_data.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | 1 | 2020-09-23T12:36:11.000Z | 2020-09-23T12:36:11.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream
from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore, BaseSlavedStore):
    """Read-side account-data store for worker processes.

    Follows the account-data stream position advertised by the writer and
    invalidates the local caches as replication rows arrive.
    """

    def __init__(self, database: DatabasePool, db_conn, hs):
        # Tracks the stream position allocated by the master; this process
        # only ever advances it from replication, never allocates IDs.
        self._account_data_id_gen = SlavedIdTracker(
            db_conn,
            "account_data",
            "stream_id",
            extra_tables=[
                ("room_account_data", "stream_id"),
                ("room_tags_revisions", "stream_id"),
            ],
        )
        super().__init__(database, db_conn, hs)

    def get_max_account_data_stream_id(self):
        # The latest account-data stream position this worker has seen.
        return self._account_data_id_gen.get_current_token()

    def process_replication_rows(self, stream_name, instance_name, token, rows):
        """Advance the stream tracker and invalidate caches for changed rows."""
        if stream_name == TagAccountDataStream.NAME:
            self._account_data_id_gen.advance(instance_name, token)
            for row in rows:
                self.get_tags_for_user.invalidate((row.user_id,))
                self._account_data_stream_cache.entity_has_changed(row.user_id, token)
        elif stream_name == AccountDataStream.NAME:
            self._account_data_id_gen.advance(instance_name, token)
            for row in rows:
                # An empty room_id means global (account-wide) account data.
                if not row.room_id:
                    self.get_global_account_data_by_type_for_user.invalidate(
                        (row.data_type, row.user_id)
                    )
                self.get_account_data_for_user.invalidate((row.user_id,))
                self.get_account_data_for_room.invalidate((row.user_id, row.room_id))
                self.get_account_data_for_room_and_type.invalidate(
                    (row.user_id, row.room_id, row.data_type)
                )
                self._account_data_stream_cache.entity_has_changed(row.user_id, token)
        return super().process_replication_rows(stream_name, instance_name, token, rows)
| 46.145161 | 88 | 0.698707 |
d0c1a54ca84d8b1f9df8eb57a2a95449e268b71a | 4,098 | py | Python | pikudhaoref/abc.py | adam757521/PikudHaoref.py | 49e2bfcc67478ad80f681d8ad1e358d54466a384 | [
"MIT"
] | null | null | null | pikudhaoref/abc.py | adam757521/PikudHaoref.py | 49e2bfcc67478ad80f681d8ad1e358d54466a384 | [
"MIT"
] | null | null | null | pikudhaoref/abc.py | adam757521/PikudHaoref.py | 49e2bfcc67478ad80f681d8ad1e358d54466a384 | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, List, Dict
import json
from .city import City
from .base import EventManager
from .exceptions import AccessDenied
if TYPE_CHECKING:
from datetime import datetime
__all__ = ("HTTPClient", "Client")
class HTTPClient(ABC):
    """
    Represents a HTTP client.
    """

    __slots__ = ("session", "city_data", "proxy")

    @staticmethod
    def format_datetime(date: datetime) -> str:
        """
        Formats the datetime.

        :param datetime date: The datetime.
        :return: The formatted datetime
        :rtype: str
        """
        # Day-first format expected by the pikudhaoref API.
        return date.strftime("%d.%m.%Y")

    @staticmethod
    def parse_response(response: str) -> Any:
        """
        Parses the API response.

        :param str response: The response.
        :raises: AccessDenied: You cannot access the pikudhaoref API from outside Israel.
        :return: The parsed response.
        :rtype: Optional[Dict]
        """
        if "Access Denied" in response:
            raise AccessDenied(
                "You cannot access the pikudhaoref API from outside Israel."
            )
        if response == "":  # Empty body: treat it as an empty JSON object.
            return {}
        return json.loads(response)

    def request(self, method: str, url: str, headers: Dict[str, str] = None) -> Any:
        """
        |maybecoro|
        Sends a request to the URL with the method.

        :param str method: The method.
        :param Dict[str, str] headers: The headers.
        :param str url: The URL.
        :return: The parsed response.
        :rtype: Optional[Dict]
        """
        # NOTE(review): docstring-only body (returns None) and, unlike the
        # methods below, not marked @abstractmethod — confirm intended.

    @staticmethod
    def _format_city_data(dictionary: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Formats the city data.

        :param Dict[str, Any] dictionary: The dictionary.
        :return: The formatted city data.
        :rtype: List[Dict[str, Any]]
        """
        areas = dictionary["areas"]
        cities = list(dictionary["cities"].values())
        for city in cities:
            # Replace the numeric area reference with its resolved value.
            city.pop("id")
            city["area"] = areas[str(city["area"])]
        return cities

    @abstractmethod
    def initialize_city_data(self) -> None:
        """
        |maybecoro|
        Initializes the city data.

        :return: None
        :rtype: None
        """

    @abstractmethod
    def get_history(self, mode: int) -> List[dict]:
        """
        |maybecoro|
        Returns the history of sirens in the specific mode.

        :param int mode: The mode.
        :return: The list of sirens.
        :rtype: List[dict]
        """

    # NOTE(review): unlike its siblings this is not @abstractmethod — confirm.
    def get_range_history(self, start: datetime, end: datetime) -> List[dict]:
        """
        |maybecoro|
        Returns the history of sirens in the range.

        :param datetime start: The start.
        :param datetime end: The end.
        :return: The list of sirens.
        :rtype: List[dict]
        """

    @abstractmethod
    def get_current_sirens(self) -> List[str]:
        """
        |maybecoro|
        Returns the current sirens.

        :return: The list of city names.
        :rtype: List[str]
        """
class Client(ABC, EventManager):
    """
    Represents a client
    """

    __slots__ = (
        "closed",
        "http",
        "update_interval",
        "_known_sirens",
        "city_cache",
        "_initialized",
    )

    @staticmethod
    def remove_duplicates(list_: list) -> list:
        """
        Removes duplicate elements from the list.

        :param list list_: The list.
        :return: The removed duplicate list.
        :rtype: list
        """
        # dict preserves insertion order, so the first occurrence wins.
        return [element for element in dict.fromkeys(list_)]

    def get_city(self, city_name: str) -> City | str:
        """Return the City matching city_name, consulting the cache first."""
        for cached_city in self.city_cache:
            matches = city_name == cached_city or city_name in cached_city.name.languages
            if matches:
                return cached_city

        # Cache miss: build a new City and remember it for next time.
        new_city = City.from_city_name(city_name, self.http.city_data)
        self.city_cache.append(new_city)
        return new_city
| 23.825581 | 89 | 0.57101 |
e202eb75e6b2e39c95ce5f940c5dcc089a7f1a52 | 1,339 | py | Python | app/core/tests/test_admin.py | oscarmunoz1/recipe-app-api | 19f524d34522a9a1950658d294c1b3a324e43138 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | oscarmunoz1/recipe-app-api | 19f524d34522a9a1950658d294c1b3a324e43138 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | oscarmunoz1/recipe-app-api | 19f524d34522a9a1950658d294c1b3a324e43138 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Tests for the Django admin pages of the custom user model."""

    def setUp(self):
        """Create a logged-in superuser and a regular user fixture."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@omunoz.com',
            password='password123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@omunoz.com',
            password='password123',
            name='Test user full name'
        )

    def test_users_listed(self):
        """Test that users are listed on user page"""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        # Removed a stray debug print(res); the assertions below cover it.
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)

        self.assertEqual(res.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)

        self.assertEqual(res.status_code, 200)
| 30.431818 | 68 | 0.636296 |
6c58c7ccf2cc01b1ccd2b2828566e6c4fb67de8b | 7,354 | py | Python | python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 4 | 2021-02-08T13:07:15.000Z | 2021-10-22T00:58:33.000Z | python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 2 | 2019-07-26T04:06:05.000Z | 2019-07-29T04:25:24.000Z | python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 5 | 2021-12-10T11:20:06.000Z | 2022-02-18T05:18:12.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test_xpu import OpTest, XPUOpTest
import paddle
from paddle.fluid import Program, program_guard
class TestClipOp(XPUOpTest):
    """Base op-test for the XPU clip kernel; subclasses override initTestCase."""

    def set_xpu(self):
        self.__class__.use_xpu = True
        self.place = paddle.XPUPlace(0)

    def setUp(self):
        self.set_xpu()
        self.max_relative_error = 0.006
        self.inputs = {}
        self.initTestCase()
        self.op_type = "clip"
        self.attrs = {}
        self.attrs['min'] = self.min
        self.attrs['max'] = self.max
        # Tensor inputs 'Min'/'Max' (if provided) take precedence over attrs.
        if 'Min' in self.inputs:
            min_v = self.inputs['Min']
        else:
            min_v = self.attrs['min']
        if 'Max' in self.inputs:
            max_v = self.inputs['Max']
        else:
            max_v = self.attrs['max']
        # Nudge values too close to the clip bounds away from them, so the
        # numeric gradient check is not evaluated at a non-differentiable point.
        # (`input` shadows the builtin; kept for fidelity with the op test.)
        input = np.random.random(self.shape).astype("float32")
        input[np.abs(input - min_v) < self.max_relative_error] = 0.5
        input[np.abs(input - max_v) < self.max_relative_error] = 0.5
        self.inputs['X'] = input
        self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}

    def test_check_output(self):
        paddle.enable_static()
        self.check_output_with_place(self.place)
        paddle.disable_static()

    def test_check_grad_normal(self):
        paddle.enable_static()
        self.check_grad_with_place(self.place, ['X'], 'Out')
        paddle.disable_static()

    def initTestCase(self):
        # Default case: tensor Min/Max inputs override the attr values.
        self.shape = (4, 10, 10)
        self.max = 0.8
        self.min = 0.3
        self.inputs['Max'] = np.array([0.8]).astype('float32')
        self.inputs['Min'] = np.array([0.1]).astype('float32')
# Variations of TestClipOp covering different shapes and min/max sources.
class TestCase1(TestClipOp):
    def initTestCase(self):
        # Attr-only bounds, 3-D shape.
        self.shape = (8, 16, 8)
        self.max = 0.7
        self.min = 0.0


class TestCase2(TestClipOp):
    def initTestCase(self):
        # Full [0, 1] range, 2-D shape.
        self.shape = (8, 16)
        self.max = 1.0
        self.min = 0.0


class TestCase3(TestClipOp):
    def initTestCase(self):
        self.shape = (4, 8, 16)
        self.max = 0.7
        self.min = 0.2


class TestCase4(TestClipOp):
    def initTestCase(self):
        # Tensor Min/Max inputs that differ from the attr values.
        self.shape = (4, 8, 8)
        self.max = 0.7
        self.min = 0.2
        self.inputs['Max'] = np.array([0.8]).astype('float32')
        self.inputs['Min'] = np.array([0.3]).astype('float32')


class TestCase5(TestClipOp):
    def initTestCase(self):
        # Degenerate range: min == max clamps everything to one value.
        self.shape = (4, 8, 16)
        self.max = 0.5
        self.min = 0.5
class TestClipOpError(unittest.TestCase):
    """Checks that fluid.layers.clip rejects invalid inputs."""

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            input_data = np.random.random((2, 4)).astype("float32")

            def test_Variable():
                # Raw numpy arrays are not accepted — must be a Variable.
                fluid.layers.clip(x=input_data, min=-1.0, max=1.0)

            self.assertRaises(TypeError, test_Variable)

            def test_dtype():
                # int32 is not a supported dtype for clip.
                x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32')
                fluid.layers.clip(x=x2, min=-1.0, max=1.0)

            self.assertRaises(TypeError, test_dtype)
        paddle.disable_static()
class TestClipAPI(unittest.TestCase):
    """API-level tests for paddle.clip in static and dygraph modes."""

    def _executed_api(self, x, min=None, max=None):
        # Overridden by TestInplaceClipAPI to exercise the in-place variant.
        return paddle.clip(x, min, max)

    def test_clip(self):
        # Static-graph mode: bounds given as tensors, floats, or omitted.
        paddle.enable_static()
        data_shape = [1, 9, 9, 4]
        data = np.random.random(data_shape).astype('float32')
        images = fluid.data(name='image', shape=data_shape, dtype='float32')
        min = fluid.data(name='min', shape=[1], dtype='float32')
        max = fluid.data(name='max', shape=[1], dtype='float32')

        place = fluid.XPUPlace(0) if fluid.core.is_compiled_with_xpu(
        ) else fluid.CPUPlace()
        exe = fluid.Executor(place)

        out_1 = self._executed_api(images, min=min, max=max)
        out_2 = self._executed_api(images, min=0.2, max=0.9)
        out_3 = self._executed_api(images, min=0.3)
        out_4 = self._executed_api(images, max=0.7)
        out_5 = self._executed_api(images, min=min)
        out_6 = self._executed_api(images, max=max)
        out_7 = self._executed_api(images, max=-1.)
        out_8 = self._executed_api(images)

        res1, res2, res3, res4, res5, res6, res7, res8 = exe.run(
            fluid.default_main_program(),
            feed={
                "image": data,
                "min": np.array([0.2]).astype('float32'),
                "max": np.array([0.8]).astype('float32')
            },
            fetch_list=[
                out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8
            ])

        # Compare each variant against numpy's clip semantics.
        self.assertTrue(np.allclose(res1, data.clip(0.2, 0.8)))
        self.assertTrue(np.allclose(res2, data.clip(0.2, 0.9)))
        self.assertTrue(np.allclose(res3, data.clip(min=0.3)))
        self.assertTrue(np.allclose(res4, data.clip(max=0.7)))
        self.assertTrue(np.allclose(res5, data.clip(min=0.2)))
        self.assertTrue(np.allclose(res6, data.clip(max=0.8)))
        self.assertTrue(np.allclose(res7, data.clip(max=-1)))
        self.assertTrue(np.allclose(res8, data))
        paddle.disable_static()

    def test_clip_dygraph(self):
        # Dygraph mode with float and tensor bounds.
        paddle.disable_static()
        place = fluid.XPUPlace(0) if fluid.core.is_compiled_with_xpu(
        ) else fluid.CPUPlace()
        paddle.disable_static(place)
        data_shape = [1, 9, 9, 4]
        data = np.random.random(data_shape).astype('float32')
        images = paddle.to_tensor(data, dtype='float32')
        v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
        v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))

        # A fresh tensor per call, since the in-place subclass mutates it.
        out_1 = self._executed_api(images, min=0.2, max=0.8)
        images = paddle.to_tensor(data, dtype='float32')
        out_2 = self._executed_api(images, min=0.2, max=0.9)
        images = paddle.to_tensor(data, dtype='float32')
        out_3 = self._executed_api(images, min=v_min, max=v_max)

        self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
        self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
        self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8)))

    def test_errors(self):
        paddle.enable_static()
        # int16/int8 are unsupported dtypes for paddle.clip.
        x1 = fluid.data(name='x1', shape=[1], dtype="int16")
        x2 = fluid.data(name='x2', shape=[1], dtype="int8")
        self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8)
        self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8)
        paddle.disable_static()
class TestInplaceClipAPI(TestClipAPI):
    """Re-runs all TestClipAPI cases through the in-place Tensor.clip_."""

    def _executed_api(self, x, min=None, max=None):
        return x.clip_(min, max)


if __name__ == '__main__':
    unittest.main()
| 33.889401 | 76 | 0.612184 |
7b0b068adfdd3043d8e7d645c3a817e898614f6c | 1,099 | py | Python | virtual/lib/python3.6/site-packages/pylint/test/functional/too_many_branches.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 463 | 2015-01-15T08:17:42.000Z | 2022-03-28T15:10:20.000Z | virtual/lib/python3.6/site-packages/pylint/test/functional/too_many_branches.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 52 | 2015-01-06T02:43:59.000Z | 2022-03-14T11:15:21.000Z | virtual/lib/python3.6/site-packages/pylint/test/functional/too_many_branches.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 249 | 2015-01-07T22:49:49.000Z | 2022-03-18T02:32:06.000Z | """ Test for too many branches. """
# pylint: disable=using-constant-test
def wrong(): # [too-many-branches]
    """Fixture: deliberately exceeds the branch limit (do not simplify)."""
    if 1:
        pass
    elif 1:
        pass
    elif 1:
        pass
    elif 1:
        pass
    elif 1:
        pass
    elif 1:
        pass
    try:
        pass
    finally:
        pass
    if 2:
        pass
    while True:
        pass
    if 1:
        pass
    elif 2:
        pass
    elif 3:
        pass
def good():
    """Fixture: stays under the limit because branches inside the
    nested function are counted against that function, not this one.
    """
    def nested_1():
        """ empty """
        if 1:
            pass
        elif 2:
            pass
        elif 3:
            pass
        elif 4:
            pass

    nested_1()

    try:
        pass
    finally:
        pass

    try:
        pass
    finally:
        pass

    if 1:
        pass
    elif 2:
        pass
    elif 3:
        pass
    elif 4:
        pass
    elif 5:
        pass
    elif 6:
        pass
    elif 7:
        pass
7dc998a498c495e3cc17e7d9c6d543d0488ef61c | 92 | py | Python | src/easy_automation/utils/__init__.py | zhangjiapeng23/easy_automation_test | ad4c72572cb62bcca009aa06ea91ec4343b12def | [
"MIT"
] | null | null | null | src/easy_automation/utils/__init__.py | zhangjiapeng23/easy_automation_test | ad4c72572cb62bcca009aa06ea91ec4343b12def | [
"MIT"
] | null | null | null | src/easy_automation/utils/__init__.py | zhangjiapeng23/easy_automation_test | ad4c72572cb62bcca009aa06ea91ec4343b12def | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @author: James Zhang
# @data : 2021/7/20
| 18.4 | 25 | 0.586957 |
0c519e21f35f64078b5d58dcc57a34a77871a069 | 3,074 | py | Python | tfx/components/experimental/data_view/provider_executor_test.py | yifanmai/tfx | 56b4d373b8b777a780f4552b5834925e837c51e9 | [
"Apache-2.0"
] | 3 | 2020-07-20T18:37:16.000Z | 2021-11-17T11:24:27.000Z | tfx/components/experimental/data_view/provider_executor_test.py | yifanmai/tfx | 56b4d373b8b777a780f4552b5834925e837c51e9 | [
"Apache-2.0"
] | 2 | 2020-08-11T00:19:14.000Z | 2020-08-26T20:10:31.000Z | tfx/components/experimental/data_view/provider_executor_test.py | yifanmai/tfx | 56b4d373b8b777a780f4552b5834925e837c51e9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.data_view.provider_executor."""
import os
import unittest
import tensorflow as tf
from tfx.components.experimental.data_view import provider_executor
from tfx.components.testdata.module_file import data_view_module
from tfx.types import standard_artifacts
from tfx_bsl.coders import tf_graph_record_decoder
@unittest.skipIf(tf.__version__ < '2',
                 'tfx-bsl installed does not have modules required to run this '
                 'test or Tensorflow is not 2.x.')
class DataViewProviderExecutorTest(tf.test.TestCase):
  """Exercises TfGraphDataViewProviderExecutor with and without a module file."""

  def setUp(self):
    super(DataViewProviderExecutorTest, self).setUp()
    # ../../../testdata
    self._source_data_dir = os.path.join(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'testdata')
    self._output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)

  def _run_executor_and_verify(self, exec_properties):
    # Shared driver for both tests: run the executor, then confirm that a
    # decoder can be loaded back from the produced DataView artifact.
    data_view = standard_artifacts.DataView()
    data_view.uri = os.path.join(self._output_data_dir, 'output_data_view')
    executor = provider_executor.TfGraphDataViewProviderExecutor()
    executor.Do({}, {'data_view': [data_view]}, exec_properties)
    reloaded = tf_graph_record_decoder.load_decoder(data_view.uri)
    self.assertIsInstance(
        reloaded, tf_graph_record_decoder.TFGraphRecordDecoder)

  def testExecutorModuleFileProvided(self):
    self._run_executor_and_verify({
        'module_file':
            os.path.join(self._source_data_dir,
                         'module_file/data_view_module.py'),
        'create_decoder_func':
            'create_simple_decoder',
    })

  def testExecutorModuleFileNotProvided(self):
    self._run_executor_and_verify({
        'module_file': None,
        'create_decoder_func':
            '%s.%s' % (data_view_module.create_simple_decoder.__module__,
                       data_view_module.create_simple_decoder.__name__),
    })
# Allow invoking this test module directly as a test binary.
if __name__ == '__main__':
  tf.test.main()
| 39.922078 | 80 | 0.727716 |
26f0603e6f4eab17e0c1abe88e2285905a67f3b0 | 34,450 | py | Python | test/run_tests.py | etiennekintzler/vowpal_wabb | 6b45001e89ed9b732fee0288d046c155f1c69e6d | [
"BSD-3-Clause"
] | 2 | 2021-06-03T22:30:52.000Z | 2021-07-18T15:45:44.000Z | test/run_tests.py | etiennekintzler/vowpal_wabb | 6b45001e89ed9b732fee0288d046c155f1c69e6d | [
"BSD-3-Clause"
] | null | null | null | test/run_tests.py | etiennekintzler/vowpal_wabb | 6b45001e89ed9b732fee0288d046c155f1c69e6d | [
"BSD-3-Clause"
] | 1 | 2021-07-18T15:45:51.000Z | 2021-07-18T15:45:51.000Z | import shutil
import threading
import argparse
import difflib
from pathlib import Path
import re
import os
import os.path
import subprocess
import sys
import traceback
import json
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
import socket
import runtests_parser
import runtests_flatbuffer_converter as fb_converter
class Color():
    """ANSI escape codes used to colorize terminal output."""
    LIGHT_CYAN = '\033[96m'
    LIGHT_GREEN = '\033[92m'
    LIGHT_PURPLE = '\033[95m'
    LIGHT_RED = '\033[91m'
    ENDC = '\033[0m'  # reset back to the default color
class NoColor():
    """Drop-in replacement for Color that emits no escape codes (--no_color)."""
    LIGHT_CYAN = ''
    LIGHT_GREEN = ''
    LIGHT_PURPLE = ''
    LIGHT_RED = ''
    ENDC = ''
class Result(Enum):
    """Outcome of a single test case."""
    SUCCESS = 1
    FAIL = 2
    SKIPPED = 3
def try_decode(binary_object):
    """Decode *binary_object* as UTF-8; ``None`` maps to an empty string."""
    if binary_object is None:
        return ""
    return binary_object.decode("utf-8")
def fuzzy_float_compare(float_one, float_two, epsilon):
    """Return True when the two values are close enough to count as equal.

    Values are first compared by absolute difference.  If that fails and the
    second value's magnitude exceeds 1.0, a relative (ratio) comparison is
    used instead — mirroring the behaviour of the original Perl RunTests,
    where big numbers are compared by ratio rather than by difference.
    """
    first = float(float_one)
    second = float(float_two)
    if abs(first - second) < epsilon:
        return True
    # Near-zero values: the absolute check above is decisive, because a
    # ratio against a tiny denominator would be meaningless (or divide by 0).
    if abs(second) <= 1.0:
        return False
    # Safe to divide now; compare how far the ratio strays from 1.0.
    return abs(first / second - 1.0) < epsilon
def find_in_path(paths, file_matcher, debug_file_name):
    """Return the first absolute path accepted by *file_matcher*.

    Each entry of *paths* may be a directory (scanned one level deep) or a
    file; non-existent entries are skipped.  Raises ``ValueError`` with
    *debug_file_name* when nothing matches.
    """
    for candidate in paths:
        absolute = os.path.abspath(str(candidate))
        if os.path.isdir(absolute):
            for entry in os.listdir(absolute):
                full_entry = os.path.join(absolute, entry)
                if file_matcher(full_entry):
                    return full_entry
        elif os.path.isfile(absolute) and file_matcher(absolute):
            return absolute
    raise ValueError("Couldn't find {}".format(debug_file_name))
def line_diff_text(text_one, file_name_one, text_two, file_name_two):
    """Unified-diff two text blobs line-by-line after stripping whitespace.

    The diff runs from *text_two* (reference) to *text_one* (output).
    Returns ``(changed, diff_lines)`` where *changed* is True when the diff
    is non-empty.
    """
    stripped_one = [line.strip() for line in text_one.strip().splitlines()]
    stripped_two = [line.strip() for line in text_two.strip().splitlines()]
    diff_lines = list(difflib.unified_diff(
        stripped_two, stripped_one,
        fromfile=file_name_two, tofile=file_name_one, lineterm=''))
    return len(diff_lines) != 0, diff_lines
def is_line_different(output_line, ref_line, epsilon):
    """Token-wise comparison of a single output line against a reference line.

    Lines are split on whitespace and on ':', ',', '@'.  Pairs of float
    tokens may differ by *epsilon* (see fuzzy_float_compare); all other
    tokens must match exactly.

    Returns ``(is_different, reason, found_close_floats)`` where the last
    flag is True when at least one float pair matched only thanks to the
    epsilon tolerance.
    """
    output_tokens = re.split('[ \t:,@]+', output_line)
    ref_tokens = re.split('[ \t:,@]+', ref_line)
    if len(output_tokens) != len(ref_tokens):
        return True, "Number of tokens different", False
    found_close_floats = False
    for output_token, ref_token in zip(output_tokens, ref_tokens):
        output_is_float = is_float(output_token)
        ref_is_float = is_float(ref_token)
        if output_is_float and ref_is_float:
            close = fuzzy_float_compare(output_token, ref_token, epsilon)
            if close:
                found_close_floats = True
                continue
            return True, "Floats don't match {} {}".format((output_token), (ref_token)), found_close_floats
        else:
            if output_token != ref_token:
                return True, "Mismatch at token {} {}".format((output_token), (ref_token)), found_close_floats
    return False, "", found_close_floats
def are_lines_different(output_lines, ref_lines, epsilon, fuzzy_compare=False):
    """Compare two lists of lines; returns ``(is_different, reason)``.

    With *fuzzy_compare* enabled, '...' markers are removed and float tokens
    may differ by *epsilon*; otherwise lines must match exactly.
    """
    if len(output_lines) != len(ref_lines):
        return True, "Diff mismatch"
    found_close_floats = False
    for output_line, ref_line in zip(output_lines, ref_lines):
        if fuzzy_compare:
            # Some output contains '...', remove this for comparison.
            output_line = output_line.replace("...", "")
            ref_line = ref_line.replace("...", "")
            is_different, reason, found_close_floats_temp = is_line_different(
                output_line, ref_line, epsilon)
            found_close_floats = found_close_floats or found_close_floats_temp
            if is_different:
                return True, reason
        else:
            if output_line != ref_line:
                return True, "Lines differ - ref vs output: '{}' vs '{}'".format((ref_line), (output_line))
    return False, "Minor float difference ignored" if found_close_floats else ""
def is_diff_different(output_content, output_file_name, ref_content, ref_file_name, epsilon, fuzzy_compare=False):
    """Diff two blobs and decide whether the changed lines truly differ.

    Only the added/removed lines of the unified diff are re-checked
    token-wise (allowing fuzzy float matches).  Returns
    ``(is_different, diff_lines, reason)``; *diff_lines* is empty when the
    contents are considered equal.
    """
    is_different, diff = line_diff_text(
        output_content, output_file_name, ref_content, ref_file_name)
    if not is_different:
        return False, [], ""
    output_lines = [line[1:] for line in diff if line.startswith(
        '+') and not line.startswith('+++')]
    ref_lines = [line[1:] for line in diff if line.startswith(
        '-') and not line.startswith('---')]
    # if number of lines different it is a fail
    # if lines are the same, check if number of tokens the same
    # if number of tokens the same, check if they pass float equality
    is_different, reason = are_lines_different(
        output_lines, ref_lines, epsilon, fuzzy_compare=fuzzy_compare)
    diff = diff if is_different else []
    return is_different, diff, reason
def are_outputs_different(output_content, output_file_name, ref_content, ref_file_name, overwrite, epsilon, fuzzy_compare=False):
    """Top-level output comparison; returns ``(is_different, diff, reason)``.

    When *overwrite* is True and a difference is found, the reference file
    on disk is replaced with the new output (side effect used by ``-o``).
    A diff-based check runs first; if it reports a difference, a plain
    line-by-line comparison double-checks the result.
    """
    is_different, diff, reason = is_diff_different(
        output_content, output_file_name, ref_content, ref_file_name, epsilon, fuzzy_compare=fuzzy_compare)
    if is_different and overwrite:
        with open(ref_file_name, 'w') as writer:
            writer.write(output_content)
    if not is_different:
        return False, [], reason
    # If diff difference fails, fall back to a line by line compare to double check.
    output_lines = [line.strip()
                    for line in output_content.strip().splitlines()]
    ref_lines = [line.strip() for line in ref_content.strip().splitlines()]
    is_different, reason = are_lines_different(
        output_lines, ref_lines, epsilon, fuzzy_compare=fuzzy_compare)
    diff = diff if is_different else []
    return is_different, diff, reason
def is_float(value):
    """Return True when *value* parses as a float (e.g. '1', '2.5', 'nan')."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def print_colored_diff(diff, color_enum):
    """Print unified-diff lines, coloring additions green and removals red.

    All other lines (including '^' markers) are printed unchanged.
    """
    for entry in diff:
        if entry.startswith('+'):
            print(color_enum.LIGHT_GREEN + entry + color_enum.ENDC)
        elif entry.startswith('-'):
            print(color_enum.LIGHT_RED + entry + color_enum.ENDC)
        else:
            print(entry)
def is_valgrind_available():
    """Return True when a ``valgrind`` executable can be found on PATH."""
    return bool(shutil.which("valgrind"))
def run_command_line_test(test_id,
                          command_line,
                          comparison_files,
                          overwrite,
                          epsilon,
                          is_shell,
                          input_files,
                          base_working_dir,
                          ref_dir,
                          completed_tests,
                          dependencies=None,
                          fuzzy_compare=False,
                          skip=False,
                          valgrind=False):
    """Run a single test case and compare its outputs against reference files.

    Returns a ``(test_id, result_dict)`` tuple, where ``result_dict`` maps
    ``result`` to a ``Result`` enum value and ``checks`` to per-check detail
    dicts (exit code, optional valgrind report, one entry per diffed file).

    Every exit path reports completion to *completed_tests* so tests that
    depend on this one can proceed (or be skipped on failure).
    """
    if skip:
        completed_tests.report_completion(test_id, False)
        return (test_id, {
            "result": Result.SKIPPED,
            "checks": {}
        })
    if dependencies is not None:
        # If any dependency failed (or was skipped), skip this test too.
        for dep in dependencies:
            success = completed_tests.wait_for_completion_get_success(dep)
            if not success:
                completed_tests.report_completion(test_id, False)
                return (test_id, {
                    "result": Result.SKIPPED,
                    "checks": {}
                })
    try:
        if is_shell:
            # Because we don't really know what shell scripts do, we need to run them in the tests dir.
            working_dir = ref_dir
        else:
            working_dir = str(create_test_dir(
                test_id, input_files, base_working_dir, ref_dir, dependencies=dependencies))
        if valgrind:
            valgrind_log_file_name = "test-{}.valgrind-err".format(test_id)
            valgrind_log_file_path = os.path.join(
                working_dir, valgrind_log_file_name)
            # Exit code 100 is reserved to detect valgrind-reported errors.
            command_line = "valgrind --quiet --error-exitcode=100 --track-origins=yes --leak-check=full --log-file='{}' {}".format(valgrind_log_file_name, command_line)
        if is_shell:
            cmd = command_line
        else:
            cmd = "{}".format((command_line)).split()
        try:
            result = subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=working_dir,
                shell=is_shell,
                timeout=100)
        except subprocess.TimeoutExpired as e:
            stdout = try_decode(e.stdout)
            stderr = try_decode(e.stderr)
            checks = dict()
            checks["timeout"] = {
                "success": False,
                "message": "{} timed out".format((e.cmd)),
                "stdout": stdout,
                "stderr": stderr
            }
            # Bug fix: completion must be reported here too, otherwise tests
            # that depend on this one block forever in
            # wait_for_completion_get_success after a timeout.
            completed_tests.report_completion(test_id, False)
            return (test_id, {
                "result": Result.FAIL,
                "checks": checks
            })
        return_code = result.returncode
        stdout = try_decode(result.stdout)
        stderr = try_decode(result.stderr)
        checks = dict()
        # Shell tests under valgrind may legitimately exit 100 (see below).
        success = return_code == 0 or (return_code == 100 and is_shell and valgrind)
        message = "Exited with {}".format((return_code))
        if return_code == 100 and is_shell and valgrind:
            message += " - valgrind failure ignored in shell test"
        checks["exit_code"] = {
            "success": success,
            "message": message,
            "stdout": stdout,
            "stderr": stderr
        }
        if valgrind:
            success = True
            message = "OK"
            diff = []
            if return_code == 100:
                if is_shell:
                    message = "valgrind failure ignored for a shell based test"
                else:
                    success = False
                    message = "valgrind failed with command: '{}'".format(command_line)
                    diff = open(valgrind_log_file_path, 'r', encoding='utf-8').read().split("\n")
            elif return_code != 0:
                success = False
                # Bug fix: the original line ended with a stray comma, which
                # made the message a 1-element tuple instead of a string.
                message = "non-valgrind failure error code"
            checks["valgrind"] = {
                "success": success,
                "message": message,
                "diff": diff
            }
        for output_file, ref_file in comparison_files.items():
            # 'stdout'/'stderr' are virtual file names bound to the process
            # streams; everything else is read from the working directory.
            if output_file == "stdout":
                output_content = stdout
            elif output_file == "stderr":
                output_content = stderr
            else:
                output_file_working_dir = os.path.join(
                    working_dir, output_file)
                if os.path.isfile(output_file_working_dir):
                    output_content = open(output_file_working_dir, 'r', encoding='utf-8').read()
                else:
                    checks[output_file] = {
                        "success": False,
                        "message": "Failed to open output file: {}".format((output_file)),
                        "diff": []
                    }
                    continue
            ref_file_ref_dir = os.path.join(ref_dir, ref_file)
            if os.path.isfile(ref_file_ref_dir):
                ref_content = open(ref_file_ref_dir, 'r', encoding='utf-8').read()
            else:
                checks[output_file] = {
                    "success": False,
                    "message": "Failed to open ref file: {}".format((ref_file)),
                    "diff": []
                }
                continue
            are_different, diff, reason = are_outputs_different(output_content, output_file,
                                                                ref_content, ref_file, overwrite, epsilon, fuzzy_compare=fuzzy_compare)
            if are_different:
                message = "Diff not OK, {}".format((reason))
            else:
                message = "Diff OK, {}".format((reason))
            checks[output_file] = {
                "success": are_different == False,
                "message": message,
                "diff": diff
            }
    except:
        # Intentionally broad: mark the test complete (failed) before
        # re-raising so dependent tests are not left waiting forever.
        completed_tests.report_completion(test_id, False)
        raise
    success = all(check["success"] == True for name, check in checks.items())
    completed_tests.report_completion(test_id, success)
    return (test_id, {
        "result": Result.SUCCESS if success else Result.FAIL,
        "checks": checks
    })
class Completion():
    """Thread-safe registry mapping finished test ids to success flags.

    Worker threads call report_completion(); threads running dependent tests
    call wait_for_completion_get_success() and block until the dependency's
    result is available.
    """
    def __init__(self):
        self.lock = threading.Lock()
        self.condition = threading.Condition(self.lock)
        self.completed = dict()
    def report_completion(self, id, success):
        # Record the result and wake every waiter so they can re-check.
        with self.lock:
            self.completed[id] = success
            self.condition.notify_all()
    def wait_for_completion_get_success(self, id):
        # wait_for returns immediately when the predicate already holds.
        with self.lock:
            self.condition.wait_for(lambda: id in self.completed)
            return self.completed[id]
def create_test_dir(test_id, input_files, test_base_dir, test_ref_dir, dependencies=None):
    """Create an isolated working directory for a test and copy its inputs.

    Each input file is searched for in the reference dir first, then in the
    working dirs of dependency tests (both by relative path and by basename,
    since some tests list inputs with full paths).  Raises ``ValueError``
    when an input cannot be found anywhere.  Returns the Path of the
    created directory.
    """
    test_working_dir = Path(test_base_dir).joinpath(
        "test_{}".format((test_id)))
    Path(test_working_dir).mkdir(parents=True, exist_ok=True)
    # Required as workaround until #2686 is fixed.
    Path(test_working_dir.joinpath("models")).mkdir(
        parents=True, exist_ok=True)
    for f in input_files:
        file_to_copy = None
        search_paths = [Path(test_ref_dir).joinpath(f)]
        if dependencies is not None:
            search_paths.extend([Path(test_base_dir).joinpath(
                "test_{}".format((x)), f) for x in dependencies])
            search_paths.extend([Path(test_base_dir).joinpath(
                "test_{}".format((x)), os.path.basename(f)) for x in dependencies])  # for input_files with a full path
        for search_path in search_paths:
            if search_path.exists() and not search_path.is_dir():
                file_to_copy = search_path
                break
        if file_to_copy is None:
            raise ValueError(
                "{} couldn't be found for test {}".format((f), (test_id)))
        test_dest_file = Path(test_working_dir).joinpath(f)
        if file_to_copy == test_dest_file:
            continue
        Path(test_dest_file.parent).mkdir(parents=True, exist_ok=True)
        # We always want to replace this file in case it is the output of another test
        if test_dest_file.exists():
            test_dest_file.unlink()
        shutil.copyfile(str(file_to_copy), str(test_dest_file))
    return test_working_dir
def find_vw_binary(test_base_ref_dir, user_supplied_bin_path):
    """Locate the ``vw`` binary, preferring an explicit user-supplied path."""
    vw_search_paths = [
        Path(test_base_ref_dir).joinpath("../build/vowpalwabbit")
    ]
    def is_vw_binary(file_path):
        file_name = os.path.basename(file_path)
        return file_name == "vw"
    return find_or_use_user_supplied_path(
        test_base_ref_dir=test_base_ref_dir,
        user_supplied_bin_path=user_supplied_bin_path,
        search_paths=vw_search_paths,
        is_correct_bin_func=is_vw_binary,
        debug_file_name="vw")
def find_spanning_tree_binary(test_base_ref_dir, user_supplied_bin_path):
    """Locate the ``spanning_tree`` binary, preferring a user-supplied path."""
    spanning_tree_search_path = [
        Path(test_base_ref_dir).joinpath("../build/cluster")
    ]
    def is_spanning_tree_binary(file_path):
        file_name = os.path.basename(file_path)
        return file_name == "spanning_tree"
    return find_or_use_user_supplied_path(
        test_base_ref_dir=test_base_ref_dir,
        user_supplied_bin_path=user_supplied_bin_path,
        search_paths=spanning_tree_search_path,
        is_correct_bin_func=is_spanning_tree_binary,
        debug_file_name="spanning_tree")
def find_to_flatbuf_binary(test_base_ref_dir, user_supplied_bin_path):
    """Locate the ``to_flatbuff`` converter, preferring a user-supplied path."""
    to_flatbuff_search_path = [
        Path(test_base_ref_dir).joinpath("../build/utl/flatbuffer")
    ]
    def is_to_flatbuff_binary(file_path):
        file_name = os.path.basename(file_path)
        return file_name == "to_flatbuff"
    return find_or_use_user_supplied_path(
        test_base_ref_dir=test_base_ref_dir,
        user_supplied_bin_path=user_supplied_bin_path,
        search_paths=to_flatbuff_search_path,
        is_correct_bin_func=is_to_flatbuff_binary,
        debug_file_name="to_flatbuff")
def find_or_use_user_supplied_path(test_base_ref_dir, user_supplied_bin_path, search_paths, is_correct_bin_func, debug_file_name):
    """Resolve a binary path: use a validated user-supplied path, or search.

    When *user_supplied_bin_path* is None, *search_paths* are scanned with
    *is_correct_bin_func*.  Otherwise the supplied path must exist and be a
    regular file; raises ``ValueError`` if it is not.
    """
    if user_supplied_bin_path is None:
        return find_in_path(search_paths, is_correct_bin_func, debug_file_name)
    else:
        if not Path(user_supplied_bin_path).exists() or not Path(user_supplied_bin_path).is_file():
            # Bug fix: the original template contained a named placeholder
            # '{debug_file_name}' that .format() was never given, so this
            # branch raised KeyError instead of the intended ValueError.
            raise ValueError("Invalid {} binary path: {}".format(
                debug_file_name, user_supplied_bin_path))
        return user_supplied_bin_path
def find_runtests_file(test_base_ref_dir):
    """Locate the legacy ``RunTests`` definition file in the ref dir."""
    def is_runtests_file(file_path):
        file_name = os.path.basename(file_path)
        return file_name == "RunTests"
    possible_runtests_paths = [
        Path(test_base_ref_dir)
    ]
    return find_in_path(possible_runtests_paths, is_runtests_file, "RunTests")
def do_dirty_check(test_base_ref_dir):
    """Exit(1) when the test ref dir contains untracked/ignored files.

    Stale files can shadow expected inputs/outputs and produce false
    negatives; the user can bypass via --ignore_dirty or clean via
    --clean_dirty.  Uses ``git clean --dry-run`` so nothing is deleted.
    """
    result = subprocess.run(
        "git clean --dry-run -d -x -e __pycache__".split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=test_base_ref_dir,
        timeout=10)
    return_code = result.returncode
    if return_code != 0:
        print("Failed to run 'git clean --dry-run -d -x -e __pycache__'")
    stdout = try_decode(result.stdout)
    # Any output from the dry run means there are files git would remove.
    if len(stdout) != 0:
        print("Error: Test dir is not clean, this can result in false negatives. To ignore this and continue anyway pass --ignore_dirty or pass --clean_dirty to clean")
        print("'git clean --dry-run -d -x -e __pycache__' output:\n---")
        print(stdout)
        sys.exit(1)
def clean_dirty(test_base_ref_dir):
    """Remove untracked/ignored files from the test ref dir via git clean."""
    git_command = "git clean --force -d -x --exclude __pycache__"
    result = subprocess.run(
        git_command.split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=test_base_ref_dir,
        timeout=10)
    if result.returncode != 0:
        print("Failed to run {}".format(git_command))
def calculate_test_to_run_explicitly(explicit_tests, tests):
    """Expand a list of requested test numbers with their transitive deps.

    *tests* is the full ordered test list (1-based ids equal to position).
    Exits with code 1 when a requested number is out of range.  Returns the
    selected test numbers as a list (order unspecified).
    """
    def collect_deps(test_number):
        # Recursively gather the dependency closure of a single test.
        gathered = set()
        for dep in tests[test_number - 1].get("depends_on", []):
            gathered.add(dep)
            gathered |= collect_deps(dep)
        return gathered

    selected = set()
    for test_number in explicit_tests:
        if test_number > len(tests):
            print("Error: Test number {} does not exist. There are {} tests in total.".format(test_number, len(tests)))
            sys.exit(1)
        selected.add(test_number)
        selected |= collect_deps(test_number)
    return list(selected)
def convert_tests_for_flatbuffers(tests, to_flatbuff, working_dir, color_enum):
    """Rewrite test definitions so their inputs are flatbuffer files.

    Tests that cannot be converted (no vw command, already flatbuffer,
    malformed/dictionary/help inputs, or known-incompatible ids) are marked
    with ``skip`` or left untouched.  Returns the mutated *tests* list.
    """
    test_base_working_dir = str(working_dir)
    if not Path(test_base_working_dir).exists():
        Path(test_base_working_dir).mkdir(parents=True, exist_ok=True)
    for test in tests:
        test_id = test['id']
        if 'vw_command' not in test:
            print("{}Skipping test {} for flatbuffers, no vw command available{}".format(color_enum.LIGHT_CYAN, test_id, color_enum.ENDC))
            test['skip'] = True
            continue
        if 'flatbuffer' in test['vw_command']:
            print("{}Skipping test {} for flatbuffers, already a flatbuffer test{}".format(color_enum.LIGHT_CYAN, test_id, color_enum.ENDC))
            test['skip'] = True
            continue
        if 'malformed' in test['vw_command']:
            print("{}Skipping test {} for flatbuffers, malformed input{}".format(color_enum.LIGHT_CYAN, test_id, color_enum.ENDC))
            test['skip'] = True
            continue
        if 'input_files' not in test:
            print("{}Skipping test {} for flatbuffers, no input files{}".format(color_enum.LIGHT_CYAN, test_id, color_enum.ENDC))
            test['skip'] = True
            continue
        if 'dictionary' in test['vw_command']:
            print("{}Skipping test {} for flatbuffers, currently dictionaries are not supported{}".format(color_enum.LIGHT_CYAN, test_id, color_enum.ENDC))
            test['skip'] = True
            continue
        if 'help' in test['vw_command']:
            print("{}Skipping test {} for flatbuffers, --help test{}".format(color_enum.LIGHT_CYAN, test_id, color_enum.ENDC))
            test['skip'] = True
            continue
        #todo: 300 understand why is it failing
        # test 189, 312, 316, 318 and 319 depend on dsjson parser behaviour
        # they can be enabled if we ignore diffing the --extra_metrics
        # (324-326) deals with corrupted data, so cannot be translated to fb
        # pdrop is not supported in fb, so 327-331 are excluded
        if str(test_id) in ('300', '189', '312', '316', '318', '319', '324', '325', '326', '327', '328', '329', '330', '331'):
            continue
        # test id is being used as an index here, not necessarily a contract
        depends_on_test = (
            tests[int(test["depends_on"][0]) - 1] if "depends_on" in test else None
        )
        fb_test_converter = fb_converter.FlatbufferTest(test, working_dir, depends_on_test=depends_on_test)
        fb_test_converter.to_flatbuffer(to_flatbuff, color_enum)
    return tests
def main():
    """Entry point: parse CLI args, build the test list, run tests, report.

    Tests run on a thread pool; dependencies between tests are honoured via
    a Completion tracker.  Exits non-zero on any failure (immediately with
    --exit_first_fail).
    """
    working_dir = Path.home().joinpath(".vw_runtests_working_dir")
    test_ref_dir = Path(os.path.dirname(os.path.abspath(__file__)))
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-t', "--test", type=int,
                        action='append', nargs='+', help="Run specific tests and ignore all others")
    parser.add_argument('-E', "--epsilon", type=float, default=1e-4,
                        help="Tolerance used when comparing floats. Only used if --fuzzy_compare is also supplied")
    parser.add_argument('-e', "--exit_first_fail", action='store_true',
                        help="If supplied, will exit after the first failure")
    parser.add_argument('-o', "--overwrite", action='store_true',
                        help="If test output differs from the reference file, overwrite the contents")
    parser.add_argument('-f', "--fuzzy_compare", action='store_true',
                        help="Allow for some tolerance when comparing floats")
    parser.add_argument("--ignore_dirty", action='store_true',
                        help="The test ref dir is checked for dirty files which may cause false negatives. Pass this flag to skip this check.")
    parser.add_argument("--clean_dirty", action='store_true',
                        help="The test ref dir is checked for dirty files which may cause false negatives. Pass this flag to remove those files.")
    parser.add_argument("--working_dir", default=working_dir,
                        help="Directory to save test outputs to")
    parser.add_argument("--ref_dir", default=test_ref_dir,
                        help="Directory to read test input files from")
    parser.add_argument('-j', "--jobs", type=int, default=4,
                        help="Number of tests to run in parallel")
    parser.add_argument(
        '--vw_bin_path', help="Specify VW binary to use. Otherwise, binary will be searched for in build directory")
    parser.add_argument('--spanning_tree_bin_path',
                        help="Specify spanning tree binary to use. Otherwise, binary will be searched for in build directory")
    parser.add_argument("--test_spec", type=str,
                        help="Optional. If passed the given JSON test spec will be used, " +
                        "otherwise a test spec will be autogenerated from the RunTests test definitions")
    parser.add_argument('--no_color', action='store_true',
                        help="Don't print color ANSI escape codes")
    parser.add_argument('--for_flatbuffers', action='store_true', help='Transform all of the test inputs into flatbuffer format and run tests')
    parser.add_argument('--to_flatbuff_path', help="Specify to_flatbuff binary to use. Otherwise, binary will be searched for in build directory")
    parser.add_argument('--include_flatbuffers', action='store_true', help="Don't skip the explicit flatbuffer tests from default run_tests run")
    parser.add_argument('--valgrind', action='store_true', help="Run tests with Valgrind")
    args = parser.parse_args()
    if args.for_flatbuffers and args.working_dir == working_dir:  # user did not supply dir
        args.working_dir = Path.home().joinpath(".vw_fb_runtests_working_dir")
    test_base_working_dir = str(args.working_dir)
    test_base_ref_dir = str(args.ref_dir)
    color_enum = NoColor if args.no_color else Color
    if args.valgrind and not is_valgrind_available():
        print("Can't find valgrind")
        sys.exit(1)
    # Flatten nested lists for arg.test argument.
    # Ideally we would have used action="extend", but that was added in 3.8
    if args.test is not None:
        args.test = [item for sublist in args.test for item in sublist]
    if Path(test_base_working_dir).is_file():
        print("--working_dir='{}' cannot be a file".format((test_base_working_dir)))
        sys.exit(1)
    if not Path(test_base_working_dir).exists():
        Path(test_base_working_dir).mkdir(parents=True, exist_ok=True)
    # Bug fix: a Path object is always truthy, so the original check
    # `if not Path(test_base_ref_dir):` could never trigger; .exists()
    # was clearly intended (matches the error message printed below).
    if not Path(test_base_ref_dir).exists():
        print("--ref_dir='{}' doesn't exist".format((test_base_ref_dir)))
        sys.exit(1)
    if args.clean_dirty:
        clean_dirty(test_base_ref_dir)
    if not args.ignore_dirty:
        do_dirty_check(test_base_ref_dir)
    print("Testing on: hostname={}, OS={}, num_jobs={}".format(
        (socket.gethostname()), (sys.platform), (args.jobs)))
    vw_bin = find_vw_binary(test_base_ref_dir, args.vw_bin_path)
    print("Using VW binary: {}".format((vw_bin)))
    spanning_tree_bin = find_spanning_tree_binary(
        test_base_ref_dir, args.spanning_tree_bin_path)
    print("Using spanning tree binary: {}".format((spanning_tree_bin)))
    # Load the test definitions either from a JSON spec or the legacy
    # RunTests file.
    if args.test_spec is None:
        runtests_file = find_runtests_file(test_base_ref_dir)
        tests = runtests_parser.file_to_obj(runtests_file)
        tests = [x.__dict__ for x in tests]
        print("Tests parsed from RunTests file: {}".format((runtests_file)))
    else:
        json_test_spec_content = open(args.test_spec).read()
        tests = json.loads(json_test_spec_content)
        print("Tests read from test spec file: {}".format((args.test_spec)))
    print()
    if args.for_flatbuffers:
        to_flatbuff = find_to_flatbuf_binary(test_base_ref_dir, args.to_flatbuff_path)
        tests = convert_tests_for_flatbuffers(tests, to_flatbuff, args.working_dir, color_enum)
    # Because bash_command based tests don't specify all inputs and outputs they must operate in the test directory directly.
    # This means that if they run in parallel they can break each other by touching the same files.
    # Until we can move to a test spec which allows us to specify the input/output we need to add dependencies between them here.
    prev_bash_test = None
    for test in tests:
        test_number = test["id"]
        if "bash_command" in test:
            if prev_bash_test is not None:
                if "depends_on" not in tests[test_number - 1]:
                    tests[test_number - 1]["depends_on"] = []
                tests[test_number - 1]["depends_on"].append(prev_bash_test)
            prev_bash_test = test_number
    tasks = []
    completed_tests = Completion()
    tests_to_run_explicitly = None
    if args.test is not None:
        tests_to_run_explicitly = calculate_test_to_run_explicitly(args.test, tests)
        print("Running tests: {}".format((list(tests_to_run_explicitly))))
        if len(args.test) != len(tests_to_run_explicitly):
            print(
                "Note: due to test dependencies, more than just tests {} must be run".format((args.test)))
    executor = ThreadPoolExecutor(max_workers=args.jobs)
    for test in tests:
        test_number = test["id"]
        if tests_to_run_explicitly is not None and test_number not in tests_to_run_explicitly:
            continue
        dependencies = None
        if "depends_on" in test:
            dependencies = test["depends_on"]
        input_files = []
        if "input_files" in test:
            input_files = test["input_files"]
        is_shell = False
        if "bash_command" in test:
            if sys.platform == "win32":
                print(
                    "Skipping test number '{}' as bash_command is unsupported on Windows.".format((test_number)))
                continue
            command_line = test['bash_command'].format(
                VW=vw_bin, SPANNING_TREE=spanning_tree_bin)
            is_shell = True
        elif "vw_command" in test:
            command_line = "{} {}".format((vw_bin), (test['vw_command']))
            if not args.include_flatbuffers and not args.for_flatbuffers:
                if '--flatbuffer' in test['vw_command']:
                    print("{} is a flatbuffer test, can be run with --include_flatbuffers flag, Skipping...".format(test_number))
                    continue
        else:
            print("{} is an unknown type. Skipping...".format((test_number)))
            continue
        tasks.append(executor.submit(run_command_line_test,
                                     test_number,
                                     command_line,
                                     test["diff_files"],
                                     overwrite=args.overwrite,
                                     epsilon=args.epsilon,
                                     is_shell=is_shell,
                                     input_files=input_files,
                                     base_working_dir=test_base_working_dir,
                                     ref_dir=test_base_ref_dir,
                                     completed_tests=completed_tests,
                                     dependencies=dependencies,
                                     fuzzy_compare=args.fuzzy_compare,
                                     skip=test['skip'] if "skip" in test else False,
                                     valgrind=args.valgrind))
    num_success = 0
    num_fail = 0
    num_skip = 0
    # Drain tasks in submission order so output is stable and dependencies
    # have generally completed before their dependents are reported.
    while len(tasks) > 0:
        try:
            test_number, result = tasks[0].result()
        except Exception:
            print("----------------")
            traceback.print_exc()
            num_fail += 1
            print("----------------")
            if args.exit_first_fail:
                for task in tasks:
                    task.cancel()
                sys.exit(1)
            continue
        finally:
            tasks.pop(0)
        success_text = "{}Success{}".format(
            (color_enum.LIGHT_GREEN), (color_enum.ENDC))
        fail_text = "{}Fail{}".format(
            (color_enum.LIGHT_RED), (color_enum.ENDC))
        skipped_text = "{}Skip{}".format(
            (color_enum.LIGHT_CYAN), (color_enum.ENDC))
        num_success += result['result'] == Result.SUCCESS
        num_fail += result['result'] == Result.FAIL
        num_skip += result['result'] == Result.SKIPPED
        if result['result'] == Result.SUCCESS:
            result_text = success_text
        elif result['result'] == Result.FAIL:
            result_text = fail_text
        else:
            result_text = skipped_text
        print("Test {}: {}".format((test_number), (result_text)))
        if not result['result'] == Result.SUCCESS:
            test = tests[test_number - 1]
            print("\tDescription: {}".format((test['desc'])))
            if 'vw_command' in test:
                print("\tvw_command: \"{}\"".format((test['vw_command'])))
            if 'bash_command' in test:
                print("\tbash_command: \"{}\"".format((test['bash_command'])))
        for name, check in result["checks"].items():
            # Don't print exit_code check as it is too much noise.
            if check['success'] and name == "exit_code":
                continue
            print(
                "\t[{}] {}: {}".format((name), (success_text if check['success'] else fail_text), (check['message'])))
            if not check['success']:
                if name == "exit_code":
                    print("---- stdout ----")
                    print(result["checks"]["exit_code"]["stdout"])
                    print("---- stderr ----")
                    print(result["checks"]["exit_code"]["stderr"])
                if "diff" in check:
                    print()
                    print_colored_diff(check["diff"], color_enum)
                    print()
                if args.exit_first_fail:
                    for task in tasks:
                        task.cancel()
                    sys.exit(1)
    print("-----")
    print("# Success: {}".format((num_success)))
    print("# Fail: {}".format((num_fail)))
    print("# Skip: {}".format((num_skip)))
    if num_fail > 0:
        sys.exit(1)
if __name__ == "__main__":
    main()
| 39.7806 | 168 | 0.608331 |
7522bef6950b9c81a129690880bc9f90330c9775 | 18 | py | Python | tests/__init__.py | Parquery/mapry | 93515307f9eba8447fe64b0ac7cc68b2d07205a7 | [
"MIT"
] | 11 | 2019-06-26T05:56:41.000Z | 2021-03-28T16:44:16.000Z | tests/__init__.py | Parquery/mapry | 93515307f9eba8447fe64b0ac7cc68b2d07205a7 | [
"MIT"
] | 4 | 2019-10-18T14:43:59.000Z | 2020-04-02T19:12:07.000Z | tests/__init__.py | Parquery/mapry | 93515307f9eba8447fe64b0ac7cc68b2d07205a7 | [
"MIT"
] | 3 | 2019-06-17T07:39:03.000Z | 2020-04-01T14:01:23.000Z | """Test mapry."""
| 9 | 17 | 0.5 |
4fad0dae968a9706ea5623c8122e008bfe03a283 | 259 | py | Python | maro/cli/k8s/template.py | KangFengjian/maro | 2694a75731d5174ba5b33780670ba38d776d8c5a | [
"MIT"
] | 1 | 2020-09-30T09:31:05.000Z | 2020-09-30T09:31:05.000Z | maro/cli/k8s/template.py | KangFengjian/maro | 2694a75731d5174ba5b33780670ba38d776d8c5a | [
"MIT"
] | 2 | 2020-12-15T09:13:43.000Z | 2020-12-16T08:02:41.000Z | maro/cli/k8s/template.py | KangFengjian/maro | 2694a75731d5174ba5b33780670ba38d776d8c5a | [
"MIT"
] | 1 | 2021-10-01T09:17:43.000Z | 2021-10-01T09:17:43.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.cli.k8s.executors.k8s_azure_executor import K8sAzureExecutor
def template(export_path: str, **kwargs):
    """Export the MARO k8s (Azure) deployment template to *export_path*.

    Extra keyword arguments are accepted for CLI-dispatch compatibility
    and ignored.
    """
    K8sAzureExecutor.template(
        export_path=export_path
    )
| 21.583333 | 70 | 0.756757 |
140754bde0eab1ea0d1dc81567f06ea6f7a7d43b | 12,479 | py | Python | tests/test_unique_queues.py | ondrejkajinek/py-rq | 0fb89b5cb23f3410057e23b85fa9deb5ddebd275 | [
"MIT"
] | 2 | 2017-01-10T06:32:40.000Z | 2018-11-17T05:42:54.000Z | tests/test_unique_queues.py | ondrejkajinek/py-rq | 0fb89b5cb23f3410057e23b85fa9deb5ddebd275 | [
"MIT"
] | 3 | 2016-05-13T09:02:16.000Z | 2021-05-07T10:34:21.000Z | tests/test_unique_queues.py | ondrejkajinek/py-rq | 0fb89b5cb23f3410057e23b85fa9deb5ddebd275 | [
"MIT"
] | 3 | 2017-09-13T15:17:44.000Z | 2021-05-07T07:02:44.000Z | import unittest
import time
import socket
import os
from unittest.mock import patch
from redis import Redis
from pyrq.unique_queues import UniqueQueue, CHUNK_SIZE
# Redis key names derived from the queue under test; configurable through
# environment variables so the suite can target different Redis deployments.
QUEUE_NAME = os.getenv('QUEUE_NAME', 'test-queue')
PROCESSING_QUEUE_SCHEMA = QUEUE_NAME + '-processing-{}[{}][{}]'
TIMEOUT_QUEUE = QUEUE_NAME + '-timeouts'
SET_QUEUE_NAME = QUEUE_NAME + '-unique'
# Connection parameters for the Redis server used by the tests.
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
REDIS_DB = int(os.getenv('REDIS_DB', 0))
REDIS_PASSWORD = os.getenv('REDIS_PASS', None)
@patch('pyrq.helpers.wait_for_synced_slaves')
class TestUniqueQueue(unittest.TestCase):
    def setUp(self):
        # Small slave-sync settings keep the (mocked) wait_for_synced_slaves
        # calls cheap; the queue key is wiped so every test starts empty.
        synced_slaves_count = 1
        synced_slaves_timeout = 2
        self.client = Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, password=REDIS_PASSWORD,
                            decode_responses=True)
        self.client.delete(QUEUE_NAME)
        self.queue_instance = UniqueQueue(QUEUE_NAME, self.client, synced_slaves_enabled=True,
                                          synced_slaves_count=synced_slaves_count,
                                          synced_slaves_timeout=synced_slaves_timeout)
        self.processing_queue = self.queue_instance.processing_queue_name
        self.timeouts_hash = self.queue_instance.timeouts_hash_name
    def tearDown(self):
        # Delete every key derived from QUEUE_NAME (processing queues,
        # timeout hashes, unique sets, ...) so tests remain isolated.
        self.client.eval("""
        local keys = redis.call("keys", ARGV[1])
        for i, key in ipairs(keys) do
            redis.call("del", key)
        end
        """, 0, QUEUE_NAME + '*')
    def test_add_items(self, slaves_mock):
        # The duplicate 'first-message' must be dropped by the unique queue,
        # and only one slave sync should occur per add_items call.
        items = ['first-message', 'second-message', 'first-message']
        self.queue_instance.add_items(items)
        self.assertEqual(items[0], self.client.rpop(QUEUE_NAME))
        self.assertEqual(items[1], self.client.rpop(QUEUE_NAME))
        self.assertEqual(None, self.client.rpop(QUEUE_NAME))
        self.assertEqual(1, slaves_mock.call_count)
def test_add_items_with_multiple_chunks(self, slaves_mock):
chunks_count = 3
items = ['message-{}'.format(i) for i in range(chunks_count * CHUNK_SIZE)]
self.queue_instance.add_items(items)
self.assertEqual(1, slaves_mock.call_count)
def test_add_item(self, slaves_mock):
items = [3, 5, 3, 1]
for i in items:
self.queue_instance.add_item(i)
actual_items = self.client.lrange(QUEUE_NAME, 0, 5)
for item in ['5', '3', '1']:
self.assertTrue(item in actual_items)
self.assertEqual(4, slaves_mock.call_count)
def test_get_items(self, slaves_mock):
for i in [3, 5, 3, 1]:
self.client.lpush(QUEUE_NAME, i)
self.assertEqual(['3', '5', '3'], self.queue_instance.get_items(3))
self.assertEqual(['1'], self.queue_instance.get_items(1))
self.assertEqual([], self.queue_instance.get_items(1))
self.client.delete(self.queue_instance.processing_queue_name)
self.client.delete(self.timeouts_hash)
self.assertEqual(0, slaves_mock.call_count)
def test_ack_item(self, slaves_mock):
self.client.lpush(self.processing_queue, *[1, 5, 5, 3])
saved_time = int(time.time())
self.client.hset(self.timeouts_hash, self.processing_queue, saved_time)
for i in [1, 5, 1]:
self.queue_instance.ack_item(i)
self.assertEqual(['3', '5'], self.client.lrange(self.processing_queue, 0, 5))
self.assertEqual({self.processing_queue: str(saved_time)}, self.client.hgetall(self.timeouts_hash))
for i in [5, 3]:
self.queue_instance.ack_item(i)
self.assertEqual(0, self.client.llen(self.processing_queue))
self.assertEqual(5, slaves_mock.call_count)
def test_ack_items(self, slaves_mock):
self.client.lpush(self.processing_queue, *[1, 5, 5, 3, 6, 7])
saved_time = int(time.time())
self.client.hset(self.timeouts_hash, self.processing_queue, saved_time)
self.queue_instance.ack_items([1, 5])
self.queue_instance.ack_items([1])
self.assertEqual(['7', '6', '3', '5'], self.client.lrange(self.processing_queue, 0, 5))
self.assertEqual({self.processing_queue: str(saved_time)}, self.client.hgetall(self.timeouts_hash))
self.queue_instance.ack_items([5, 3, 6])
self.queue_instance.ack_items([7])
self.assertEqual(0, self.client.llen(self.processing_queue))
self.assertEqual(4, slaves_mock.call_count)
def test_reject_item(self, slaves_mock):
self.client.lpush(self.processing_queue, *[1, 5, 5, 3])
saved_time = int(time.time())
self.client.hset(self.timeouts_hash, self.processing_queue, saved_time)
self.queue_instance.reject_item(1)
self.queue_instance.reject_item(5)
self.queue_instance.reject_item(1)
self.assertEqual(['1', '5'], self.client.lrange(QUEUE_NAME, 0, 5))
self.assertEqual(['3', '5'], self.client.lrange(self.processing_queue, 0, 5))
self.assertEqual({self.processing_queue: str(saved_time)}, self.client.hgetall(self.timeouts_hash))
self.queue_instance.reject_item(3)
self.queue_instance.reject_item(5)
self.assertEqual(['1', '5', '3'], self.client.lrange(QUEUE_NAME, 0, 5))
self.assertEqual(0, self.client.llen(self.processing_queue))
self.assertEqual(5, slaves_mock.call_count)
def test_reject_items(self, slaves_mock):
self.client.lpush(self.processing_queue, *[1, 5, 5, 3, 6, 7])
saved_time = int(time.time())
self.client.hset(self.timeouts_hash, self.processing_queue, saved_time)
self.queue_instance.reject_items([1, 5])
self.queue_instance.reject_items([5])
self.queue_instance.reject_items([9])
self.assertEqual(['5', '1'], self.client.lrange(QUEUE_NAME, 0, 5))
self.assertEqual(['7', '6', '3'], self.client.lrange(self.processing_queue, 0, 5))
self.assertEqual({self.processing_queue: str(saved_time)}, self.client.hgetall(self.timeouts_hash))
self.queue_instance.reject_items([3, 6, 7])
self.assertEqual(['5', '1', '7', '6', '3'], self.client.lrange(QUEUE_NAME, 0, 10))
self.assertEqual(0, self.client.llen(self.processing_queue))
self.assertEqual(4, slaves_mock.call_count)
def test_integration(self, slaves_mock):
self.queue_instance.add_items([1, 5, 2, 6, 7])
self.assertEqual(['1', '5', '2', '6', '7'], self.queue_instance.get_items(5))
self.assertEqual([], self.queue_instance.get_items(1))
self.queue_instance.ack_items([1, 5])
self.assertEqual([], self.queue_instance.get_items(1))
self.queue_instance.reject_items([2, 6, 7])
self.assertEqual(['2', '6', '7'], self.queue_instance.get_items(5))
self.queue_instance.ack_items([2, 6, 7])
self.assertEqual(0, self.client.llen(QUEUE_NAME))
self.assertEqual(4, slaves_mock.call_count)
def test_re_enqueue_timeout_items(self, slaves_mock):
microtimestamp = time.time()
timestamp = int(microtimestamp)
processing_queue1 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 15)
self.client.lpush(processing_queue1, 1, 5, 3)
self.client.hset(TIMEOUT_QUEUE, processing_queue1, microtimestamp - 15)
processing_queue2 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 10)
self.client.lpush(processing_queue2, 1, 4, 6)
self.client.hset(TIMEOUT_QUEUE, processing_queue2, microtimestamp - 10)
processing_queue3 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 5)
self.client.lpush(processing_queue3, 4, 7, 8)
self.client.hset(TIMEOUT_QUEUE, processing_queue3, microtimestamp - 5)
self.queue_instance.re_enqueue_timeout_items(7)
self.assertEqual(['6', '4', '3', '5', '1'], self.client.lrange(QUEUE_NAME, 0, 10))
self.assertEqual(['8', '7', '4'], self.client.lrange(processing_queue3, 0, 5))
self.assertEqual({processing_queue3: str(microtimestamp - 5)}, self.client.hgetall(TIMEOUT_QUEUE))
self.assertEqual([QUEUE_NAME, processing_queue3, TIMEOUT_QUEUE, SET_QUEUE_NAME],
sorted(self.client.keys(QUEUE_NAME + '*')))
self.queue_instance.re_enqueue_timeout_items(0)
self.assertEqual(['6', '3', '5', '1', '8', '7', '4'], self.client.lrange(QUEUE_NAME, 0, 10))
self.assertEqual([QUEUE_NAME, SET_QUEUE_NAME], sorted(self.client.keys(QUEUE_NAME + '*')))
self.assertEqual(2, slaves_mock.call_count)
def test_re_enqueue_all_times(self, slaves_mock):
microtimestamp = time.time()
timestamp = int(microtimestamp)
processing_queue1 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 15)
self.client.lpush(processing_queue1, 1, 5, 3)
self.client.hset(TIMEOUT_QUEUE, processing_queue1, microtimestamp - 15)
processing_queue2 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 10)
self.client.lpush(processing_queue2, 1, 4, 6)
self.client.hset(TIMEOUT_QUEUE, processing_queue2, microtimestamp - 10)
processing_queue3 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 5)
self.client.lpush(processing_queue3, 4, 7, 8)
self.client.hset(TIMEOUT_QUEUE, processing_queue3, microtimestamp - 5)
self.queue_instance.re_enqueue_all_items()
self.assertEqual(['8', '7', '6', '4', '3', '5', '1'], self.client.lrange(QUEUE_NAME, 0, 10))
self.assertEqual([QUEUE_NAME, SET_QUEUE_NAME], sorted(self.client.keys(QUEUE_NAME + '*')))
self.assertEqual(1, slaves_mock.call_count)
def test_drop_timeout_items(self, slaves_mock):
microtimestamp = time.time()
timestamp = int(microtimestamp)
processing_queue1 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 15)
self.client.lpush(processing_queue1, 1, 5, 3)
self.client.hset(TIMEOUT_QUEUE, processing_queue1, microtimestamp - 15)
processing_queue2 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 10)
self.client.lpush(processing_queue2, 1, 4, 6)
self.client.hset(TIMEOUT_QUEUE, processing_queue2, microtimestamp - 10)
processing_queue3 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 5)
self.client.lpush(processing_queue3, 4, 7, 8)
self.client.hset(TIMEOUT_QUEUE, processing_queue3, microtimestamp - 5)
self.queue_instance.drop_timeout_items(7)
self.assertEqual([], self.client.lrange(QUEUE_NAME, 0, 5))
self.assertEqual(['8', '7', '4'], self.client.lrange(processing_queue3, 0, 5))
self.assertEqual({processing_queue3: str(microtimestamp - 5)}, self.client.hgetall(TIMEOUT_QUEUE))
self.assertEqual([processing_queue3, TIMEOUT_QUEUE], sorted(self.client.keys(QUEUE_NAME + '*')))
self.queue_instance.drop_timeout_items(0)
self.assertEqual([], self.client.lrange(QUEUE_NAME, 0, 10))
self.assertEqual([], self.client.keys(QUEUE_NAME + '*'))
self.assertEqual(2, slaves_mock.call_count)
def test_drop_all_items(self, slaves_mock):
microtimestamp = time.time()
timestamp = int(microtimestamp)
processing_queue1 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 15)
self.client.lpush(processing_queue1, 1, 5, 3)
self.client.hset(TIMEOUT_QUEUE, processing_queue1, microtimestamp - 15)
processing_queue2 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 10)
self.client.lpush(processing_queue2, 1, 4, 6)
self.client.hset(TIMEOUT_QUEUE, processing_queue2, microtimestamp - 10)
processing_queue3 = PROCESSING_QUEUE_SCHEMA.format(socket.gethostname(), os.getpid(), timestamp - 5)
self.client.lpush(processing_queue3, 4, 7, 8)
self.client.hset(TIMEOUT_QUEUE, processing_queue3, microtimestamp - 5)
self.queue_instance.drop_all_items()
self.assertEqual([], self.client.lrange(QUEUE_NAME, 0, 10))
self.assertEqual([], self.client.keys(QUEUE_NAME + '*'))
self.assertEqual(1, slaves_mock.call_count)
# BUG FIX: the guard compared against 'main' instead of '__main__', which can
# never match, so running this file as a script silently did nothing.
if __name__ == '__main__':
    unittest.main()
| 46.218519 | 109 | 0.675134 |
530affd163d5fc899995e9d151d8956b5bebfb25 | 2,065 | py | Python | zql-backend/grammar_persistence/graph.py | Zubdano/zql | 2e1ec981da60cfd41ad743bc38950c97e506c2c8 | [
"MIT"
] | null | null | null | zql-backend/grammar_persistence/graph.py | Zubdano/zql | 2e1ec981da60cfd41ad743bc38950c97e506c2c8 | [
"MIT"
] | 4 | 2017-09-25T04:35:01.000Z | 2017-12-18T04:15:04.000Z | zql-backend/grammar_persistence/graph.py | Zubdano/zql | 2e1ec981da60cfd41ad743bc38950c97e506c2c8 | [
"MIT"
] | null | null | null | def construct_graph(grammar):
"""Constructs a graph representation of the input grammar.
The graph contains only the LHS rules, and doesn't containt any free variables.
Returns in an adjacency list format.
"""
graph = {}
rules = filter(lambda lhs: grammar[lhs]['type'] == 'rule', grammar.keys())
rules = set(rules)
for rule in rules:
graph[rule] = []
for value in iter_rhs(grammar[rule]['value']):
if value in rules:
graph[rule].append(value)
return dict(graph)
def verify_structure(graph):
    """Verifies the structure of the graph, if it is a connected DAG."""
    try:
        full_order = topological_sort(graph)
    except CycleFound as cycle_err:
        return False, 'Cycle found with rules {}'.format(', '.join(cycle_err.cycle)), None
    # Re-run a DFS starting from the first node of the topological order;
    # if it cannot reach every vertex, the graph has more than one root.
    reachable = []
    dfs(full_order[0], graph, set(), reachable, set())
    if len(reachable) != len(full_order):
        return False, 'Multiple root nodes', full_order
    return True, '', full_order
def iter_rhs(values):
    """Flattens the RHS values array which is passed by the frontend."""
    for element in values:
        # strings are iterable but count as leaves, not as nested containers
        if hasattr(element, '__iter__') and not isinstance(element, str):
            yield from iter_rhs(element)
        else:
            yield element
###
### PRIVATE HELPER FUNCTIONS
###
class CycleFound(Exception):
    """Raised by the DFS when it revisits a node on the current traversal path."""

    def __init__(self, cycle):
        super().__init__()
        # the set of nodes on the active path, so callers can report the cycle
        self.cycle = cycle
def nested(obj):
    """Return True when *obj* is an iterable container (strings excluded)."""
    if isinstance(obj, str):
        return False
    return hasattr(obj, '__iter__')
def dfs(v, graph, visited, ordering, current):
    """Post-order DFS from *v*.

    Appends finished nodes to *ordering* and raises CycleFound if *v* is
    already on the active path (*current*).
    """
    if v in current:
        raise CycleFound(current)
    current.add(v)
    for neighbour in graph[v]:
        if neighbour in visited:
            continue
        dfs(neighbour, graph, visited, ordering, current)
    current.remove(v)
    visited.add(v)
    ordering.append(v)
def topological_sort(graph):
    """Return the vertices of *graph* topologically sorted (raises CycleFound on a cycle)."""
    seen = set()
    post_order = []
    for vertex in graph:
        if vertex in seen:
            continue
        dfs(vertex, graph, seen, post_order, set())
    # the DFS emits reverse-topological (post-order) output; flip it
    return list(reversed(post_order))
| 23.465909 | 83 | 0.602906 |
342f191d50a47288d145ba3b82b0d89455fcf4a4 | 317 | py | Python | Languages/Python3/Practise/0004/0004.py | PaddyHuang/Basic | b304a8b7a24e216486293182fe86325e6a7746ff | [
"MIT"
] | null | null | null | Languages/Python3/Practise/0004/0004.py | PaddyHuang/Basic | b304a8b7a24e216486293182fe86325e6a7746ff | [
"MIT"
] | null | null | null | Languages/Python3/Practise/0004/0004.py | PaddyHuang/Basic | b304a8b7a24e216486293182fe86325e6a7746ff | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
# Description: 任一个英文的纯文本文件,统计其中的单词出现的个数。
# Author: Paddy Huang
# Date: 2020-03-28 22:35
import re
def func(file_path):
    """Count the words in the text file at *file_path*.

    Prints the count (preserving the original behaviour) and also returns it
    so callers can use the value programmatically.
    """
    # use a context manager so the file handle is always closed
    # (the original `open(...).read()` leaked the handle)
    with open(file_path, 'r') as fh:
        text = fh.read()
    # a "word" may include hyphens, underscores, apostrophes and dots
    words = re.findall(r"[\w\-\_\'\.]+", text)
    count = len(words)
    print(count)
    return count
if __name__ == '__main__':
    # sample invocation against the bundled corpus file
    file_path = 'English.txt'
    func(file_path)
| 17.611111 | 42 | 0.66877 |
58392a7a0d3ceb90fa9b59bde18d6b6d31257982 | 4,028 | py | Python | processing/dbmanageragent.py | paganellif/sctm-project-paganelli-2021 | 810d9a82c653db8ec9db117c57f35b64cc14aaaf | [
"MIT"
] | null | null | null | processing/dbmanageragent.py | paganellif/sctm-project-paganelli-2021 | 810d9a82c653db8ec9db117c57f35b64cc14aaaf | [
"MIT"
] | null | null | null | processing/dbmanageragent.py | paganellif/sctm-project-paganelli-2021 | 810d9a82c653db8ec9db117c57f35b64cc14aaaf | [
"MIT"
] | null | null | null | import json
from spade.agent import Agent
from spade.behaviour import State
from spade.message import Message
from spade.template import Template
from processing.dbmanagerstrategy import DBManagerStrategy
from base.fsm import BaseFSM
from processing.dbconnfactory import DBConnFactory
import util.config as cfg
from util.logger import LoggerImpl
class InsertSensorValue(State):
    """
    Behavior where the agent actually stores the detections made by the SensorAgents of the network
    """

    async def run(self):
        # wait up to 10s for a message; None on timeout
        msg = await self.receive(timeout=10)
        if msg:
            if self.agent.sensor_values.match(msg):
                # detection message: validate the payload via the agent's strategy
                body: dict = json.loads(str(msg.body))
                if self.agent.check_strategy.check_values(body):
                    if cfg.logging["enabled"]:
                        self.agent.log.log("Message received from " + str(msg.sender) + ": " + str(msg.body))
                    db_coll = cfg.collections["sensors_detections"]
                    res = self.agent.db_conn.insert_sensor_value(str(msg.sender), body, db_coll)
                    if body.get(cfg.trigger_events["event"]):
                        # the payload flags an interesting event: stash the
                        # inserted document id + sender, then notify the trigger
                        if cfg.logging["enabled"]:
                            self.agent.log.log("EVENT DETECTED -----> TO BE CHECKED")
                        self.set("_id_read", res.inserted_id)
                        self.set("agent_jid", str(msg.sender))
                        self.set_next_state("STATE_TWO")
                    else:
                        self.set_next_state("STATE_ONE")
                else:
                    # validation failed: discard and keep listening
                    self.set_next_state("STATE_ONE")
            elif self.agent.config_update.match(msg):
                # configuration update from a node: persist it as-is
                if cfg.logging["enabled"]:
                    self.agent.log.log("Message received from " + str(msg.sender) + ": " + str(msg.body))
                self.agent.db_conn.insert_node_config(str(msg.sender), json.loads(str(msg.body)))
                self.set_next_state("STATE_ONE")
            else:
                # message matched no known template: ignore
                self.set_next_state("STATE_ONE")
        else:
            # receive timed out: loop back and keep listening
            self.set_next_state("STATE_ONE")
class InformTrigger(State):
    """
    Behavior where the agent informs the TriggerAgent agent when an interesting event is detected
    """

    async def run(self):
        msg = Message(to=cfg.jid["trigger_agent"])
        msg.set_metadata("performative", "inform")
        # forward the id of the stored detection and its originating agent,
        # both stashed in the FSM state by InsertSensorValue
        document: dict = {"_id_read": str(self.get("_id_read")), "agent_jid": self.get("agent_jid")}
        msg.body = json.dumps(document)
        if cfg.logging["enabled"]:
            self.agent.log.log("Message sent to trigger")
        await self.send(msg)
        # go back to listening for new detections
        self.set_next_state("STATE_ONE")
class DBManagerAgentBehav(BaseFSM):
    """
    General agent behavior: a two-state FSM that stores detections
    (STATE_ONE) and notifies the trigger agent on events (STATE_TWO).
    """

    async def on_start(self):
        await super().on_start()
        self.add_state(name="STATE_ONE", state=InsertSensorValue(), initial=True)
        self.add_state(name="STATE_TWO", state=InformTrigger())
        # STATE_ONE may loop on itself or escalate; STATE_TWO always returns
        self.add_transition(source="STATE_ONE", dest="STATE_ONE")
        self.add_transition(source="STATE_ONE", dest="STATE_TWO")
        self.add_transition(source="STATE_TWO", dest="STATE_ONE")
class DBManagerAgent(Agent):
    """XMPP agent that persists sensor detections and node configurations."""

    def __init__(self, agent_jid: str, password: str):
        """
        :param agent_jid: unique identifier of the agent
        :param password: string used for agent authentication on the xmpp server
        """
        super().__init__(agent_jid, password)
        self.db_conn = DBConnFactory().create_manager_db_connector()
        # NOTE(review): declared but never assigned here — presumably injected
        # by the caller before the behaviour runs; confirm at the call site
        self.check_strategy: DBManagerStrategy
        self.log = LoggerImpl(str(self.jid))
        # message templates used by InsertSensorValue to classify inbound messages
        self.sensor_values = Template()
        self.config_update = Template()

    async def setup(self):
        self.sensor_values.set_metadata("performative", "inform")
        self.sensor_values.set_metadata("service", "detection")
        self.config_update.set_metadata("performative", "inform")
        self.config_update.set_metadata("service", "config")
        self.add_behaviour(DBManagerAgentBehav())
69f19c069a82094ff2aa84fb190b86fd553e913c | 1,885 | py | Python | pyQBTNs/src/Tensor_Train_Recursive.py | MaksimEkin/pyQBTNs | fab4db14c0b7c883f51efa4a79d8f9b3b6a6d6f8 | [
"BSD-3-Clause"
] | null | null | null | pyQBTNs/src/Tensor_Train_Recursive.py | MaksimEkin/pyQBTNs | fab4db14c0b7c883f51efa4a79d8f9b3b6a6d6f8 | [
"BSD-3-Clause"
] | null | null | null | pyQBTNs/src/Tensor_Train_Recursive.py | MaksimEkin/pyQBTNs | fab4db14c0b7c883f51efa4a79d8f9b3b6a6d6f8 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import math
from .Matrix_Factorization import Matrix_Factorization
from .tensor_utils import split_TT
class Tensor_Train_Recursive():
    """Tensor-train factorization by recursively splitting the tensor in two
    halves and factorizing each reshaped half as a matrix."""

    def __init__(self, **parameters):
        """
        Parameters
        ----------
        **parameters : dictionary
            Passed from pyQBTNs from initialization.

        Returns
        -------
        None.
        """
        self.MF = Matrix_Factorization(**parameters)

    def train(self, T, dimensions, ranks):
        """
        Factor the input tensor using the Tensor_Train_Recursive algorithm

        Parameters
        ----------
        T : numpy array
            Tensor to be factored.
        dimensions : list
            tensor dimensions.
        ranks : list
            factorization ranks.

        Returns
        -------
        TTlist : list
            List of factors (tensor-train cores), left half followed by right half.
        """
        # NOTE(review): `ord` shadows the builtin; kept as-is for byte-compatibility
        ord = len(dimensions)
        # split roughly in the middle; the correction term adjusts for a
        # trailing mode whose dimension equals its rank
        split_point = math.ceil((ord-int(dimensions[-1]==ranks[-1]))/2)
        d1, dims1, ranks1 = split_TT(T, dimensions, ranks, range(split_point))
        d2, dims2, ranks2 = split_TT(T, dimensions, ranks, range(split_point,ord))
        # matricize: rows = left modes flattened, cols = right modes flattened
        reshaped_M = np.reshape(T, (d1, d2))
        M1, M2 = self.MF.train(reshaped_M, ranks1[-1]) # M1(n0*..*n_(split-1),r), M2(r,n_split*...*n_ord)
        # append the connecting rank to the left half's shape bookkeeping
        dims1.append(ranks[split_point-1])
        ranks1.append(ranks[split_point-1])
        if (split_point) > 2 or (split_point>1 and dims1[0]>ranks[0]):
            # left half still has several modes: recurse on it
            TTlist1 = self.train(M1, dims1, ranks1)
        else:
            TTlist1 = [np.reshape(M1, dims1)]
        # prepend the connecting rank to the right half's shape bookkeeping
        dims2 = [ranks[split_point-1]] + dims2
        ranks2 = [ranks[split_point-1]] + ranks2
        if (len(dims2) > 3) or (len(dims2) > 2 and dims2[-1]>ranks[-1]):
            # right half still has several modes: recurse on it
            TTlist2 = self.train(M2, dims2, ranks2)
        else:
            TTlist2 = [np.reshape(M2, dims2)]
        return(TTlist1 + TTlist2)
| 29.453125 | 106 | 0.561804 |
1a4315fc54e284bc8aff3b8ae0e6f942a3cfc74d | 3,279 | py | Python | topology_sdk/model/container/resource_requirements_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | topology_sdk/model/container/resource_requirements_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | topology_sdk/model/container/resource_requirements_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resource_requirements.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topology_sdk.model.container import resource_list_pb2 as topology__sdk_dot_model_dot_container_dot_resource__list__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='resource_requirements.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x30topology_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[topology__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])
_RESOURCEREQUIREMENTS = _descriptor.Descriptor(
name='ResourceRequirements',
full_name='container.ResourceRequirements',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limits', full_name='container.ResourceRequirements.limits', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requests', full_name='container.ResourceRequirements.requests', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=198,
)
_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = topology__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = topology__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEREQUIREMENTS,
'__module__' : 'resource_requirements_pb2'
# @@protoc_insertion_point(class_scope:container.ResourceRequirements)
})
_sym_db.RegisterMessage(ResourceRequirements)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.506024 | 372 | 0.798719 |
8027b5d9e74ab6e57a4eccb125f723d80b4e9d24 | 453 | py | Python | dgcrm/migrations/0023_auto_20170702_1919.py | DenysGurin/solnce | 233a7b9fb6b717c8e421552ba16b353700afc1b0 | [
"MIT"
] | null | null | null | dgcrm/migrations/0023_auto_20170702_1919.py | DenysGurin/solnce | 233a7b9fb6b717c8e421552ba16b353700afc1b0 | [
"MIT"
] | null | null | null | dgcrm/migrations/0023_auto_20170702_1919.py | DenysGurin/solnce | 233a7b9fb6b717c8e421552ba16b353700afc1b0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-02 19:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes ``Task.time`` optional (blank/null allowed)."""

    dependencies = [
        ('dgcrm', '0022_auto_20170702_1917'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='time',
            field=models.TimeField(blank=True, null=True),
        ),
    ]
| 21.571429 | 58 | 0.611479 |
0db76ee5e1ecd4728efe57b19c9cfc3d4dc3ecc2 | 51,838 | py | Python | pytest_cases/fixture_parametrize_plus.py | AnesBenmerzoug/python-pytest-cases | 8febdeeb52f6dcfcb54aa1e8e7d966427d030ad5 | [
"BSD-3-Clause"
] | null | null | null | pytest_cases/fixture_parametrize_plus.py | AnesBenmerzoug/python-pytest-cases | 8febdeeb52f6dcfcb54aa1e8e7d966427d030ad5 | [
"BSD-3-Clause"
] | null | null | null | pytest_cases/fixture_parametrize_plus.py | AnesBenmerzoug/python-pytest-cases | 8febdeeb52f6dcfcb54aa1e8e7d966427d030ad5 | [
"BSD-3-Clause"
] | null | null | null | # Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
from collections import Iterable
from inspect import isgeneratorfunction
from warnings import warn
try: # python 3.3+
from inspect import signature, Parameter
except ImportError:
from funcsigs import signature, Parameter # noqa
try:
from typing import Union, Callable, List, Any, Sequence, Optional # noqa
except ImportError:
pass
import pytest
from makefun import with_signature, remove_signature_parameters, add_signature_parameters, wraps
from .common_mini_six import string_types
from .common_others import AUTO
from .common_pytest_marks import has_pytest_param, get_param_argnames_as_list
from .common_pytest_lazy_values import is_lazy_value, is_lazy, get_lazy_args
from .common_pytest import get_fixture_name, remove_duplicates, mini_idvalset, is_marked_parameter_value, \
extract_parameterset_info, ParameterSet, cart_product_pytest, mini_idval, inject_host, \
get_marked_parameter_values, resolve_ids
from .fixture__creation import check_name_available, CHANGE, WARN
from .fixture_core1_unions import InvalidParamsList, NOT_USED, UnionFixtureAlternative, _make_fixture_union, \
_make_unpack_fixture, UnionIdMakers
from .fixture_core2 import _create_param_fixture, fixture_plus
def _fixture_product(fixtures_dest,
                     name,  # type: str
                     fixtures_or_values,
                     fixture_positions,
                     scope="function",  # type: str
                     ids=None,  # type: Union[Callable, Iterable[str]]
                     unpack_into=None,  # type: Iterable[str]
                     autouse=False,  # type: bool
                     hook=None,  # type: Callable[[Callable], Callable]
                     caller=None,  # type: Callable
                     **kwargs):
    """
    Internal implementation for fixture products created by pytest parametrize plus.

    Creates a fixture named `name` returning a tuple combining fixed values and
    other fixtures' values, and registers it on `fixtures_dest`.

    :param fixtures_dest: module/class on which the generated fixture is set.
    :param name: name of the generated product fixture.
    :param fixtures_or_values: the tuple elements; fixed values, `fixture_ref`s
        or fixture symbols/names at the positions listed in `fixture_positions`.
    :param fixture_positions: indices in `fixtures_or_values` that are fixtures.
    :param scope: pytest fixture scope of the generated fixture.
    :param ids: ids passed through to `fixture_plus`.
    :param unpack_into: optional argnames to unpack the product tuple into.
    :param autouse: pytest autouse flag of the generated fixture.
    :param hook: optional callable applied to the generated fixture function.
    :param caller: caller shown in the name-collision warning, if any.
    :param kwargs: forwarded to `fixture_plus`.
    :return: the generated fixture function.
    """
    # test the `fixtures` argument to avoid common mistakes
    if not isinstance(fixtures_or_values, (tuple, set, list)):
        raise TypeError("fixture_product: the `fixtures_or_values` argument should be a tuple, set or list")
    else:
        has_lazy_vals = any(is_lazy_value(v) for v in fixtures_or_values)

    _tuple_size = len(fixtures_or_values)

    # first get all required fixture names
    f_names = [None] * _tuple_size
    for f_pos in fixture_positions:
        # possibly get the fixture name if the fixture symbol was provided
        f = fixtures_or_values[f_pos]
        if isinstance(f, fixture_ref):
            f = f.fixture
        # and remember the position in the tuple
        f_names[f_pos] = get_fixture_name(f)

    # remove duplicates by making it an ordered set
    all_names = remove_duplicates((n for n in f_names if n is not None))
    if len(all_names) < 1:
        raise ValueError("Empty fixture products are not permitted")

    def _tuple_generator(request, all_fixtures):
        # yield the product tuple elements one by one, resolving fixtures by name
        for i in range(_tuple_size):
            fix_at_pos_i = f_names[i]
            if fix_at_pos_i is None:
                # fixed value
                # note: wouldnt it be almost as efficient but more readable to *always* call handle_lazy_args?
                yield get_lazy_args(fixtures_or_values[i], request) if has_lazy_vals else fixtures_or_values[i]
            else:
                # fixture value
                yield all_fixtures[fix_at_pos_i]

    # then generate the body of our product fixture. It will require all of its dependent fixtures
    @with_signature("(request, %s)" % ', '.join(all_names))
    def _new_fixture(request, **all_fixtures):
        return tuple(_tuple_generator(request, all_fixtures))

    _new_fixture.__name__ = name

    # finally create the fixture per se.
    # WARNING we do not use pytest.fixture but fixture_plus so that NOT_USED is discarded
    f_decorator = fixture_plus(scope=scope, autouse=autouse, ids=ids, hook=hook, **kwargs)
    fix = f_decorator(_new_fixture)

    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    check_name_available(fixtures_dest, name, if_name_exists=WARN, caller=caller)
    setattr(fixtures_dest, name, fix)

    # if unpacking is requested, do it here
    if unpack_into is not None:
        _make_unpack_fixture(fixtures_dest, argnames=unpack_into, fixture=name, hook=hook)

    return fix


_make_fixture_product = _fixture_product
"""A readable alias for callers not using the returned symbol"""
class fixture_ref(object):  # noqa
    """
    A reference to a fixture, to be used in `@parametrize_plus`.
    You can create it from a fixture name or a fixture object (function).
    """
    __slots__ = 'fixture', 'theoretical_size'

    def __init__(self, fixture):
        # normalize to the fixture *name*; the size stays unknown until
        # @parametrize fills it in
        self.fixture = get_fixture_name(fixture)
        self.theoretical_size = None

    def __repr__(self):
        return "fixture_ref<%s>" % self.fixture

    def _check_iterable(self):
        """Raise a TypeError if this fixture reference is not iterable, that is, it does not represent a tuple"""
        size = self.theoretical_size
        if size is None:
            raise TypeError("This fixture_ref has not yet been initialized")
        elif size == 1:
            raise TypeError("This fixture_ref does not represent a tuple of arguments, it is not iterable")

    def __len__(self):
        self._check_iterable()
        return self.theoretical_size

    def __getitem__(self, item):
        """
        Return an item in the tuple described by this fixture_ref, as a
        FixtureRefItem facade.

        Note: this is only used when a custom `idgen` is passed to @parametrize.
        """
        self._check_iterable()
        return FixtureRefItem(self, item)
class FixtureRefItem(object):
    """An item in a fixture_ref when this fixture_ref is used as a tuple."""
    __slots__ = 'host', 'item'

    def __init__(self,
                 host,  # type: fixture_ref
                 item  # type: int
                 ):
        # the tuple-like fixture_ref this element belongs to, and its index in it
        self.host = host
        self.item = item

    def __repr__(self):
        return "%r[%s]" % (self.host, self.item)
# Fix for https://github.com/smarie/python-pytest-cases/issues/71
# In order for pytest to allow users to import this symbol in conftest.py
# they should be declared as optional plugin hooks.
# A workaround otherwise would be to remove the 'pytest_' name prefix
# See https://github.com/pytest-dev/pytest/issues/6475
@pytest.hookimpl(optionalhook=True)
def pytest_parametrize_plus(*args,
                            **kwargs):
    """Deprecated alias of `parametrize_plus`; emits a DeprecationWarning and delegates."""
    warn("`pytest_parametrize_plus` is deprecated. Please use the new alias `parametrize_plus`. "
         "See https://github.com/pytest-dev/pytest/issues/6475", category=DeprecationWarning, stacklevel=2)
    return parametrize_plus(*args, **kwargs)
class ParamAlternative(UnionFixtureAlternative):
    """Defines an "alternative", used to parametrize a fixture union in the context of parametrize_plus"""
    __slots__ = ('argnames', )

    def __init__(self,
                 union_name,
                 alternative_name,
                 argnames,
                 ):
        super(ParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name)
        # the list of parameter names this alternative covers
        self.argnames = argnames

    @property
    def argnames_str(self):
        """The argnames joined with '_', for use in generated test ids."""
        return '_'.join(self.argnames)
class SingleParamAlternative(ParamAlternative):
    """alternative class for single parameter value"""
    __slots__ = 'argvalues_index', 'argvalues'

    def __init__(self,
                 union_name,
                 alternative_name,
                 argnames,
                 argvalues_index,
                 argvalues
                 ):
        super(SingleParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name,
                                                     argnames=argnames)
        # index of this value set in the original argvalues list, and the values themselves
        self.argvalues_index = argvalues_index
        self.argvalues = argvalues

    def get_id(self):
        """Generate the same id pytest would have generated for this value set."""
        return mini_idvalset(self.argnames, self.argvalues, idx=self.argvalues_index)
class MultiParamAlternative(ParamAlternative):
    """alternative class for multiple parameter values"""
    __slots__ = 'argvalues_index_from', 'argvalues_index_to'

    def __init__(self,
                 union_name,
                 alternative_name,
                 argnames,
                 argvalues_index_from,
                 argvalues_index_to
                 ):
        super(MultiParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name,
                                                    argnames=argnames)
        # half-open range [from, to) of covered indices in the original argvalues
        self.argvalues_index_from = argvalues_index_from
        self.argvalues_index_to = argvalues_index_to
class FixtureParamAlternative(ParamAlternative):
    """alternative class for a single parameter containing a fixture ref"""
    __slots__ = 'argvalues_index',

    def __init__(self,
                 union_name,
                 alternative_name,
                 argnames,
                 argvalues_index,
                 ):
        super(FixtureParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name,
                                                      argnames=argnames)
        # index of the fixture_ref in the original argvalues list
        self.argvalues_index = argvalues_index
class ProductParamAlternative(ParamAlternative):
    """alternative class for a single product parameter containing fixture refs"""
    __slots__ = 'argvalues_index'

    def __init__(self,
                 union_name,
                 alternative_name,
                 argnames,
                 argvalues_index,
                 ):
        super(ProductParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name,
                                                      argnames=argnames)
        # index of the product value set in the original argvalues list
        self.argvalues_index = argvalues_index
class ParamIdMakers(UnionIdMakers):
    """ 'Enum' of id styles for param ids

    It extends UnionIdMakers so that the 'explicit' style can properly handle the special fixture alternatives we
    create in @parametrize
    """
    @classmethod
    def compact(cls, param):
        """Make a compact id of the form 'P<alternative_name>'."""
        return "P%s" % param.alternative_name

    @classmethod
    def explicit(cls, param  # type: ParamAlternative
                 ):
        """Overridden to handle the fixtures we create for parameters in @parametrize """
        if isinstance(param, SingleParamAlternative):
            # single value set: embed the pytest-style value id
            return "%s_is_%s" % (param.argnames_str, param.get_id())
        elif isinstance(param, MultiParamAlternative):
            # range of value sets: show the covered index range (inclusive)
            return "%s_is_P%stoP%s" % (param.argnames_str, param.argvalues_index_from, param.argvalues_index_to - 1)
        elif isinstance(param, FixtureParamAlternative):
            # fixture reference: use the referenced fixture's name
            return "%s_is_%s" % (param.argnames_str, param.alternative_name)
        elif isinstance(param, ProductParamAlternative):
            return "%s_is_P%s" % (param.argnames_str, param.argvalues_index)
        else:
            raise TypeError("Unsupported alternative: %r" % param)
_IDGEN = object()  # sentinel distinguishing "idgen not provided" from an explicit `idgen=None`
def parametrize_plus(argnames=None,  # type: str
                     argvalues=None,  # type: Iterable[Any]
                     indirect=False,  # type: bool
                     ids=None,  # type: Union[Callable, Iterable[str]]
                     idstyle='explicit',  # type: Optional[str]
                     idgen=_IDGEN,  # type: Union[str, Callable]
                     scope=None,  # type: str
                     hook=None,  # type: Callable[[Callable], Callable]
                     debug=False,  # type: bool
                     **args):
    """
    Equivalent to `@pytest.mark.parametrize` but also supports
    (1) new alternate style for argnames/argvalues. One can also use `**args` to pass additional `{argnames: argvalues}`
    in the same parametrization call. This can be handy in combination with `idgen` to master the whole id template
    associated with several parameters. Note that you can pass coma-separated argnames too, by de-referencing a dict:
    e.g. `**{'a,b': [(0, True), (1, False)], 'c': [-1, 2]}`.
    (2) new alternate style for ids. One can use `idgen` instead of `ids`. `idgen` can be a callable receiving all
    parameters at once (`**args`) and returning an id ; or it can be a string template using the new-style string
    formatting where the argnames can be used as variables (e.g. `idgen=lambda **args: "a={a}".format(**args)` or
    `idgen="my_id where a={a}"`). The special `idgen=AUTO` symbol can be used to generate a default string template
    equivalent to `lambda **args: "-".join("%s=%s" % (n, v) for n, v in args.items())`. This is enabled by default
    if you use the alternate style for argnames/argvalues (e.g. if `len(args) > 0`).
    (3) new possibilities in argvalues:
    - one can include references to fixtures with `fixture_ref(<fixture>)` where <fixture> can be the fixture name or
    fixture function. When such a fixture reference is detected in the argvalues, a new function-scope "union" fixture
    will be created with a unique name, and the test function will be wrapped so as to be injected with the correct
    parameters from this fixture. Special test ids will be created to illustrate the switching between the various
    normal parameters and fixtures. You can see debug print messages about all fixtures created using `debug=True`
    - one can include lazy argvalues with `lazy_value(<valuegetter>, [id=..., marks=...])`. A `lazy_value` is the same
    thing than a function-scoped fixture, except that the value getter function is not a fixture and therefore can
    neither be parametrized nor depend on fixtures. It should have no mandatory argument.
    Both `fixture_ref` and `lazy_value` can be used to represent a single argvalue, or a whole tuple of argvalues when
    there are several argnames. Several of them can be used in a tuple.
    Finally, `pytest.param` is supported even when there are `fixture_ref` and `lazy_value`.
    An optional `hook` can be passed, to apply on each fixture function that is created during this call. The hook
    function will be called everytime a fixture is about to be created. It will receive a single argument (the
    function implementing the fixture) and should return the function to use. For example you can use `saved_fixture`
    from `pytest-harvest` as a hook in order to save all such created fixtures in the fixture store.
    :param argnames: same as in pytest.mark.parametrize
    :param argvalues: same as in pytest.mark.parametrize except that `fixture_ref` and `lazy_value` are supported
    :param indirect: same as in pytest.mark.parametrize. Note that it is not recommended and is not guaranteed to work
        in complex parametrization scenarios.
    :param ids: same as in pytest.mark.parametrize. Note that an alternative way to create ids exists with `idgen`. Only
        one non-None `ids` or `idgen` should be provided.
    :param idgen: an id formatter. Either a string representing a template, or a callable receiving all argvalues
        at once (as opposed to the behaviour in pytest ids). This alternative way to generate ids can only be used when
        `ids` is not provided (None). You can use the special `AUTO` formatter to generate an automatic id with
        template <name>=<value>-<name2>=<value2>-etc.
    :param idstyle: style of ids to be used in the "union" fixtures generated by `@parametrize` when some cases require
        fixtures. One of 'compact', 'explicit' or None/'nostyle'. See `ParamIdMakers` for details
    :param scope: same as in pytest.mark.parametrize
    :param hook: an optional hook to apply to each fixture function that is created during this call. The hook function
        will be called everytime a fixture is about to be created. It will receive a single argument (the function
        implementing the fixture) and should return the function to use. For example you can use `saved_fixture` from
        `pytest-harvest` as a hook in order to save all such created fixtures in the fixture store.
    :param debug: print debug messages on stdout to analyze fixture creation (use pytest -s to see them)
    :param args: additional {argnames: argvalues} definition
    :return: the decorator to apply to the test function
    """
    _decorate, needs_inject = _parametrize_plus(argnames, argvalues, indirect=indirect, ids=ids, idgen=idgen,
                                                idstyle=idstyle, scope=scope, hook=hook, debug=debug, **args)
    if needs_inject:
        # fixture refs were found: the created fixtures must be registered on the host class or module
        @inject_host
        def _apply_parametrize_plus(f, host_class_or_module):
            return _decorate(f, host_class_or_module)
        return _apply_parametrize_plus
    else:
        return _decorate
class InvalidIdTemplateException(Exception):
    """
    Raised when the string template passed as `idgen` fails to format with the parameter values.
    """
    def __init__(self, idgen, params, caught):
        super(InvalidIdTemplateException, self).__init__()
        self.idgen = idgen
        self.params = params
        self.caught = caught
    def __repr__(self):
        msg = ("Error generating test id using name template '%s' with parameter values "
               "%r. Please check the name template. Caught: %s - %s")
        return msg % (self.idgen, self.params, self.caught.__class__, self.caught)
    def __str__(self):
        return repr(self)
def _parametrize_plus(argnames=None,
                      argvalues=None,
                      indirect=False,  # type: bool
                      ids=None,  # type: Union[Callable, Iterable[str]]
                      idstyle='explicit',  # type: Optional[str]
                      idgen=_IDGEN,  # type: Union[str, Callable]
                      scope=None,  # type: str
                      hook=None,  # type: Callable[[Callable], Callable]
                      debug=False,  # type: bool
                      **args):
    """
    Implementation of `@parametrize` - see `parametrize_plus` for argument documentation.

    :return: a tuple (decorator, needs_inject) where needs_inject is True if decorator has signature (f, host)
        and False if decorator has signature (f)
    """
    # idgen default
    if idgen is _IDGEN:
        # default: use the new id style only when some **args are provided
        idgen = AUTO if len(args) > 0 else None
    if idgen is AUTO:
        # note: we use a "trick" here with mini_idval to get the appropriate result
        def _make_ids(**args):
            for n, v in args.items():
                yield "%s=%s" % (n, mini_idval(val=v, argname='', idx=v))
        idgen = lambda **args: "-".join(_make_ids(**args))
    # first handle argnames / argvalues (new modes of input)
    argnames, argvalues = _get_argnames_argvalues(argnames, argvalues, **args)
    # argnames related
    initial_argnames = ','.join(argnames)
    nb_params = len(argnames)
    # extract all marks and custom ids.
    # Do not check consistency of sizes argname/argvalue as a fixture_ref can stand for several argvalues.
    marked_argvalues = argvalues
    has_cust_ids = (idgen is not None) or (ids is not None)
    p_ids, p_marks, argvalues, fixture_indices = _process_argvalues(argnames, marked_argvalues, nb_params, has_cust_ids)
    # generate id
    if idgen is not None:
        if ids is not None:
            raise ValueError("Only one of `ids` and `idgen` should be provided")
        ids = _gen_ids(argnames, argvalues, idgen)
    if len(fixture_indices) == 0:
        # ---- simple case: no fixture_ref in argvalues, delegate to plain pytest.mark.parametrize ----
        if debug:
            print("No fixture reference found. Calling @pytest.mark.parametrize...")
            print(" - argnames: %s" % initial_argnames)
            print(" - argvalues: %s" % marked_argvalues)
            print(" - ids: %s" % ids)
        # handle infinite iterables like latest pytest, for convenience
        ids = resolve_ids(ids, marked_argvalues)
        # no fixture reference: shortcut, do as usual (note that the hook wont be called since no fixture is created)
        _decorator = pytest.mark.parametrize(initial_argnames, marked_argvalues, indirect=indirect,
                                             ids=ids, scope=scope)
        if indirect:
            return _decorator, False
        else:
            # wrap the decorator to check if the test function has the parameters as arguments
            def _apply(test_func):
                s = signature(test_func)
                for p in argnames:
                    if p not in s.parameters:
                        raise ValueError("parameter '%s' not found in test function signature '%s%s'"
                                         "" % (p, test_func.__name__, s))
                return _decorator(test_func)
            return _apply, False
    else:
        # ---- complex case: at least one fixture_ref, replace the params with a "union" fixture ----
        if indirect:
            raise ValueError("Setting `indirect=True` is not yet supported when at least a `fixure_ref` is present in "
                             "the `argvalues`.")
        if debug:
            print("Fixture references found. Creating references and fixtures...")
        # there are fixture references: we will create a specific decorator replacing the params with a "union" fixture
        param_names_str = '_'.join(argnames).replace(' ', '')
        # First define a few functions that will help us create the various fixtures to use in the final "union"
        def _make_idfun_for_params(argnames,  # noqa
                                   nb_positions):
            """
            Creates an id creating function that will use 'argnames' as the argnames
            instead of the one(s) received by pytest. We use this in the case of param fixture
            creation because on one side we need a unique fixture name so it is big and horrible,
            but on the other side we want the id to rather reflect the simple argnames, no that fixture name.
            :param argnames:
            :param nb_positions:
            :return:
            """
            # create a new make id function with its own local counter of parameter
            def _tmp_make_id(argvals):
                _tmp_make_id.i += 1
                if _tmp_make_id.i >= nb_positions:
                    raise ValueError("Internal error, please report")
                if len(argnames) <= 1:
                    argvals = (argvals,)
                elif is_lazy(argvals):
                    return argvals.get_id()
                return mini_idvalset(argnames, argvals, idx=_tmp_make_id.i)
            # init its positions counter
            _tmp_make_id.i = -1
            return _tmp_make_id
        def _create_params_alt(fh, test_func_name, union_name, from_i, to_i, hook):  # noqa
            """ Routine that will be used to create a parameter fixture for argvalues between prev_i and i"""
            # check if this is about a single value or several values
            single_param_val = (to_i == from_i + 1)
            if single_param_val:
                i = from_i  # noqa
                # Create a unique fixture name
                p_fix_name = "%s_%s_P%s" % (test_func_name, param_names_str, i)
                p_fix_name = check_name_available(fh, p_fix_name, if_name_exists=CHANGE, caller=parametrize_plus)
                if debug:
                    print(" - Creating new fixture %r to handle parameter %s" % (p_fix_name, i))
                # Create the fixture that will return the unique parameter value ("auto-simplify" flag)
                # IMPORTANT auto_simplify=True we create a NON-parametrized fixture. use argvalues not marked_argvalues
                _create_param_fixture(fh, argname=p_fix_name, argvalues=argvalues[i:i + 1], hook=hook, auto_simplify=True)
                # Create the alternative
                argvals = (argvalues[i],) if nb_params == 1 else argvalues[i]
                p_fix_alt = SingleParamAlternative(union_name=union_name, alternative_name=p_fix_name,
                                                   argnames=argnames, argvalues_index=i, argvalues=argvals)
                # Finally copy the custom id/marks on the ParamAlternative if any
                if is_marked_parameter_value(marked_argvalues[i]):
                    # TODO to support pytest 2 we should rather use our ParameterSet instead of pytest.param
                    p_fix_alt = pytest.param(p_fix_alt, id=marked_argvalues[i].id, marks=marked_argvalues[i].marks)
                    # p_fix_alt = ParameterSet(values=(p_fix_alt,),
                    #                          id=get_marked_parameter_id(marked_argvalues[i]),
                    #                          marks=get_marked_parameter_marks(marked_argvalues[i]))
            else:
                # Create a unique fixture name
                p_fix_name = "%s_%s_is_P%stoP%s" % (test_func_name, param_names_str, from_i, to_i - 1)
                p_fix_name = check_name_available(fh, p_fix_name, if_name_exists=CHANGE, caller=parametrize_plus)
                if debug:
                    print(" - Creating new fixture %r to handle parameters %s to %s" % (p_fix_name, from_i, to_i - 1))
                # If an explicit list of ids was provided, slice it. Otherwise use the provided callable
                try:
                    param_ids = ids[from_i:to_i]
                except TypeError:
                    # callable ? otherwise default to a customized id maker that replaces the fixture name
                    # that we use (p_fix_name) with a simpler name in the ids (just the argnames)
                    param_ids = ids or _make_idfun_for_params(argnames=argnames, nb_positions=(to_i - from_i))
                # Create the fixture that will take ALL these parameter values (in a single parameter)
                # That fixture WILL be parametrized, this is why we propagate the param_ids and use the marked values
                if nb_params == 1:
                    _argvals = marked_argvalues[from_i:to_i]
                else:
                    # we have to create a tuple around the vals because we have a SINGLE parameter that is a tuple
                    _argvals = tuple(ParameterSet((vals, ), id=id, marks=marks or ())
                                     for vals, id, marks in zip(argvalues[from_i:to_i],
                                                                p_ids[from_i:to_i], p_marks[from_i:to_i]))
                _create_param_fixture(fh, argname=p_fix_name, argvalues=_argvals, ids=param_ids, hook=hook)
                # todo put back debug=debug above
                # Create the corresponding alternative
                p_fix_alt = MultiParamAlternative(union_name=union_name, alternative_name=p_fix_name, argnames=argnames,
                                                  argvalues_index_from=from_i, argvalues_index_to=to_i)
                # no need to copy the custom id/marks to the ParamAlternative: they were passed above already
            return p_fix_name, p_fix_alt
        def _create_fixture_ref_alt(union_name, i):  # noqa
            # Get the referenced fixture name
            f_fix_name = argvalues[i].fixture
            if debug:
                print(" - Creating reference to existing fixture %r" % (f_fix_name,))
            # Create the alternative
            f_fix_alt = FixtureParamAlternative(union_name=union_name, alternative_name=f_fix_name,
                                                argnames=argnames, argvalues_index=i)
            # Finally copy the custom id/marks on the ParamAlternative if any
            if is_marked_parameter_value(marked_argvalues[i]):
                f_fix_alt = pytest.param(f_fix_alt, id=marked_argvalues[i].id, marks=marked_argvalues[i].marks)
            return f_fix_name, f_fix_alt
        def _create_fixture_ref_product(fh, union_name, i, fixture_ref_positions, test_func_name, hook):  # noqa
            # If an explicit list of ids was provided, slice it. Otherwise use the provided callable
            try:
                param_ids = ids[i]
            except TypeError:
                param_ids = ids  # callable
            # values to use:
            param_values = argvalues[i]
            # Create a unique fixture name
            p_fix_name = "%s_%s_P%s" % (test_func_name, param_names_str, i)
            p_fix_name = check_name_available(fh, p_fix_name, if_name_exists=CHANGE, caller=parametrize_plus)
            if debug:
                print(" - Creating new fixture %r to handle parameter %s that is a cross-product" % (p_fix_name, i))
            # Create the fixture
            _make_fixture_product(fh, name=p_fix_name, hook=hook, caller=parametrize_plus, ids=param_ids,
                                  fixtures_or_values=param_values, fixture_positions=fixture_ref_positions)
            # Create the corresponding alternative
            p_fix_alt = ProductParamAlternative(union_name=union_name, alternative_name=p_fix_name,
                                                argnames=argnames, argvalues_index=i)
            # copy the custom id/marks to the ParamAlternative if any
            if is_marked_parameter_value(marked_argvalues[i]):
                p_fix_alt = pytest.param(p_fix_alt, id=marked_argvalues[i].id, marks=marked_argvalues[i].marks)
            return p_fix_name, p_fix_alt
        # Then create the decorator per se
        def parametrize_plus_decorate(test_func, fixtures_dest):
            """
            A decorator that wraps the test function so that instead of receiving the parameter names, it receives the
            new fixture. All other decorations are unchanged.
            :param test_func:
            :return:
            """
            test_func_name = test_func.__name__
            # Are there explicit ids provided ?
            try:
                if len(ids) != len(argvalues):
                    raise ValueError("Explicit list of `ids` provided has a different length (%s) than the number of "
                                     "parameter sets (%s)" % (len(ids), len(argvalues)))
                explicit_ids_to_use = []
            except TypeError:
                # `ids` is None or a callable: no explicit id list to maintain
                explicit_ids_to_use = None
            # idstyle callable for generated parameter fixture alternatives
            idstyle_callable = ParamIdMakers.get(idstyle)
            # first check if the test function has the parameters as arguments
            old_sig = signature(test_func)
            for p in argnames:
                if p not in old_sig.parameters:
                    raise ValueError("parameter '%s' not found in test function signature '%s%s'"
                                     "" % (p, test_func_name, old_sig))
            # The name for the final "union" fixture
            # style_template = "%s_param__%s"
            main_fixture_style_template = "%s_%s"
            fixture_union_name = main_fixture_style_template % (test_func_name, param_names_str)
            fixture_union_name = check_name_available(fixtures_dest, fixture_union_name, if_name_exists=CHANGE,
                                                      caller=parametrize_plus)
            # Retrieve (if ref) or create (for normal argvalues) the fixtures that we will union
            fixture_alternatives = []
            prev_i = -1
            for i, j_list in fixture_indices:  # noqa
                # A/ Is there any non-empty group of 'normal' parameters before the fixture_ref at <i> ? If so, handle.
                if i > prev_i + 1:
                    # create a new "param" fixture parametrized with all of that consecutive group.
                    # Important note: we could either wish to create one fixture for parameter value or to create
                    # one for each consecutive group as shown below. This should not lead to different results but perf
                    # might differ. Maybe add a parameter in the signature so that users can test it ?
                    # this would make the ids more readable by removing the "P2toP3"-like ids
                    p_fix_name, p_fix_alt = _create_params_alt(fixtures_dest, test_func_name=test_func_name, hook=hook,
                                                               union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
                    fixture_alternatives.append((p_fix_name, p_fix_alt))
                    if explicit_ids_to_use is not None:
                        if isinstance(p_fix_alt, SingleParamAlternative):
                            explicit_ids_to_use.append(ids[prev_i + 1])
                        else:
                            # the ids provided by the user are propagated to the params of this fix, so we need an id
                            explicit_ids_to_use.append(idstyle_callable(p_fix_alt))
                # B/ Now handle the fixture ref at position <i>
                if j_list is None:
                    # argvalues[i] contains a single argvalue that is a fixture_ref : add the referenced fixture
                    f_fix_name, f_fix_alt = _create_fixture_ref_alt(union_name=fixture_union_name, i=i)
                    fixture_alternatives.append((f_fix_name, f_fix_alt))
                    if explicit_ids_to_use is not None:
                        explicit_ids_to_use.append(ids[i])
                else:
                    # argvalues[i] is a tuple, some of them being fixture_ref. create a fixture refering to all of them
                    prod_fix_name, prod_fix_alt = _create_fixture_ref_product(fixtures_dest,
                                                                              union_name=fixture_union_name, i=i,
                                                                              fixture_ref_positions=j_list,
                                                                              test_func_name=test_func_name, hook=hook)
                    fixture_alternatives.append((prod_fix_name, prod_fix_alt))
                    if explicit_ids_to_use is not None:
                        explicit_ids_to_use.append(ids[i])
                prev_i = i
            # C/ handle last consecutive group of normal parameters, if any
            i = len(argvalues)  # noqa
            if i > prev_i + 1:
                p_fix_name, p_fix_alt = _create_params_alt(fixtures_dest, test_func_name=test_func_name, hook=hook,
                                                           union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
                fixture_alternatives.append((p_fix_name, p_fix_alt))
                if explicit_ids_to_use is not None:
                    if isinstance(p_fix_alt, SingleParamAlternative):
                        explicit_ids_to_use.append(ids[prev_i + 1])
                    else:
                        # the ids provided by the user are propagated to the params of this fix, so we need an id
                        explicit_ids_to_use.append(idstyle_callable(p_fix_alt))
            # TO DO if fixtures_to_union has length 1, simplify ? >> No, we leave such "optimization" to the end user
            # consolidate the list of alternatives
            fix_alternatives = tuple(a[1] for a in fixture_alternatives)
            # and the list of their names. Duplicates should be removed here
            fix_alt_names = []
            for a, alt in fixture_alternatives:
                if a not in fix_alt_names:
                    fix_alt_names.append(a)
                else:
                    # this should only happen when the alternative is directly a fixture reference
                    if is_marked_parameter_value(alt):
                        alt = get_marked_parameter_values(alt)
                        assert len(alt) == 1, "Error with fixture reference, please report"
                        alt = alt[0]
                    assert isinstance(alt, FixtureParamAlternative), \
                        "Created fixture names are not unique, please report"
            # Finally create a "main" fixture with a unique name for this test function
            if debug:
                print("Creating final union fixture %r with alternatives %r"
                      % (fixture_union_name, UnionFixtureAlternative.to_list_of_fixture_names(fix_alternatives)))
            # note: the function automatically registers it in the module
            _make_fixture_union(fixtures_dest, name=fixture_union_name, hook=hook, caller=parametrize_plus,
                                fix_alternatives=fix_alternatives, unique_fix_alt_names=fix_alt_names,
                                ids=explicit_ids_to_use or ids or idstyle_callable)
            # --create the new test function's signature that we want to expose to pytest
            # it is the same than existing, except that we want to replace all parameters with the new fixture
            # first check where we should insert the new parameters (where is the first param we remove)
            _first_idx = -1
            for _first_idx, _n in enumerate(old_sig.parameters):
                if _n in argnames:
                    break
            # then remove all parameters that will be replaced by the new fixture
            new_sig = remove_signature_parameters(old_sig, *argnames)
            # finally insert the new fixture in that position. Indeed we can not insert first or last, because
            # 'self' arg (case of test class methods) should stay first and exec order should be preserved when possible
            new_sig = add_signature_parameters(new_sig, custom_idx=_first_idx,
                                               custom=Parameter(fixture_union_name,
                                                                kind=Parameter.POSITIONAL_OR_KEYWORD))
            if debug:
                print("Creating final test function wrapper with signature %s%s" % (test_func_name, new_sig))
            # --Finally create the fixture function, a wrapper of user-provided fixture with the new signature
            def replace_paramfixture_with_values(kwargs):  # noqa
                # remove the created fixture value
                encompassing_fixture = kwargs.pop(fixture_union_name)
                # and add instead the parameter values
                if nb_params > 1:
                    for i, p in enumerate(argnames):  # noqa
                        kwargs[p] = encompassing_fixture[i]
                else:
                    kwargs[argnames[0]] = encompassing_fixture
                # return
                return kwargs
            if not isgeneratorfunction(test_func):
                # normal test function with return statement
                @wraps(test_func, new_sig=new_sig)
                def wrapped_test_func(*args, **kwargs):  # noqa
                    if kwargs.get(fixture_union_name, None) is NOT_USED:
                        # TODO why this ? it is probably useless: this fixture
                        # is private and will never end up in another union
                        return NOT_USED
                    else:
                        replace_paramfixture_with_values(kwargs)
                        return test_func(*args, **kwargs)
            else:
                # generator test function (with one or several yield statements)
                @wraps(test_func, new_sig=new_sig)
                def wrapped_test_func(*args, **kwargs):  # noqa
                    if kwargs.get(fixture_union_name, None) is NOT_USED:
                        # TODO why this ? it is probably useless: this fixture
                        # is private and will never end up in another union
                        yield NOT_USED
                    else:
                        replace_paramfixture_with_values(kwargs)
                        for res in test_func(*args, **kwargs):
                            yield res
            # move all pytest marks from the test function to the wrapper
            # not needed because the __dict__ is automatically copied when we use @wraps
            # move_all_pytest_marks(test_func, wrapped_test_func)
            # With this hack we will be ordered correctly by pytest https://github.com/pytest-dev/pytest/issues/4429
            wrapped_test_func.place_as = test_func
            # return the new test function
            return wrapped_test_func
        return parametrize_plus_decorate, True
def _get_argnames_argvalues(argnames=None, argvalues=None, **args):
    """
    Validates and normalizes the parametrization inputs of `@parametrize`.

    :param argnames: the argnames as in `pytest.mark.parametrize` (coma-separated string, list or tuple),
        or None if all parameters are provided through `**args`.
    :param argvalues: the argvalues as in `pytest.mark.parametrize`, or None when `**args` is used alone.
    :param args: additional {argnames: argvalues} definitions, combined with the explicit ones through
        a cartesian product.
    :return: argnames, argvalues - both guaranteed to be lists
    :raises ValueError: if no parameters are provided, or argvalues is missing while argnames is given
    :raises TypeError: if argnames is not a string/list/tuple or contains non-string entries
    :raises InvalidParamsList: if argvalues is not iterable
    """
    # handle **args - a dict of {argnames: argvalues}
    if len(args) > 0:
        kw_argnames, kw_argvalues = cart_product_pytest(tuple(args.keys()), tuple(args.values()))
    else:
        kw_argnames, kw_argvalues = (), ()
    if argnames is None:
        # (1) all {argnames: argvalues} pairs are provided in **args
        if argvalues is not None or len(args) == 0:
            raise ValueError("No parameters provided")
        argnames = kw_argnames
        argvalues = kw_argvalues
        # simplify if needed to comply with pytest.mark.parametrize
        if len(argnames) == 1:
            argvalues = [l[0] if not is_marked_parameter_value(l) else l for l in argvalues]
        return argnames, argvalues
    if isinstance(argnames, string_types):
        # (2) argnames + argvalues, as usual. However **args can also be passed and should be added
        argnames = get_param_argnames_as_list(argnames)
    if not isinstance(argnames, (list, tuple)):
        raise TypeError("argnames should be a string, list or a tuple")
    # use `string_types` (not bare `str`) so unicode argnames are accepted on python 2,
    # consistently with the `isinstance(argnames, string_types)` check above.
    # note: a generator expression avoids materializing an intermediate list inside any().
    if any(not isinstance(argname, string_types) for argname in argnames):
        raise TypeError("all argnames should be strings")
    if argvalues is None:
        raise ValueError("No argvalues provided while argnames are provided")
    # transform argvalues to a list (it can be a generator)
    try:
        argvalues = list(argvalues)
    except TypeError:
        raise InvalidParamsList(argvalues)
    # append **args
    if len(kw_argnames) > 0:
        argnames, argvalues = cart_product_pytest((argnames, kw_argnames),
                                                  (argvalues, kw_argvalues))
    return argnames, argvalues
def _gen_ids(argnames, argvalues, idgen):
"""
Generates an explicit test ids list from a non-none `idgen`.
`idgen` should be either a callable of a string template.
:param argnames:
:param argvalues:
:param idgen:
:return:
"""
if not callable(idgen):
_formatter = idgen
def gen_id_using_str_formatter(**params):
try:
return _formatter.format(**params)
except Exception as e:
raise InvalidIdTemplateException(_formatter, params, e)
idgen = gen_id_using_str_formatter
if len(argnames) > 1:
ids = [idgen(**{n: v for n, v in zip(argnames, _argvals)}) for _argvals in argvalues]
else:
_only_name = argnames[0]
ids = [idgen(**{_only_name: v}) for v in argvalues]
return ids
def _process_argvalues(argnames, marked_argvalues, nb_params, has_custom_ids):
    """Internal method to use in _pytest_parametrize_plus
    Processes the provided marked_argvalues (possibly marked with pytest.param) and returns
    p_ids, p_marks, argvalues (not marked with pytest.param), fixture_indices
    Note: `marked_argvalues` is modified in the process if a `lazy_value` is found with a custom id or marks.
    :param argnames: list of parameter names
    :param marked_argvalues: the argvalues list, where entries may be wrapped in pytest.param
    :param nb_params: number of parameter names (len(argnames))
    :param has_custom_ids: a boolean indicating if custom ids are provided separately in `ids` or `idgen` (see
        @parametrize)
    :return: p_ids, p_marks, argvalues, fixture_indices - where fixture_indices is a list of (index, positions)
        tuples; positions is None when the whole entry is a fixture_ref, or the list of tuple positions holding
        fixture_refs otherwise
    """
    p_ids, p_marks, argvalues = extract_parameterset_info(argnames, marked_argvalues, check_nb=False)
    # find if there are fixture references or lazy values in the values provided
    fixture_indices = []
    if nb_params == 1:
        # single parameter: each argvalue is the value itself (no tuple wrapping)
        for i, v in enumerate(argvalues):
            if is_lazy_value(v):
                # --- A lazy value is used for several parameters at the same time ---
                # Users can declare custom marks in the lazy value API, we have to take these into account
                # (1) if there was a pytest.param around it, we have to merge the marks from the lazy value into it
                # (2) if there was no pytest.param around it and there are marks, we have to create the pytest.param
                # Note: a custom id in lazy value does not require such processing as it does not need to take
                # precedence over `ids` or `idgen`
                # are there any marks in lazy_value ?
                _mks = v.get_marks(as_decorators=True)
                if len(_mks) > 0:
                    # update/create the pytest.param marks on this value
                    p_marks[i] = (list(p_marks[i]) + _mks) if p_marks[i] is not None else list(_mks)
                    # update the original marked_argvalues
                    marked_argvalues[i] = ParameterSet(values=(argvalues[i],), id=p_ids[i], marks=p_marks[i])
            elif isinstance(v, fixture_ref):
                # Fix the referenced fixture length
                v.theoretical_size = nb_params
                fixture_indices.append((i, None))
    elif nb_params > 1:
        for i, v in enumerate(argvalues):
            # A/ First analyze what is the case at hand
            _lazyvalue_used_as_tuple = False
            _fixtureref_used_as_tuple = False
            if is_lazy_value(v):
                _lazyvalue_used_as_tuple = True
            elif isinstance(v, fixture_ref):
                # Fix the referenced fixture length
                v.theoretical_size = nb_params
                _fixtureref_used_as_tuple = True
            elif len(v) == 1 and is_lazy_value(v[0]):
                # same than above but it was in a pytest.param
                argvalues[i] = v = v[0]
                _lazyvalue_used_as_tuple = True
            elif len(v) == 1 and isinstance(v[0], fixture_ref):
                # same than above but it was in a pytest.param
                _fixtureref_used_as_tuple = True
                argvalues[i] = v = v[0]
                # Fix the referenced fixture length
                v.theoretical_size = nb_params
            # B/ Now process it
            if _lazyvalue_used_as_tuple:
                # --- A lazy value is used for several parameters at the same time ---
                # Since users have the possibility in the lazy value API to declare a custom id or custom marks,
                # we have to take these into account.
                # MARKS:
                # (1) if there was a pytest.param around it, we have to merge the marks from the lazy value into it
                # (2) if there was no pytest.param around it and there are marks, we have to create the pytest.param
                # IDS:
                # As opposed to the case of nb_params=1, we can not let pytest generate the id as it would create a
                # tuple of LazyTupleItem ids (e.g. <id>[0]-<id>[1]-...). So
                # (1) if there is a custom id list or generator, do not care about this.
                # (2) if there is a pytest.param with a custom id, do not care about this
                # (3) if there is nothing OR if there is a pytest.param with no id, we should create a pytest.param with
                # the id.
                # in this particular case we have to modify the initial list
                argvalues[i] = v.as_lazy_tuple(nb_params)
                # TUPLE usage: if the id is not provided elsewhere we HAVE to set an id to avoid <id>[0]-<id>[1]...
                if p_ids[i] is None and not has_custom_ids:
                    if not has_pytest_param:
                        if v._id is not None:
                            # (on pytest 2 we cannot do it since pytest.param does not exist)
                            warn("The custom id %r in `lazy_value` will be ignored as this version of pytest is too old"
                                 " to support `pytest.param`." % v._id)
                        else:
                            pass  # no warning, but no p_id update
                    else:
                        # update/create the pytest.param id on this value
                        p_ids[i] = v.get_id()
                # handle marks
                _mks = v.get_marks(as_decorators=True)
                if len(_mks) > 0:
                    # update/create the pytest.param marks on this value
                    p_marks[i] = (list(p_marks[i]) + _mks) if p_marks[i] is not None else list(_mks)
                # update the marked_argvalues
                # - at least with the unpacked lazytuple if no pytest.param is there or needs to be created
                # - with a pytest.param if one is needed
                if p_ids[i] is None and p_marks[i] is None:
                    marked_argvalues[i] = argvalues[i]
                else:
                    # note that here argvalues[i] IS a tuple-like so we do not create a tuple around it
                    marked_argvalues[i] = ParameterSet(values=argvalues[i], id=p_ids[i], marks=p_marks[i] or ())
            elif _fixtureref_used_as_tuple:
                # a fixture ref is used for several parameters at the same time
                fixture_indices.append((i, None))
            else:
                # Tuple: check nb params for consistency
                if len(v) != len(argnames):
                    raise ValueError("Inconsistent number of values in pytest parametrize: %s items found while the "
                                     "number of parameters is %s: %s." % (len(v), len(argnames), v))
                # let's dig into the tuple to check if there are fixture_refs or lazy_values
                lv_pos_list = [j for j, _pval in enumerate(v) if is_lazy_value(_pval)]
                if len(lv_pos_list) > 0:
                    # merge the marks declared on each inner lazy_value into the pytest.param marks
                    _mks = [mk for _lv in lv_pos_list for mk in v[_lv].get_marks(as_decorators=True)]
                    if len(_mks) > 0:
                        # update/create the pytest.param marks on this value
                        p_marks[i] = (list(p_marks[i]) + _mks) if p_marks[i] is not None else list(_mks)
                        marked_argvalues[i] = ParameterSet(values=argvalues[i], id=p_ids[i], marks=p_marks[i] or ())
                fix_pos_list = [j for j, _pval in enumerate(v) if isinstance(_pval, fixture_ref)]
                if len(fix_pos_list) > 0:
                    # there is at least one fixture ref inside the tuple
                    fixture_indices.append((i, fix_pos_list))
        # let's dig into the tuple
        # has_val_ref = any(isinstance(_pval, lazy_value) for _pval in v)
        # val_pos_list = [j for j, _pval in enumerate(v) if isinstance(_pval, lazy_value)]
        # if len(val_pos_list) > 0:
        #     # there is at least one value ref inside the tuple
        #     argvalues[i] = tuple_with_value_refs(v, theoreticalsize=nb_params, positions=val_pos_list)
    return p_ids, p_marks, argvalues, fixture_indices
| 49.558317 | 122 | 0.612003 |
64fb0af142406dfa013533b6366814fdaf6ec49a | 3,166 | py | Python | lama/qc/metric_charts.py | MiaRatkovic/LAMA | 3ccfed0864001c8c270861e23cc81bc43d7d25c9 | [
"Apache-2.0"
] | 6 | 2016-08-15T22:07:02.000Z | 2022-02-17T04:22:58.000Z | lama/qc/metric_charts.py | MiaRatkovic/LAMA | 3ccfed0864001c8c270861e23cc81bc43d7d25c9 | [
"Apache-2.0"
] | 25 | 2019-12-05T02:02:20.000Z | 2021-09-08T01:39:17.000Z | lama/qc/metric_charts.py | MiaRatkovic/LAMA | 3ccfed0864001c8c270861e23cc81bc43d7d25c9 | [
"Apache-2.0"
] | 5 | 2019-12-05T00:15:29.000Z | 2021-07-06T05:24:54.000Z | #!/usr/bin/env python
"""
Given a directory containing a multi-level registration, make plots of the cost function for each resolution.
Creates an html report concatenating all the images into one.
"""
ITER_PREFIX = 'IterationInfo'  # prefix of the elastix per-iteration metric files (e.g. 'IterationInfo.0.R0.txt')
CSS_FILE = 'style.css'  # stylesheet file name referenced by the generated html report
import os
from os.path import join, relpath
import matplotlib
# use a non-interactive backend so charts can be rendered without a display (e.g. on a cluster)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from logzero import logger as logging
def make_charts(in_dir, out_dir):
    """Generate metric charts for every specimen registered in ``in_dir``.

    For each specimen subdirectory, plot every elastix ``IterationInfo``
    file and write an HTML report (``iteration_report.html``) into
    ``out_dir`` that embeds all chart images.

    :param in_dir: registration stage directory containing one
        subdirectory per specimen.
    :param out_dir: directory to write the report and an ``images``
        subdirectory into.
    """
    html = '<html><link rel="stylesheet" type="text/css" href="{}" /><body>\n'.format(CSS_FILE)
    stage = os.path.basename(in_dir)
    html += '<div class="title"> Elastix metric results for {}</div>\n'.format(stage)
    img_dir = join(out_dir, 'images')
    if not os.path.exists(img_dir):
        os.mkdir(img_dir)
    for subdir in os.listdir(in_dir):
        if not os.path.isdir(join(in_dir, subdir)):
            continue
        base = os.path.basename(subdir)
        html += '<div class="specimen clear">{}</div><br><br>'.format(base)
        files = [x for x in os.listdir(join(in_dir, subdir)) if x.startswith(ITER_PREFIX)]
        # Sort numerically by the resolution number in the file name
        # (IterationInfo.<iter>.R<res>.txt). The previous key,
        # x.strip('.txt')[-1], stripped a *character set* rather than the
        # suffix and compared only a single character, so multi-digit
        # resolutions sorted incorrectly.
        sorted_iter_files = sorted(files, key=lambda x: int(x.split('.')[2][1:]))
        for file_ in sorted_iter_files:
            if not file_.startswith(ITER_PREFIX):
                continue
            iter_path = join(in_dir, subdir, file_)
            iter_num = file_.split('.')[1]
            res_num = file_.split('.')[2][1]
            out_path = join(img_dir, base + '_' + iter_num + '_' + res_num + '.png')
            plot(iter_path, out_path)
            html += '<div class="chart"><img class="chart_img" src="{}"/></div>\n'.format(
                relpath(out_path, out_dir))
    html += '</body></html>'
    outfile = join(out_dir, 'iteration_report.html')
    with open(outfile, 'w') as html_fh:
        html_fh.write(html)
        html_fh.write(get_css())
def get_css():
    """Return the inline stylesheet appended to the generated HTML report."""
    parts = (
        '<style>',
        'body{font-family: Arial}',
        '.title{width: 100%; padding: 20px; background-color: lightblue; margin-bottom: 20px}',
        '.chart_img{width: 400px}\n',
        '.chart{float: left}',
        '.clear{clear: both}',
        '</style>',
    )
    return ''.join(parts)
def plot(iteration_file_, out_path):
    """Plot the metric column of one elastix iteration-info file to a PNG.

    :param iteration_file_: path to an ``IterationInfo.*.txt`` file; the
        first row is a header and the second whitespace-separated column
        holds the metric value.
    :param out_path: path of the PNG image to write.
    """
    with open(iteration_file_) as fh:
        try:
            next(fh)  # discard the header row
        except StopIteration as e:
            # Empty file: warn and skip rather than fail the whole report.
            logging.warn(f'Problem reading iteration info from {iteration_file_} ')
            return
        values = [float(row.split()[1].strip()) for row in fh]
    plt.plot(values)
    plt.savefig(out_path)
    plt.close()
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser("Create metric charts for a stage of the registration")
    parser.add_argument('-i', '--in', dest='in_dir', required=True,
                        help='Directory containing a series of registrations')
    parser.add_argument('-o', '--out', dest='out_file', required=True,
                        help='File to export charts to')
    args = parser.parse_args()
    # NOTE(review): despite the dest name, this value is consumed as the
    # output *directory* by make_charts — confirm with callers.
    make_charts(args.in_dir, args.out_file)
5a0aa1af6a4adef94f744fa06ce2331c1ce00c35 | 1,130 | py | Python | src/integrationtest/python/should_list_single_task_for_simple_project_tests.py | AlexeySanko/pybuilder | 82acc02dd50b243049158815822acceb2d474713 | [
"Apache-2.0"
] | null | null | null | src/integrationtest/python/should_list_single_task_for_simple_project_tests.py | AlexeySanko/pybuilder | 82acc02dd50b243049158815822acceb2d474713 | [
"Apache-2.0"
] | null | null | null | src/integrationtest/python/should_list_single_task_for_simple_project_tests.py | AlexeySanko/pybuilder | 82acc02dd50b243049158815822acceb2d474713 | [
"Apache-2.0"
] | 1 | 2020-11-02T10:06:11.000Z | 2020-11-02T10:06:11.000Z | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from integrationtest_support import IntegrationTestSupport
class Test(IntegrationTestSupport):
    """Verify a build file declaring a single task exposes exactly that task."""

    def test(self):
        self.write_build_file("""
from pybuilder.core import task
@task
def my_task (): pass
""")
        reactor = self.prepare_reactor()
        tasks = reactor.get_tasks()
        # assertEquals is a long-deprecated alias; use assertEqual.
        self.assertEqual(1, len(tasks))
        self.assertEqual("my_task", tasks[0].name)
if __name__ == "__main__":
    # Allow running this integration test module directly.
    unittest.main()
| 27.560976 | 76 | 0.700885 |
f70f8ec2299360b366ee181be4fd170341ad6326 | 869 | py | Python | Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | import tensorflow as tf
#placeholder variable(scalar)
X=tf.placeholder(tf.float32,shape=[None])
Y=tf.placeholder(tf.float32,shape=[None])
W=tf.Variable(tf.random_normal([1]),name='weight')
b=tf.Variable(tf.random_normal([1]),name='bias')
hypothesis=X*W+b
#average
cost=tf.reduce_mean(tf.square(hypothesis-Y))
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)
#minimize cost
train=optimizer.minimize(cost)
sess=tf.Session()
#initialize var
sess.run(tf.global_variables_initializer())
#learning
for step in range(2001):
cost_val,W_val,b_val,_=sess.run([cost,W,b,train],
feed_dict={X:[1,2,3,4,5],Y:[2.1,3.1,4.1,5.1,6.1]})
if step%20==0:
print(step,cost_val,W_val,b_val)
#evlauation
print(sess.run(hypothesis,feed_dict={X:[5]}))
print(sess.run(hypothesis,feed_dict={X:[2.5]}))
print(sess.run(hypothesis,feed_dict={X:[1.5,3.5]})) | 27.15625 | 63 | 0.727273 |
5d56a7c637f60064bb9aa2f5ee354759dfab99b8 | 658 | py | Python | fakesite/http_auth/tests/test_basic.py | akun/fakesite | 19d984011424dba9b9c7641e4ce3603605dd0d43 | [
"MIT"
] | 2 | 2015-12-20T06:57:20.000Z | 2022-03-17T10:26:57.000Z | fakesite/http_auth/tests/test_basic.py | akun/fakesite | 19d984011424dba9b9c7641e4ce3603605dd0d43 | [
"MIT"
] | null | null | null | fakesite/http_auth/tests/test_basic.py | akun/fakesite | 19d984011424dba9b9c7641e4ce3603605dd0d43 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from tornado.testing import AsyncHTTPTestCase as TestCase
from fakesite.fake_server import get_application
class BasicHandlerTestCase(TestCase):
    """Integration tests for the /http_auth/basic endpoint."""

    def get_app(self):
        # AsyncHTTPTestCase hook: the tornado application under test.
        return get_application()

    def test_handler_if_with_auth(self):
        # Valid basic-auth credentials -> 200 with the expected body.
        response = self.fetch('/http_auth/basic', auth_username='admin',
                              auth_password='admin')
        self.assertEqual(200, response.code)
        self.assertEqual(b'Hello! I am HTTP Authorization(Basic)',
                         response.body)

    def test_handler_if_without_auth(self):
        # Missing credentials -> 401 Unauthorized.
        response = self.fetch('/http_auth/basic')
        self.assertEqual(401, response.code)
92815a93c9db829ccc7db2b5be5010d74837a83b | 382 | py | Python | third_party/blink/tools/web_tests_history.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | third_party/blink/tools/web_tests_history.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/tools/web_tests_history.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | #!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from blinkpy.common import version_check # pylint: disable=unused-import
from blinkpy.web_tests.web_tests_history import main
if __name__ == '__main__':
    # Delegate to blinkpy's web_tests_history entry point and propagate
    # its exit status.
    sys.exit(main(sys.argv))
| 29.384615 | 73 | 0.769634 |
595a4244a467887796dcf163dd5d3914b86a0d90 | 772 | py | Python | src/rf.py | ScottNicholsonKurland/Capstone | 139df7255e8e09366fb67eaa2256f5dd97f1cd54 | [
"BSD-2-Clause"
] | 1 | 2018-01-22T15:37:51.000Z | 2018-01-22T15:37:51.000Z | src/rf.py | ScottNicholsonKurland/Capstone | 139df7255e8e09366fb67eaa2256f5dd97f1cd54 | [
"BSD-2-Clause"
] | null | null | null | src/rf.py | ScottNicholsonKurland/Capstone | 139df7255e8e09366fb67eaa2256f5dd97f1cd54 | [
"BSD-2-Clause"
] | null | null | null | from sklearn import datasets
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
df=pd.read_csv('Issued_Construction_Permits.csv')
dfa=df[df['TotalNewAddSQFT']>0]
iris=dfa[['OriginalZip','Longitude','Latitude','TotalNewAddSQFT', 'TotalJobValuation','NumberOfFloors']]
iris['TotalJobValuation']=iris['TotalJobValuation'].str.lstrip('$')
iris.fillna(value=0)
iris['TotalJobValuation']=iris['TotalJobValuation'].astype(float)
iris.dropna(inplace=True)
y=iris['TotalNewAddSQFT']
#y=y.iloc[1:]
x=iris.drop(labels='TotalNewAddSQFT',axis=1)
#x=x.iloc[0:-1]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=28)
rf = RandomForestRegressor()
rf.fit(X_train,y_train)
rf.score(X_test,y_test) | 35.090909 | 104 | 0.784974 |
dc3ba8a2525a148851b928ff39696e76233e45ec | 136,041 | py | Python | tests/test_s3.py | mcripps9/cloud-custodian | dcd9cdfed9c9bbbe8f1b8959cacb2a815b978aa5 | [
"Apache-2.0"
] | 3 | 2018-01-18T12:10:53.000Z | 2019-01-03T20:00:52.000Z | tests/test_s3.py | mcripps9/cloud-custodian | dcd9cdfed9c9bbbe8f1b8959cacb2a815b978aa5 | [
"Apache-2.0"
] | 1 | 2018-12-11T19:31:13.000Z | 2018-12-11T19:31:13.000Z | tests/test_s3.py | mcripps9/cloud-custodian | dcd9cdfed9c9bbbe8f1b8959cacb2a815b978aa5 | [
"Apache-2.0"
] | 2 | 2018-11-15T20:32:50.000Z | 2022-01-25T18:26:48.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import datetime
import functools
import json
import os
import io
import shutil
import tempfile
import time # NOQA needed for some recordings
from unittest import TestCase
from botocore.exceptions import ClientError
from dateutil.tz import tzutc
from pytest_terraform import terraform
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources import s3
from c7n.mu import LambdaManager
from c7n.ufuncs import s3crypt
from c7n.utils import get_account_alias_from_sts
from .common import (
BaseTest,
ConfigTest,
event_data,
skip_if_not_validating,
functional,
)
@terraform('s3_tag')
def test_s3_tag(test, s3_tag):
    """Apply the s3 `tag` action to a terraform-provisioned bucket.

    Verifies the new tag is added and the pre-existing tag is preserved.
    Uses the pytest-terraform ``s3_tag`` fixture plus recorded boto
    flight data.
    """
    test.patch(s3.S3, "executor_factory", MainThreadExecutor)
    test.patch(s3.EncryptExtantKeys, "executor_factory", MainThreadExecutor)
    # Restrict resource augmentation to bucket tags only.
    test.patch(
        s3, "S3_AUGMENT_TABLE", [("get_bucket_tagging", "Tags", [], "TagSet")]
    )
    session_factory = test.replay_flight_data("test_s3_tag")
    session = session_factory()
    client = session.client("s3")
    bucket_name = s3_tag['aws_s3_bucket.example.bucket']
    p = test.load_policy(
        {
            "name": "s3-tagger",
            "resource": "s3",
            "filters": [{"Name": bucket_name}],
            "actions": [
                {
                    "type": "tag",
                    "tags": {"new-tag": "new-value"},
                }
            ],
        },
        session_factory=session_factory,
    )
    resources = p.run()
    test.assertEqual(len(resources), 1)
    tags = {
        t["Key"]: t["Value"]
        for t in client.get_bucket_tagging(Bucket=bucket_name)["TagSet"]
    }
    # Tagging merges: the terraform-set tag survives alongside the new one.
    test.assertEqual(
        {"original-tag": "original-value", "new-tag": "new-value"}, tags
    )
class RestoreCompletionTest(TestCase):
    """Unit tests for parsing the x-amz-restore header via s3.restore_complete."""

    def test_restore_complete(self):
        finished = ('ongoing-request="false", '
                    'expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"')
        # A completed restore reports ongoing-request="false".
        self.assertTrue(s3.restore_complete(finished))
        # A restore still in flight reports ongoing-request="true".
        self.assertFalse(s3.restore_complete('ongoing-request="true"'))
class BucketScanLogTests(TestCase):
    """Exercise BucketScanLog round-tripping key batches through its JSON file."""

    def setUp(self):
        self.log_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.log_dir)
        self.log = s3.BucketScanLog(self.log_dir, "test")

    def test_scan_log(self):
        batch_a = list(range(5))
        batch_b = list(range(5, 10))
        with self.log:
            for batch in (batch_a, batch_b):
                self.log.add(batch)
        with open(self.log.path) as fh:
            recorded = json.load(fh)
        # A trailing empty batch marks a cleanly-closed scan log.
        self.assertEqual(recorded, [batch_a, batch_b, []])
def destroyBucket(client, bucket):
    """Delete every object in *bucket* and then the bucket itself."""
    contents = client.list_objects(Bucket=bucket).get("Contents", [])
    for obj in contents:
        client.delete_object(Bucket=bucket, Key=obj["Key"])
    client.delete_bucket(Bucket=bucket)
def destroyVersionedBucket(client, bucket):
    """Delete every object *version* in *bucket* and then the bucket itself."""
    versions = client.list_object_versions(Bucket=bucket).get("Versions", [])
    for version in versions:
        client.delete_object(
            Bucket=bucket, Key=version["Key"], VersionId=version["VersionId"])
    client.delete_bucket(Bucket=bucket)
def destroyBucketIfPresent(client, bucket):
    # Best-effort teardown used while recording flight data: remove the
    # versioned bucket if it exists, treating only a 404 (bucket absent)
    # as success.
    try:
        destroyVersionedBucket(client, bucket)
    except Exception as exc:
        # botocore ClientError instances carry a `response` dict; the
        # default keeps the status lookup below safe for other exception
        # types, which will then be re-raised (None != 404).
        response = getattr(
            exc, "response", {"ResponseMetadata": {"HTTPStatusCode": None}}
        )
        if response["ResponseMetadata"]["HTTPStatusCode"] != 404:
            raise
def generateBucketContents(s3, bucket, contents=None):
    """Upload *contents* (mapping of key -> body text) into *bucket*.

    :param s3: boto3 S3 service resource.
    :param bucket: target bucket name.
    :param contents: optional mapping of object key to plain-text body;
        a small default fixture is used when omitted.
    """
    default_contents = {
        "home.txt": "hello", "AWSLogs/2015/10/10": "out", "AWSLogs/2015/10/11": "spot"
    }
    if contents is None:
        contents = default_contents
    # fix: dropped the original `s3.Bucket(bucket)` line — it constructed
    # a resource object and discarded it (no API call, no effect).
    for k, v in contents.items():
        key = s3.Object(bucket, k)
        key.put(Body=v, ContentLength=len(v), ContentType="text/plain")
class BucketMetrics(BaseTest):
    """Tests for the s3 `metrics` filter against CloudWatch bucket metrics."""

    def test_metrics_dims(self):
        # Metrics filter with an explicit StorageType dimension, using a
        # config-sourced bucket query scoped by resourceId.
        factory = self.replay_flight_data('test_s3_metrics_user_dims')
        p = self.load_policy({
            'name': 's3',
            'resource': 's3',
            'source': 'config',
            'query': [
                {'clause': "resourceId = 'c7n-ssm-build'"}],
            'filters': [{
                'type': 'metrics',
                'name': 'BucketSizeBytes',
                'dimensions': {
                    'StorageType': 'StandardStorage'},
                'days': 7,
                'value': 100,
                'op': 'gte'}]},
            session_factory=factory,
            config={'region': 'us-east-2'})
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Matching resources are annotated with the fetched metric data.
        self.assertIn('c7n.metrics', resources[0])

    def test_metrics(self):
        # Filter buckets by NumberOfObjects and verify the annotation key.
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_metrics")
        p = self.load_policy(
            {
                "name": "s3-obj-count",
                "resource": "s3",
                "filters": [
                    {
                        "type": "metrics",
                        "value": 10000,
                        "name": "NumberOfObjects",
                        "op": "greater-than",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], "custodian-skunk-trails")
        self.assertTrue("c7n.metrics" in resources[0])
        self.assertTrue("AWS/S3.NumberOfObjects.Average" in resources[0]["c7n.metrics"])
class BucketEncryption(BaseTest):
    """Tests for the `bucket-encryption` filter and `set-bucket-encryption` action."""

    def test_s3_bucket_encryption_filter(self):
        # Bucket with an AES256 default-encryption rule matches the filter.
        bname = "c7n-bucket-with-encryption"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_bucket_encryption_filter")
        client = session_factory().client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(client.delete_bucket, Bucket=bname)
        enc = {
            "Rules": [
                {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
            ]
        }
        client.put_bucket_encryption(
            Bucket=bname, ServerSideEncryptionConfiguration=enc
        )
        p = self.load_policy(
            {
                "name": "s3-enc",
                "resource": "s3",
                "filters": [{"type": "bucket-encryption", "crypto": "AES256"}],
            },
            session_factory=session_factory,
        )
        resources = p.run() or []
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)

    def test_s3_bucket_encryption_filter_kms(self):
        def _get_encryption_config(key_id):
            # Build a default-encryption rule, optionally pinned to a KMS key.
            default_encryption = {
                "SSEAlgorithm": "aws:kms"
            }
            if key_id:
                default_encryption["KMSMasterKeyID"] = key_id
            return {
                "Rules": [{
                    "ApplyServerSideEncryptionByDefault": default_encryption
                }]
            }

        bname_base = "c7n-bucket-with-encryption"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data(
            "test_s3_bucket_encryption_filter_kms"
        )
        client = session_factory().client("s3")
        key_alias = "alias/aws/s3"
        key_meta = session_factory().client("kms").describe_key(KeyId=key_alias)["KeyMetadata"]
        key_arn = key_meta.get('Arn')
        alias_arn = ''.join((*key_arn.rpartition(':')[:2], key_alias))
        # Create separate buckets to test five ways of specifying the AWS-managed
        # KMS key for default server-side encryption.
        key_attrs = {
            'default': None,
            'aliasname': key_alias,
            'aliasarn': alias_arn,
            'keyid': key_meta.get('KeyId'),
            'keyarn': key_arn
        }
        for attr, value in key_attrs.items():
            # Create test buckets. Set a default encryption rule for each
            # one, using different attributes of the same key.
            bname = f'{bname_base}-by-{attr}'
            client.create_bucket(Bucket=bname)
            client.put_bucket_encryption(
                Bucket=bname,
                ServerSideEncryptionConfiguration=_get_encryption_config(value)
            )
            self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-enc-kms",
                "resource": "s3",
                "filters": [
                    {
                        "type": "value",
                        "key": "Name",
                        "op": "glob",
                        "value": f"{bname_base}*",
                    },
                    {
                        "type": "bucket-encryption",
                        "crypto": "aws:kms",
                        "key": key_alias,
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run() or []
        self.assertEqual(len(resources), len(key_attrs))

    def test_s3_filter_bucket_encryption_disabled(self):
        bname = "c7n-bucket-without-default-encryption"
        # fix: was "executor-factory" (hyphen) — setattr created an unused
        # attribute instead of patching the executor, as done correctly
        # elsewhere in this module.
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_bucket_encryption_disabled")
        client = session_factory().client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-disabled-encryption",
                "resource": "s3",
                "filters": [
                    {"Name": bname}, {"type": "bucket-encryption", "state": False}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertRaises(ClientError, client.get_bucket_encryption, Bucket=bname)
        # After enabling default encryption the state:False filter no
        # longer matches.
        client.put_bucket_encryption(
            Bucket=bname,
            ServerSideEncryptionConfiguration={
                "Rules": [
                    {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
                ]
            },
        )
        p = self.load_policy(
            {
                "name": "s3-disabled-encryption",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {"type": "bucket-encryption", "state": False, "crypto": "AES256"},
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 0)

    def test_s3_bucket_encryption_bucket_key(self):
        session_factory = self.replay_flight_data("test_s3_bucket_encryption_bucket_key")
        bname = "custodian-test-bucket-encryption-key"
        # fix: was "executor-factory" (hyphen) — see note above.
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        policy = self.load_policy(
            {
                "name": "test_s3_bucket_encryption_bucket_key",
                "resource": "s3",
                "filters": [
                    {
                        "Name": bname
                    },
                    {
                        "type": "bucket-encryption",
                        "state": False
                    }
                ],
                "actions": [
                    {
                        "type": "set-bucket-encryption"
                    }
                ]
            }, session_factory=session_factory
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client("s3")
        resp = client.get_bucket_encryption(Bucket=bname)
        self.assertTrue(resp['ServerSideEncryptionConfiguration']['Rules'][0]['BucketKeyEnabled'])
class BucketInventory(BaseTest):
    """Tests for the `set-inventory` action: create, encrypt and delete
    bucket inventory configurations."""

    def test_s3_set_encrypted_inventory_sses3(self):
        bname = "custodian-inventory-test"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data(
            "test_s3_set_encrypted_inventory_sses3"
        )
        client = session_factory().client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-inv",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "set-inventory",
                        "destination": "inv-dest",
                        "name": "inv-name",
                        "state": "enabled",
                        "encryption": "SSES3",
                        "fields": ["Size", "EncryptionStatus"],
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        invs = client.list_bucket_inventory_configurations(Bucket=bname).get(
            "InventoryConfigurationList"
        )
        self.assertTrue(invs)
        # The inventory destination should carry SSE-S3 encryption and the
        # requested optional field.
        self.assertTrue(
            "SSES3" in invs[0]["Destination"]["S3BucketDestination"]["Encryption"]
        )
        self.assertTrue("EncryptionStatus" in invs[0]["OptionalFields"])

    def test_s3_set_encrypted_inventory_ssekms(self):
        bname = "custodian-inventory-test"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data(
            "test_s3_set_encrypted_inventory_ssekms"
        )
        client = session_factory().client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-inv",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "set-inventory",
                        "destination": "inv-dest",
                        "name": "inv-name",
                        "state": "enabled",
                        "encryption": "SSEKMS",
                        "key_id": "arn:valid:kms",
                        "fields": ["Size", "EncryptionStatus"],
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        invs = client.list_bucket_inventory_configurations(Bucket=bname).get(
            "InventoryConfigurationList"
        )
        self.assertTrue(invs)
        # SSE-KMS encryption (with the supplied key) is applied to the
        # inventory destination.
        self.assertTrue(
            "SSEKMS" in invs[0]["Destination"]["S3BucketDestination"]["Encryption"]
        )
        self.assertTrue("EncryptionStatus" in invs[0]["OptionalFields"])

    def test_s3_delete_inventory_inventory_not_set(self):
        # Deleting (state: absent) when no inventory exists is a no-op.
        bname = "delete_inventory"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data(
            "test_s3_delete_inventory_inventory_not_set"
        )
        client = session_factory().client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-inv",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "set-inventory",
                        "destination": "inv-dest",
                        "name": "inv-name",
                        "state": "absent",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        inventoryConfigList = client.list_bucket_inventory_configurations(
            Bucket=bname
        ).get(
            "InventoryConfigurationList"
        )
        self.assertFalse(inventoryConfigList)

    @functional
    def test_inventory(self):
        # End-to-end: update an existing inventory config, then remove it.
        bname = "custodian-test-data"
        inv_bname = "custodian-inv"
        inv_name = "something"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_inventory")
        client = session_factory().client("s3")
        if self.recording:
            destroyBucketIfPresent(client, bname)
            destroyBucketIfPresent(client, inv_bname)
        client.create_bucket(
            Bucket=bname, CreateBucketConfiguration={"LocationConstraint": "us-east-2"}
        )
        client.create_bucket(
            Bucket=inv_bname,
            CreateBucketConfiguration={"LocationConstraint": "us-east-2"},
        )
        self.addCleanup(client.delete_bucket, Bucket=bname)
        self.addCleanup(client.delete_bucket, Bucket=inv_bname)
        # Seed an inventory configuration the policy will then modify.
        inv = {
            "Destination": {
                "S3BucketDestination": {
                    "Bucket": "arn:aws:s3:::%s" % inv_bname,
                    "Format": "CSV",
                    "Prefix": "abcdef",
                }
            },
            "IsEnabled": True,
            "Id": inv_name,
            "IncludedObjectVersions": "All",
            "OptionalFields": ["LastModifiedDate"],
            "Schedule": {"Frequency": "Daily"},
        }
        client.put_bucket_inventory_configuration(
            Bucket=bname, Id=inv_name, InventoryConfiguration=inv
        )
        p = self.load_policy(
            {
                "name": "s3-inv",
                "resource": "s3",
                "filters": [{"Name": "custodian-test-data"}],
                "actions": [
                    {
                        "type": "set-inventory",
                        "destination": inv_bname,
                        "name": inv_name,
                    }
                ],
            },
            session_factory=session_factory,
        )
        self.assertEqual(len(p.run()), 1)
        invs = client.list_bucket_inventory_configurations(Bucket=bname).get(
            "InventoryConfigurationList"
        )
        self.assertTrue(invs)
        self.assertEqual(
            sorted(invs[0]["OptionalFields"]), ["LastModifiedDate", "Size"]
        )
        # Removing the configuration via state: absent.
        p = self.load_policy(
            {
                "name": "s3-inv",
                "resource": "s3",
                "filters": [{"Name": "custodian-test-data"}],
                "actions": [
                    {
                        "type": "set-inventory",
                        "destination": inv_bname,
                        "state": "absent",
                        "name": inv_name,
                    }
                ],
            },
            session_factory=session_factory,
        )
        self.assertEqual(len(p.run()), 1)
        self.assertFalse(
            client.list_bucket_inventory_configurations(Bucket=bname).get(
                "InventoryConfigurationList"
            )
        )
class BucketDelete(BaseTest):
    """Tests for the s3 `delete` action: plain, versioned, replicated and
    permission-denied buckets."""

    def test_delete_replicated_bucket(self):
        # the iam setup is a little much for replication to duplicate in a test
        # preconditions - custodian-replicated and custodian-replicated-west
        # buckets setup with replication, we're deleting the custodian-replicated
        # bucket (source).
        bname = "custodian-replicated"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [
                ("get_bucket_replication", "Replication", None, None),
                ("get_bucket_versioning", "Versioning", None, None),
            ],
        )
        session_factory = self.replay_flight_data("test_s3_delete_replicated_bucket")
        p = self.load_policy(
            {
                "name": "s3-delete-bucket",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "delete", "remove-contents": True}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        session = session_factory()
        client = session.client("s3")
        buckets = {b["Name"] for b in client.list_buckets()["Buckets"]}
        self.assertFalse(bname in buckets)

    @functional
    def test_delete_versioned_bucket(self):
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_versioning", "Versioning", None, None)],
        )
        session_factory = self.replay_flight_data("test_s3_delete_versioned_bucket")
        session = session_factory()
        client = session.client("s3")
        s3_resource = session.resource("s3")
        bname = "custodian-byebye"
        if self.recording:
            destroyBucketIfPresent(client, bname)
        client.create_bucket(
            Bucket=bname, CreateBucketConfiguration={"LocationConstraint": "us-east-2"}
        )
        client.put_bucket_versioning(
            Bucket=bname, VersioningConfiguration={"Status": "Enabled"}
        )
        generateBucketContents(s3_resource, bname)
        # Generate some versions
        generateBucketContents(s3_resource, bname)
        # Leave an in-progress multipart upload to verify it is aborted too.
        upload_info = client.create_multipart_upload(Bucket=bname, Key="abcdef12345")
        client.upload_part(
            Body="1" * 1024,
            Bucket=bname,
            Key="abcdef12345",
            PartNumber=1,
            UploadId=upload_info["UploadId"],
        )
        p = self.load_policy(
            {
                "name": "s3-delete-bucket",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "delete", "remove-contents": True}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        if self.recording:
            # Allow eventual consistency to settle before listing buckets.
            time.sleep(60)
        self.assertEqual(len(resources), 1)
        buckets = {b["Name"] for b in client.list_buckets()["Buckets"]}
        self.assertFalse(bname in buckets)

    @functional
    def test_delete_bucket(self):
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3.DeleteBucket, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_delete_bucket")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-byebye"
        if self.recording:
            destroyBucketIfPresent(client, bname)
        client.create_bucket(
            Bucket=bname, CreateBucketConfiguration={"LocationConstraint": "us-east-2"}
        )
        generateBucketContents(session.resource("s3"), bname)
        p = self.load_policy(
            {
                "name": "s3-delete-bucket",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "delete", "remove-contents": True}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        buckets = {b["Name"] for b in client.list_buckets()["Buckets"]}
        self.assertFalse(bname in buckets)

    def test_delete_bucket_with_failure(self):
        # A Deny bucket policy blocks deletion; the action should record
        # the failure and leave the bucket in place until the policy is
        # removed.
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3.DeleteBucket, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_delete_bucket_with_failure")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-perm-denied"
        client.create_bucket(Bucket=bname)
        generateBucketContents(session.resource("s3"), bname)
        # This bucket policy prevents viewing contents
        policy = {
            "Version": "2012-10-17",
            "Id": "Policy1487359365244",
            "Statement": [
                {
                    "Sid": "Stmt1487359361981",
                    "Effect": "Deny",
                    "Principal": "*",
                    "Action": "s3:DeleteBucket",
                    "Resource": "arn:aws:s3:::{}".format(bname),
                }
            ],
        }
        client.put_bucket_policy(Bucket=bname, Policy=json.dumps(policy))
        p = self.load_policy(
            {
                "name": "s3-delete-bucket",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "delete", "remove-contents": True}],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        buckets = {b["Name"] for b in client.list_buckets()["Buckets"]}
        self.assertIn(bname, buckets)
        # Make sure file got written
        denied_file = os.path.join(p.ctx.log_dir, "denied.json")
        self.assertIn(bname, open(denied_file).read())
        #
        # Now delete it for real
        #
        client.delete_bucket_policy(Bucket=bname)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        buckets = {b["Name"] for b in client.list_buckets()["Buckets"]}
        self.assertFalse(bname in buckets)
class S3ConfigSource(ConfigTest):
maxDiff = None
@functional
def test_normalize(self):
self.patch(s3.S3, "executor_factory", MainThreadExecutor)
augments = list(s3.S3_AUGMENT_TABLE)
augments.remove((
"get_bucket_location", "Location", {}, None, 's3:GetBucketLocation'))
self.patch(s3, "S3_AUGMENT_TABLE", augments)
bname = "custodian-test-data-23"
session_factory = self.replay_flight_data("test_s3_normalize")
session = session_factory()
queue_url = self.initialize_config_subscriber(session)
client = session.client("s3")
if self.recording:
destroyBucketIfPresent(client, bname)
client.create_bucket(
Bucket=bname, CreateBucketConfiguration={"LocationConstraint": "us-east-2"}
)
self.addCleanup(destroyBucket, client, bname)
sns = session.client("sns")
notify_topic = sns.create_topic(Name=bname).get("TopicArn")
sns.set_topic_attributes(
TopicArn=notify_topic,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Statement": [
{
"Action": "SNS:Publish",
"Effect": "Allow",
"Resource": notify_topic,
"Principal": {"Service": "s3.amazonaws.com"},
}
]
}
),
)
self.addCleanup(sns.delete_topic, TopicArn=notify_topic)
public = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
client.put_bucket_acl(
Bucket=bname,
AccessControlPolicy={
"Owner": {
"DisplayName": "mandeep.bal",
"ID": "e7c8bb65a5fc49cf906715eae09de9e4bb7861a96361ba79b833aa45f6833b15",
},
"Grants": [
{"Grantee": {"Type": "Group", "URI": public}, "Permission": "READ"},
{
"Grantee": {
"Type": "Group",
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
},
"Permission": "WRITE",
},
{
"Grantee": {
"Type": "Group",
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
},
"Permission": "READ_ACP",
},
],
},
)
client.put_bucket_tagging(
Bucket=bname,
Tagging={
"TagSet": [
{"Key": "rudolph", "Value": "rabbit"},
{"Key": "platform", "Value": "tyre"},
]
},
)
client.put_bucket_logging(
Bucket=bname,
BucketLoggingStatus={
"LoggingEnabled": {"TargetBucket": bname, "TargetPrefix": "s3-logs/"}
},
)
client.put_bucket_versioning(
Bucket=bname, VersioningConfiguration={"Status": "Enabled"}
)
client.put_bucket_accelerate_configuration(
Bucket=bname, AccelerateConfiguration={"Status": "Enabled"}
)
client.put_bucket_website(
Bucket=bname,
WebsiteConfiguration={"IndexDocument": {"Suffix": "index.html"}},
)
client.put_bucket_policy(
Bucket=bname,
Policy=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Zebra",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::%s/*" % bname,
"Condition": {
"StringNotEquals": {
"s3:x-amz-server-side-encryption": [
"AES256", "aws:kms"
]
}
},
}
],
}
),
)
client.put_bucket_notification_configuration(
Bucket=bname,
NotificationConfiguration={
"TopicConfigurations": [
{
"Id": bname,
"TopicArn": notify_topic,
"Events": ["s3:ObjectCreated:*"],
"Filter": {
"Key": {
"FilterRules": [{"Name": "prefix", "Value": "s3-logs/"}]
}
},
}
]
},
)
p = self.load_policy(
{"name": "s3-inv", "resource": "s3", "filters": [{"Name": bname}]},
session_factory=session_factory,
)
manager = p.load_resource_manager()
resource_a = manager.get_resources([bname])[0]
results = self.wait_for_config(session, queue_url, bname)
resource_b = s3.ConfigS3(manager).load_resource(results[0])
self.maxDiff = None
self.assertEqual(s3.get_region(resource_b), 'us-east-1')
for k in ("Logging", "Policy", "Versioning", "Name", "Website"):
self.assertEqual(resource_a[k], resource_b[k])
self.assertEqual(
{t["Key"]: t["Value"] for t in resource_a.get("Tags")},
{t["Key"]: t["Value"] for t in resource_b.get("Tags")},
)
    def test_config_normalize_notification(self):
        """Loading a bucket from AWS Config normalizes all three
        notification target types (SNS topic, Lambda function, SQS queue)
        into the describe-API shape under the ``Notification`` key."""
        event = event_data("s3-rep-and-notify.json", "config")
        p = self.load_policy({"name": "s3cfg", "resource": "s3"})
        source = p.resource_manager.get_source("config")
        resource = source.load_resource(event)
        self.assertEqual(s3.get_region(resource), 'us-east-1')
        self.assertEqual(
            resource["Notification"],
            {
                u"TopicConfigurations": [
                    {
                        u"Filter": {
                            u"Key": {
                                u"FilterRules": [{u"Name": "Prefix", u"Value": "oids/"}]
                            }
                        },
                        u"Id": "rabbit",
                        u"TopicArn": "arn:aws:sns:us-east-1:644160558196:custodian-test-data-22",
                        u"Events": [
                            "s3:ReducedRedundancyLostObject",
                            "s3:ObjectCreated:CompleteMultipartUpload",
                        ],
                    }
                ],
                u"LambdaFunctionConfigurations": [
                    {
                        u"Filter": {
                            u"Key": {
                                u"FilterRules": [{u"Name": "Prefix", u"Value": "void/"}]
                            }
                        },
                        u"LambdaFunctionArn": (
                            "arn:aws:lambda:us-east-1:644160558196:function:lambdaenv"
                        ),
                        u"Id": "ZDAzZDViMTUtNGU3MS00ZWIwLWI0MzgtOTZiMWQ3ZWNkZDY1",
                        u"Events": ["s3:ObjectRemoved:Delete"],
                    }
                ],
                u"QueueConfigurations": [
                    {
                        u"Filter": {
                            u"Key": {
                                u"FilterRules": [
                                    {u"Name": "Prefix", u"Value": "images/"}
                                ]
                            }
                        },
                        u"Id": "OGQ5OTAyNjYtYjBmNy00ZTkwLWFiMjUtZjE4ODBmYTgwNTE0",
                        u"QueueArn": "arn:aws:sqs:us-east-1:644160558196:test-queue",
                        u"Events": ["s3:ObjectCreated:*"],
                    }
                ],
            },
        )
def test_config_normalize_lifecycle_null_predicate(self):
event = event_data("s3-lifecycle-null-predicate.json", "config")
p = self.load_policy({"name": "s3cfg", "resource": "s3"})
source = p.resource_manager.get_source("config")
resource = source.load_resource(event)
rule = resource["Lifecycle"]["Rules"][0]
self.assertEqual(
rule,
{
"AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 1},
"Expiration": {"Days": 1},
"ID": "RemoveAbortedUploads",
"NoncurrentVersionExpiration": {"NoncurrentDays": -1},
"Status": "Enabled",
},
)
def test_config_normalize_lifecycle_and_predicate(self):
event = event_data("s3-lifecycle-and-predicate.json", "config")
p = self.load_policy({"name": "s3cfg", "resource": "s3"})
source = p.resource_manager.get_source("config")
resource = source.load_resource(event)
rfilter = resource["Lifecycle"]["Rules"][0]["Filter"]
self.assertEqual(rfilter["And"]["Prefix"], "docs/")
self.assertEqual(
rfilter["And"]["Tags"],
[
{"Value": "Archive", "Key": "Workflow"},
{"Value": "Complete", "Key": "State"},
],
)
    def test_config_normalize_lifecycle(self):
        """A full lifecycle configuration (transitions, expirations,
        noncurrent-version settings) round-trips through the Config
        source into the describe-API shape."""
        event = event_data("s3-lifecycle.json", "config")
        p = self.load_policy({"name": "s3cfg", "resource": "s3"})
        source = p.resource_manager.get_source("config")
        resource = source.load_resource(event)
        self.assertEqual(
            resource["Lifecycle"],
            {
                "Rules": [
                    {
                        "Status": "Enabled",
                        "NoncurrentVersionExpiration": {"NoncurrentDays": 545},
                        "Filter": {"Prefix": "docs/"},
                        "Transitions": [{"Days": 30, "StorageClass": "STANDARD_IA"}],
                        "Expiration": {"ExpiredObjectDeleteMarker": True},
                        "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 7},
                        "NoncurrentVersionTransitions": [
                            {"NoncurrentDays": 180, "StorageClass": "GLACIER"}
                        ],
                        "ID": "Docs",
                    }
                ]
            },
        )
    def test_config_normalize_replication(self):
        """Cross-account replication configuration (rules, destination
        account/bucket, IAM role) is normalized under ``Replication``."""
        event = event_data("s3-rep-and-notify.json", "config")
        p = self.load_policy({"name": "s3cfg", "resource": "s3"})
        source = p.resource_manager.get_source("config")
        resource = source.load_resource(event)
        self.assertEqual(
            resource["Replication"],
            {
                u"ReplicationConfiguration": {
                    u"Rules": [
                        {
                            u"Status": "Enabled",
                            u"Prefix": "",
                            u"Destination": {
                                u"Account": "000111222333",
                                u"Bucket": "arn:aws:s3:::testing-west"},
                            u"ID": "testing-99",
                        }
                    ],
                    u"Role": (
                        "arn:aws:iam::644160558196:role"
                        "/custodian-replicated-custodian-replicated"
                        "-west-s3-repl-role"
                    ),
                }
            },
        )
def test_config_normalize_website_redirect(self):
event = event_data("s3-website-redirect.json", "config")
p = self.load_policy({"name": "s3cfg", "resource": "s3"})
source = p.resource_manager.get_source("config")
self.maxDiff = None
resource = source.load_resource(event)
self.assertEqual(
resource["Website"],
{
"RedirectAllRequestsTo": {
"HostName": "www.google.com/", "Protocol": "https"
}
},
)
    def test_config_normalize_website(self):
        """Website hosting with routing rules (index document plus an
        error-code redirect) is normalized under ``Website``."""
        event = event_data("s3-website.json", "config")
        p = self.load_policy({"name": "s3cfg", "resource": "s3"})
        source = p.resource_manager.get_source("config")
        self.maxDiff = None
        resource = source.load_resource(event)
        self.assertEqual(
            resource["Website"],
            {
                u"IndexDocument": {u"Suffix": "index.html"},
                u"RoutingRules": [
                    {
                        u"Redirect": {u"ReplaceKeyWith": "error.html"},
                        u"Condition": {
                            u"HttpErrorCodeReturnedEquals": "404",
                            u"KeyPrefixEquals": "docs/",
                        },
                    }
                ],
            },
        )
    def test_load_item_resource(self):
        """A bare bucket config item loads with None placeholders for
        every unset feature (lifecycle, website, policy, ...) and a
        normalized ACL; tags and creation date are checked separately."""
        event = event_data("s3.json", "config")
        p = self.load_policy({"name": "s3cfg", "resource": "s3"})
        source = p.resource_manager.get_source("config")
        self.maxDiff = None
        resource = source.load_resource(event)
        # CreationDate is a timestamp; pop it so the dict compare below
        # only covers the stable fields.
        resource.pop("CreationDate")
        self.assertEqual(
            {"Planet": "Earth", "Verbose": "Game"},
            {t["Key"]: t["Value"] for t in resource.pop("Tags")},
        )
        self.assertEqual(s3.get_region(resource), 'us-east-2')
        self.assertEqual(
            resource,
            {
                "Location": {"LocationConstraint": u"us-east-2"},
                "Name": u"config-rule-sanity",
                "Lifecycle": None,
                "Website": None,
                "Policy": None,
                "Replication": None,
                "Versioning": None,
                "Logging": None,
                "Notification": None,
                "Acl": {
                    "Owner": {
                        "ID": u"e7c8bb65a5fc49cf906715eae09de9e4bb7861a96361ba79b833aa45f6833b15"
                    },
                    "Grants": [
                        {
                            "Grantee": {
                                "Type": "CanonicalUser",
                                "ID": (
                                    "e7c8bb65a5fc49cf906715eae09de"
                                    "9e4bb7861a96361ba79b833aa45f6833b15"
                                ),
                            },
                            "Permission": "FULL_CONTROL",
                        }
                    ],
                },
            },
        )
    def test_load_item_resource_config_event(self):
        """A configuration item extracted from a Config *rule* event
        (``invokingEvent`` payload) loads the same way as a direct
        config item, including a parsed tz-aware CreationDate."""
        event = event_data("s3-from-rule.json", "config")
        p = self.load_policy({"name": "s3cfg", "resource": "s3"})
        source = p.resource_manager.get_source("config")
        self.maxDiff = None
        # Config rule events wrap the item in a JSON-encoded invokingEvent.
        resource_config = json.loads(event["invokingEvent"])["configurationItem"]
        resource = source.load_resource(resource_config)
        self.assertEqual(
            resource,
            {
                u"Acl": {
                    u"Grants": [
                        {
                            u"Grantee": {
                                u"ID": (
                                    "e7c8bb65a5fc49cf906715eae09de9e4"
                                    "bb7861a96361ba79b833aa45f6833b15"
                                ),
                                u"Type": u"CanonicalUser",
                            },
                            u"Permission": u"FULL_CONTROL",
                        }
                    ],
                    u"Owner": {
                        u"DisplayName": u"mandeep.bal",
                        u"ID": u"e7c8bb65a5fc49cf906715eae09de9e4bb7861a96361ba79b833aa45f6833b15",
                    },
                },
                u"CreationDate": datetime.datetime(
                    2017, 9, 15, 2, 5, 40, tzinfo=tzutc()
                ),
                u"Lifecycle": None,
                u"Location": {},
                u"Logging": None,
                u"Name": u"c7n-fire-logs",
                u"Notification": {},
                u"Policy": None,
                u"Replication": None,
                u"Tags": [],
                u"Versioning": None,
                u"Website": None,
            },
        )
class BucketPolicyStatements(BaseTest):
    """Tests for the s3 ``set-statements`` action.

    These tests replay recorded AWS API traffic (flight data), so the
    sequence of client calls must not be reordered.
    """

    @functional
    def test_policy(self):
        """set-statements adds the given statement, with {bucket_name}
        interpolated into the Resource ARN, to the bucket policy."""
        bname = "custodian-test-data"
        sid = "CustodianTest"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        session_factory = self.replay_flight_data("test_s3_policy_statements")
        client = session_factory().client("s3")
        if self.recording:
            destroyBucketIfPresent(client, bname)
        client.create_bucket(
            Bucket=bname, CreateBucketConfiguration={"LocationConstraint": "us-east-2"}
        )
        self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-policy-statements",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "set-statements",
                        "statements": [
                            {
                                "Sid": sid,
                                "Effect": "Deny",
                                "Action": "s3:GetObject",
                                "Principal": {"AWS": "*"},
                                # {bucket_name} is expanded by the action
                                "Resource": "arn:aws:s3:::{bucket_name}/*",
                                "Condition": {"Bool": {"aws:SecureTransport": False}},
                            }
                        ],
                    }
                ],
            },
            session_factory=session_factory,
        )
        self.assertEqual(len(p.run()), 1)
        policy = client.get_bucket_policy(Bucket=bname).get("Policy")
        policy = json.loads(policy)
        self.assertTrue(len(policy["Statement"]) > 0)
        self.assertTrue(
            len([s for s in policy["Statement"] if s["Sid"] == sid and
                s["Resource"] == "arn:aws:s3:::%s/*" % (bname)]) == 1
        )

    @functional
    def test_policy_no_change(self):
        """An empty statements list is a no-op: the policy still matches
        the bucket but makes no policy modification."""
        bname = "custodian-test-data"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        session_factory = self.replay_flight_data("test_s3_policy_statements_no_change")
        client = session_factory().client("s3")
        if self.recording:
            destroyBucketIfPresent(client, bname)
        client.create_bucket(
            Bucket=bname, CreateBucketConfiguration={"LocationConstraint": "us-east-2"}
        )
        self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-policy-statements",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "set-statements", "statements": []}],
            },
            session_factory=session_factory,
        )
        self.assertEqual(len(p.run()), 1)
class S3Test(BaseTest):
    def test_bucket_get_resources(self):
        """get_resources by bucket name returns the augmented bucket with
        its tags and CreationDate populated."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [
            ('get_bucket_tagging', 'Tags', [], 'TagSet')])
        session_factory = self.replay_flight_data("test_s3_get_resources")
        p = self.load_policy(
            {"name": "bucket-fetch", "resource": "s3"},
            session_factory=session_factory)
        resources = p.resource_manager.get_resources(['c7n-codebuild'])
        self.assertEqual(len(resources), 1)
        tags = {t['Key']: t['Value'] for t in resources[0].get('Tags')}
        self.assertEqual(
            tags, {
                'Application': 'test', 'Env': 'Dev', 'Owner': 'nicholase',
                'Retention': '2', 'Retention2': '3', 'test': 'test'})
        self.assertTrue("CreationDate" in resources[0])
def test_multipart_large_file(self):
self.patch(s3.S3, "executor_factory", MainThreadExecutor)
self.patch(s3.EncryptExtantKeys, "executor_factory", MainThreadExecutor)
self.patch(s3, "S3_AUGMENT_TABLE", [])
self.patch(s3, "MAX_COPY_SIZE", (1024 * 1024 * 6.1))
session_factory = self.replay_flight_data("test_s3_multipart_file")
session = session_factory()
client = session.client("s3")
bname = "custodian-largef-test"
key = "hello"
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
class wrapper:
def __init__(self, d, length):
self.d = d
self.len = length
self.counter = length
def read(self, size):
if self.counter == 0:
return ""
if size > self.counter:
size = self.counter
self.counter = 0
else:
self.counter -= size
return self.d.read(size)
def seek(self, offset, whence=0):
if whence == 2 and offset == 0:
self.counter = 0
elif whence == 0 and offset == 0:
self.counter = self.len
def tell(self):
return self.len - self.counter
size = 1024 * 1024 * 16
client.put_object(
Bucket=bname,
Key=key,
Metadata={"planet": "earth"},
Body=wrapper(io.BytesIO(bytearray(size)), size),
ContentLength=size,
)
info = client.head_object(Bucket=bname, Key=key)
p = self.load_policy(
{
"name": "encrypt-obj",
"resource": "s3",
"filters": [{"Name": bname}],
"actions": ["encrypt-keys"],
},
output_dir=None,
session_factory=session_factory,
)
p.run()
post_info = client.head_object(Bucket=bname, Key="hello")
self.assertTrue("ServerSideEncryption" in post_info)
self.assertEqual(post_info["Metadata"], {"planet": "earth"})
# etags on multipart do not reflect md5 :-(
self.assertTrue(info["ContentLength"], post_info["ContentLength"])
    def test_self_log(self):
        """is-log-target with self: True matches a bucket that is its own
        access-log delivery target."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_logging", "Logging", None, "LoggingEnabled")],
        )
        session_factory = self.replay_flight_data("test_s3_self_log_target")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-log-test"
        client.create_bucket(Bucket=bname)
        self.addCleanup(client.delete_bucket, Bucket=bname)
        # Grant the S3 log-delivery group write access so the bucket can
        # receive its own access logs.
        client.put_bucket_acl(
            Bucket=bname,
            AccessControlPolicy={
                "Owner": {
                    "DisplayName": "k_vertigo",
                    "ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee",
                },
                "Grants": [
                    {
                        "Grantee": {
                            "Type": "Group",
                            "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
                        },
                        "Permission": "WRITE",
                    },
                    {
                        "Grantee": {
                            "Type": "Group",
                            "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
                        },
                        "Permission": "READ_ACP",
                    },
                ],
            },
        )
        # Point the bucket's access logging at itself.
        client.put_bucket_logging(
            Bucket=bname,
            BucketLoggingStatus={
                "LoggingEnabled": {"TargetBucket": bname, "TargetPrefix": "s3-logs/"}
            },
        )
        p = self.load_policy(
            {
                "name": "s3-log-targets",
                "resource": "s3",
                "filters": [{"Name": bname}, {"type": "is-log-target", "self": True}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        names = [b["Name"] for b in resources]
        self.assertEqual(names[0], bname)
        self.assertEqual(len(names), 1)
def test_log_target(self):
self.patch(s3.S3, "executor_factory", MainThreadExecutor)
self.patch(
s3,
"S3_AUGMENT_TABLE",
[("get_bucket_logging", "Logging", None, "LoggingEnabled")],
)
session_factory = self.replay_flight_data("test_s3_log_target")
session = session_factory()
client = session.client("s3")
bname = "custodian-log-test"
client.create_bucket(Bucket="custodian-log-test")
self.addCleanup(client.delete_bucket, Bucket=bname)
client.put_bucket_acl(
Bucket=bname,
AccessControlPolicy={
"Owner": {
"DisplayName": "k_vertigo",
"ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee",
},
"Grants": [
{
"Grantee": {
"Type": "Group",
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
},
"Permission": "WRITE",
},
{
"Grantee": {
"Type": "Group",
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
},
"Permission": "READ_ACP",
},
],
},
)
client.put_bucket_logging(
Bucket=bname,
BucketLoggingStatus={
"LoggingEnabled": {"TargetBucket": bname, "TargetPrefix": "s3-logs/"}
},
)
p = self.load_policy(
{"name": "s3-log-targets", "resource": "s3", "filters": ["is-log-target"]},
session_factory=session_factory,
)
resources = p.run()
names = [b["Name"] for b in resources]
self.assertTrue(bname in names)
    def test_has_statement(self):
        """has-statement with statement_ids matches a bucket whose policy
        contains a statement with the given Sid."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3.MissingPolicyStatementFilter, "executor_factory", MainThreadExecutor
        )
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        session_factory = self.replay_flight_data("test_s3_has_statement")
        bname = "custodian-policy-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        client.put_bucket_policy(
            Bucket=bname,
            Policy=json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "Zebra",
                            "Effect": "Deny",
                            "Principal": "*",
                            "Action": "s3:PutObject",
                            "Resource": "arn:aws:s3:::%s/*" % bname,
                            "Condition": {
                                "StringNotEquals": {
                                    "s3:x-amz-server-side-encryption": [
                                        "AES256", "aws:kms"
                                    ]
                                }
                            },
                        },
                        {
                            "Sid": "Zebra2",
                            "Effect": "Deny",
                            "Principal": "arn:aws:iam::644160558196:root",
                            "Action": "s3:PutObject",
                            "Resource": "arn:aws:s3:::%s/*" % bname,
                        },
                    ],
                }
            ),
        )
        p = self.load_policy(
            {
                "name": "s3-has-policy",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {"type": "has-statement", "statement_ids": ["Zebra"]},
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
def test_has_statement_policy(self):
self.patch(s3.S3, "executor_factory", MainThreadExecutor)
self.patch(
s3.MissingPolicyStatementFilter, "executor_factory", MainThreadExecutor
)
self.patch(
s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
)
session_factory = self.replay_flight_data("test_s3_has_statement")
bname = "custodian-policy-test"
p = self.load_policy(
{
"name": "s3-has-policy",
"resource": "s3",
"filters": [
{"Name": bname},
{
"type": "has-statement",
"statements": [
{
"Effect": "Deny",
"Action": "s3:PutObject",
"Principal": "*",
"Resource": "arn:aws:s3:::{bucket_name}/*"
}
],
},
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
    def test_bucket_replication_policy_remove(self):
        """set-replication with state: remove deletes the bucket's
        replication configuration entirely."""
        replicated_from_name = "replication-from-12345"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # only augment with logging info to minimize API calls
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_replication", 'Replication',
              None, None, 's3:GetReplicationConfiguration')],
        )
        # and ignore any other buckets we might have in this test account
        # to minimize the placebo data and API calls
        # Inspired by #5206, thanks tjstansell!
        self.patch(
            s3.S3.resource_type,
            "enum_spec",
            ('list_buckets', "Buckets[?Name=='{}']".format(replicated_from_name), None)
        )
        session_factory = self.replay_flight_data("test_s3_replication_policy_remove")
        session = session_factory()
        client = session.client("s3")
        p = self.load_policy(
            {
                "name": "s3-has-replica-policy",
                "resource": "s3",
                "filters": [
                    {
                        "type": "value",
                        "key": "Replication.ReplicationConfiguration.Rules[].Destination",
                        "value": "present"
                    },
                    {
                        "type": "value",
                        "key": "Replication.ReplicationConfiguration.Rules[].Status",
                        "value": "Enabled",
                        "op": "contains"
                    }
                ],
                "actions": [
                    {
                        "type": "set-replication",
                        "state": "remove"
                    }
                ]
            },
            session_factory=session_factory,
        )
        # Test that there was a bucket with an enabled replication policy
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Test to make sure that the replication policy removed from the buckets
        self.assertRaises(ClientError, client.get_bucket_replication, Bucket=replicated_from_name)
    def test_bucket_replication_policy_disable(self):
        """set-replication with state: disable keeps the replication
        configuration but flips every rule's Status to Disabled."""
        bname = "repela"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_replication", 'Replication',
              None, None, 's3:GetReplicationConfiguration')],
        )
        # Restrict enumeration to the single test bucket to keep the
        # recorded flight data small.
        self.patch(
            s3.S3.resource_type,
            "enum_spec",
            ('list_buckets', "Buckets[?Name=='{}']".format(bname), None)
        )
        session_factory = self.replay_flight_data("test_s3_replication_policy_disable")
        session = session_factory()
        client = session.client("s3")
        p = self.load_policy(
            {
                "name": "s3-has-replica-policy",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "value",
                        "key": "Replication.ReplicationConfiguration.Rules[].Destination",
                        "value": "present"
                    },
                    {
                        "type": "value",
                        "key": "Replication.ReplicationConfiguration.Rules[].Status",
                        "value": "Enabled",
                        "op": "contains"
                    }
                ],
                "actions": [
                    {
                        "type": "set-replication",
                        "state": "disable"
                    }
                ]
            },
            session_factory=session_factory,
        )
        resources = p.run()
        # Test that there was a bucket with an enabled replication policy
        self.assertEqual(len(resources), 1)
        # Test that there is a disabled policy on the bucket now
        response = client.get_bucket_replication(Bucket=bname)
        for rule in response['ReplicationConfiguration']['Rules']:
            self.assertEqual(rule['Status'], 'Disabled')
    def test_set_public_block_enable_all(self):
        """set-public-block with no arguments turns on every
        PublicAccessBlock setting for a bucket missing any of them."""
        bname = 'mypublicblock'
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_public_block_enable_all")
        session = session_factory()
        client = session.client("s3")
        p = self.load_policy(
            {
                "name": "CheckForPublicBlocks-Absent",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "check-public-block",
                    }
                ],
                "actions": [
                    {
                        "type": "set-public-block"
                    }
                ]
            },
            session_factory=session_factory,
        )
        # Test that there was a bucket with missing public blocks
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        # Make sure that all blocks are set to on/enabled now
        response = client.get_public_access_block(
            Bucket=bname)['PublicAccessBlockConfiguration']
        for key in response.keys():
            self.assertEqual(response[key], True)
    def test_set_public_block_disable_all(self):
        """set-public-block with all four settings explicitly False turns
        off every PublicAccessBlock setting."""
        bname = 'mypublicblock'
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_public_block_disable_all")
        session = session_factory()
        client = session.client("s3")
        p = self.load_policy(
            {
                "name": "CheckForPublicBlocks-Present",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "check-public-block",
                        "BlockPublicAcls": True
                    }
                ],
                "actions": [
                    {
                        "type": "set-public-block",
                        "BlockPublicAcls": False,
                        "IgnorePublicAcls": False,
                        "BlockPublicPolicy": False,
                        "RestrictPublicBuckets": False
                    }
                ]
            },
            session_factory=session_factory,
        )
        # Test that there was a bucket with missing public blocks
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        # Make sure that the public blocks are disabled on the buckets
        response = client.get_public_access_block(
            Bucket=bname)['PublicAccessBlockConfiguration']
        for key in response.keys():
            self.assertEqual(response[key], False)
    def test_set_public_block_disable_all_via_state(self):
        """set-public-block with state: False is shorthand for disabling
        all four PublicAccessBlock settings at once."""
        bname = 'mypublicblock'
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        # Reuses the disable_all flight data; the API traffic is identical.
        session_factory = self.replay_flight_data("test_s3_public_block_disable_all")
        session = session_factory()
        client = session.client("s3")
        p = self.load_policy(
            {
                "name": "CheckForPublicBlocks-Present",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "check-public-block",
                        "BlockPublicAcls": True
                    }
                ],
                "actions": [
                    {
                        "type": "set-public-block",
                        "state": False
                    }
                ]
            },
            session_factory=session_factory,
        )
        # Test that there was a bucket with missing public blocks
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        # Make sure that the public blocks are disabled on the buckets
        response = client.get_public_access_block(
            Bucket=bname)['PublicAccessBlockConfiguration']
        for key in response.keys():
            self.assertEqual(response[key], False)
    def test_set_public_block_enable_one(self):
        """set-public-block with a single setting turns on only that
        setting, leaving the others untouched."""
        bname = 'mypublicblock'
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_public_block_enable_one")
        session = session_factory()
        client = session.client("s3")
        p = self.load_policy(
            {
                "name": "CheckForPublicBlocks-Absent",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "check-public-block",
                        "BlockPublicPolicy": False
                    }
                ],
                "actions": [
                    {
                        "type": "set-public-block",
                        "BlockPublicPolicy": True
                    }
                ]
            },
            session_factory=session_factory,
        )
        # Test that there was a bucket with BlockPublicAcls public block turned off
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        self.assertEqual(resources[0]["c7n:PublicAccessBlock"]["BlockPublicPolicy"], False)
        # Make sure that BlockPublicAcls public block turned on now
        assert client.get_public_access_block(
            Bucket=bname)['PublicAccessBlockConfiguration'] == {
                "BlockPublicAcls": False,
                "IgnorePublicAcls": False,
                "BlockPublicPolicy": True,
                "RestrictPublicBuckets": False}
    def test_set_public_block_disable_one(self):
        """set-public-block with a single False setting turns off only
        that setting, leaving the others untouched."""
        bname = 'mypublicblock'
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_public_block_disable_one")
        session = session_factory()
        client = session.client("s3")
        p = self.load_policy(
            {
                "name": "CheckForPublicBlocks-Absent",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "check-public-block",
                        "IgnorePublicAcls": True
                    }
                ],
                "actions": [
                    {
                        "type": "set-public-block",
                        "IgnorePublicAcls": False
                    }
                ]
            },
            session_factory=session_factory,
        )
        # Test that there was a bucket with the IgnorePublicAcls public block set to on
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        self.assertEqual(resources[0]["c7n:PublicAccessBlock"]["IgnorePublicAcls"], True)
        # Make sure that the IgnorePublicAcls public block set to off
        assert client.get_public_access_block(
            Bucket=bname)['PublicAccessBlockConfiguration'] == {
                'BlockPublicAcls': False,
                'BlockPublicPolicy': True,
                'IgnorePublicAcls': False,
                'RestrictPublicBuckets': False}
def test_set_public_block_throws_errors(self):
bname = 'mypublicblock'
self.patch(s3.S3, "executor_factory", MainThreadExecutor)
self.patch(s3, "S3_AUGMENT_TABLE", [])
session_factory = self.replay_flight_data("test_s3_public_block_throws_errors")
session = session_factory()
client = session.client("s3")
p = self.load_policy(
{
"name": "CheckForPublicBlocks-Absent",
"resource": "s3",
"filters": [
{"Name": bname},
{
"type": "check-public-block",
"BlockPublicAcls": False,
"IgnorePublicAcls": False,
"BlockPublicPolicy": False,
"RestrictPublicBuckets": False
}
],
"actions": [
{"type": "set-public-block"}
]
},
session_factory=session_factory,
)
# Test that there was a bucket with no public blocks
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["Name"], bname)
# Because there are no public blocks we will get a client error
# We want this to throw for code cov
try:
client.get_public_access_block(Bucket=bname)['PublicAccessBlockConfiguration']
except ClientError as e:
# Assert that it is the proper error code
self.assertEqual(e.response['Error']['Code'], 'NoSuchPublicAccessBlockConfiguration')
def test_has_statement_similar_policies(self):
self.patch(s3.S3, "executor_factory", MainThreadExecutor)
self.patch(
s3.MissingPolicyStatementFilter, "executor_factory", MainThreadExecutor
)
self.patch(
s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
)
session_factory = self.replay_flight_data("test_s3_has_statement")
bname = "custodian-policy-test"
p = self.load_policy(
{
"name": "s3-has-policy",
"resource": "s3",
"filters": [
{"Name": bname},
{
"type": "has-statement",
"statements": [{"Effect": "Deny", "Action": "s3:PutObject"}],
},
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
    def test_no_encryption_statement(self):
        """no-encryption-statement matches a bucket whose policy *allows*
        unencrypted puts (Effect Allow on the encryption condition rather
        than the Deny that would enforce encryption)."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3.MissingPolicyStatementFilter, "executor_factory", MainThreadExecutor
        )
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        session_factory = self.replay_flight_data("test_s3_no_encryption_statement")
        bname = "custodian-encryption-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        client.put_bucket_policy(
            Bucket=bname,
            Policy=json.dumps(
                {
                    # NOTE(review): "2017-3-28" is not a standard IAM policy
                    # version string — presumably tolerated in the recorded
                    # flight data; confirm before changing.
                    "Version": "2017-3-28",
                    "Statement": [
                        {
                            "Sid": "RequiredEncryptedObject",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": "s3:PutObject",
                            "Resource": "arn:aws:s3:::%s/*" % bname,
                            "Condition": {
                                "StringNotEquals": {
                                    "s3:x-amz-server-side-encryption": [
                                        "AES256", "aws:kms"
                                    ]
                                }
                            },
                        }
                    ],
                }
            ),
        )
        p = self.load_policy(
            {
                "name": "s3-no-encryption-policy",
                "resource": "s3",
                "filters": [{"Name": bname}, {"type": "no-encryption-statement"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_missing_policy_statement(self):
        """missing-policy-statement matches a bucket whose policy lacks a
        statement with the given Sid (bucket has "Zebra", filter wants
        "RequireEncryptedPutObject")."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3.MissingPolicyStatementFilter, "executor_factory", MainThreadExecutor
        )
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        session_factory = self.replay_flight_data("test_s3_missing_policy")
        bname = "custodian-encrypt-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        client.put_bucket_policy(
            Bucket=bname,
            Policy=json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "Zebra",
                            "Effect": "Deny",
                            "Principal": "*",
                            "Action": "s3:PutObject",
                            "Resource": "arn:aws:s3:::%s/*" % bname,
                            "Condition": {
                                "StringNotEquals": {
                                    "s3:x-amz-server-side-encryption": [
                                        "AES256", "aws:kms"
                                    ]
                                }
                            },
                        }
                    ],
                }
            ),
        )
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "missing-policy-statement",
                        "statement_ids": ["RequireEncryptedPutObject"],
                    },
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_enable_versioning(self):
        """toggle-versioning enables versioning (idempotently) and, with
        enabled: False, suspends it."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_versioning", "Versioning", None, None)],
        )
        session_factory = self.replay_flight_data("test_s3_enable_versioning")
        bname = "superduper-and-magic"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "s3-version",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": ["toggle-versioning"],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        # eventual consistency fun for recording
        # time.sleep(10)
        versioning = client.get_bucket_versioning(Bucket=bname)["Status"]
        self.assertEqual("Enabled", versioning)
        # running against a bucket with versioning already on
        # is idempotent
        resources = p.run()
        self.assertEqual(len(resources), 1)
        p = self.load_policy(
            {
                "name": "s3-version",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "toggle-versioning", "enabled": False}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # eventual consistency fun for recording
        # time.sleep(10)
        versioning = client.get_bucket_versioning(Bucket=bname)["Status"]
        self.assertEqual("Suspended", versioning)
    @functional
    def test_enable_logging(self):
        """toggle-logging enables access logging with template-expanded
        target prefixes, overrides an existing setting, and disables
        logging with enabled: False."""
        bname = "superduper-and-magic"
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # only augment with logging info to minimize API calls
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_logging", "Logging", None, "LoggingEnabled")],
        )
        # and ignore any other buckets we might have in this test account
        # to minimize the placebo data and API calls
        self.patch(
            s3.S3.resource_type,
            "enum_spec",
            ('list_buckets', "Buckets[?Name=='{}']".format(bname), None)
        )
        session_factory = self.replay_flight_data("test_s3_enable_logging")
        session = session_factory()
        account_name = get_account_alias_from_sts(session)
        client = session.client("s3")
        client.create_bucket(Bucket=bname, ACL="log-delivery-write")
        self.addCleanup(destroyBucket, client, bname)
        if self.recording:
            time.sleep(5)
        acl = client.get_bucket_acl(Bucket=bname)
        self.assertEqual(len(acl['Grants']), 3)
        p = self.load_policy(
            {
                "name": "s3-version",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "toggle-logging",
                        "target_bucket": bname,
                        "target_prefix": "{account}/{source_bucket_region}/{source_bucket_name}/",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        self.assertEqual(
            resources[0]["Logging"]["TargetPrefix"],
            "{}/{}/{}/".format(account_name, client.meta.region_name, bname)
        )
        if self.recording:
            time.sleep(5)
        logging = client.get_bucket_logging(Bucket=bname).get("LoggingEnabled")
        # NOTE(review): this expectation ("{account}/{bucket}" with no
        # region segment or trailing slash) differs from the prefix
        # asserted just above — presumably it matches the recorded flight
        # data; confirm against the cassette before changing.
        self.assertEqual(
            logging["TargetPrefix"], "{}/{}".format(account_name, bname)
        )
        # now override existing setting
        p = self.load_policy(
            {
                "name": "s3-version",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "bucket-logging",
                        "op": "not-equal",
                        "target_bucket": bname,
                        "target_prefix": "{account_id}/{source_bucket_name}/",
                    }
                ],
                "actions": [
                    {
                        "type": "toggle-logging",
                        "target_bucket": bname,
                        "target_prefix": "{account_id}/{source_bucket_name}/",
                    }
                ],
            },
            config={'account_id': self.account_id},
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        self.assertEqual(
            resources[0]["Logging"]["TargetPrefix"], "{}/{}/".format(self.account_id, bname)
        )
        if self.recording:
            time.sleep(5)
        logging = client.get_bucket_logging(Bucket=bname).get("LoggingEnabled")
        self.assertTrue(logging)
        self.assertEqual(
            logging["TargetPrefix"], "{}/{}/".format(self.account_id, bname)
        )
        # Flip the switch
        p = self.load_policy(
            {
                "name": "s3-version",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "toggle-logging", "enabled": False}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Name"], bname)
        if self.recording:
            time.sleep(20)
        logging = client.get_bucket_logging(Bucket=bname).get("LoggingEnabled")
        self.assertFalse(logging)
    def test_encrypt_policy(self):
        """The encryption-policy action should attach a bucket policy that
        denies unencrypted PutObject requests.

        Verified by attempting a plain (unencrypted) put after the policy
        runs and expecting AccessDenied.
        """
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_encrypt_policy")
        bname = "custodian-encrypt-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": ["encryption-policy"],
            },
            session_factory=session_factory,
        )
        p.run()
        try:
            # Unencrypted put should now be rejected by the bucket policy.
            resource = session.resource("s3")
            key = resource.Object(bname, "home.txt")
            key.put(Body="hello", ContentLength=5, ContentType="text/plain")
        except ClientError as e:
            self.assertEqual(e.response["Error"]["Code"], "AccessDenied")
        else:
            self.fail("Encryption required policy")
    def test_remove_policy_none_extant(self):
        """remove-statements should be a no-op on a bucket with no policy.

        The bucket still matches (one resource returned) and afterwards
        GetBucketPolicy continues to raise because no policy ever existed.
        """
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_remove_empty_policy")
        bname = "custodian-policy-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "remove-policy",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {"type": "remove-statements", "statement_ids": ["Zebra", "Moon"]}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # No policy attached -> GetBucketPolicy raises NoSuchBucketPolicy.
        self.assertRaises(ClientError, client.get_bucket_policy, Bucket=bname)
    def test_remove_policy(self):
        """remove-statements should delete named statement ids; removing the
        only statement leaves the bucket with no policy at all."""
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3.RemovePolicyStatement, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_remove_policy")
        bname = "custodian-policy-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        # Seed a single-statement deny policy whose Sid ("Zebra") is one of
        # the ids targeted for removal below.
        client.put_bucket_policy(
            Bucket=bname,
            Policy=json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "Zebra",
                            "Effect": "Deny",
                            "Principal": "*",
                            "Action": "s3:PutObject",
                            "Resource": "arn:aws:s3:::%s/*" % bname,
                            "Condition": {
                                "StringNotEquals": {
                                    "s3:x-amz-server-side-encryption": [
                                        "AES256", "aws:kms"
                                    ]
                                }
                            },
                        }
                    ],
                }
            ),
        )
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "remove-policy",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {"type": "remove-statements", "statement_ids": ["Zebra", "Moon"]}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Sole statement removed -> whole policy deleted -> lookup raises.
        self.assertRaises(ClientError, client.get_bucket_policy, Bucket=bname)
    def test_remove_policy_matched(self):
        """remove-statements with statement_ids: "matched" should remove the
        statements recorded in the resource's CrossAccountViolations.

        The action's process() is wrapped to inject the violation annotation
        (normally produced by the cross-account filter) before delegating to
        the real implementation.
        """
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3.RemovePolicyStatement, "executor_factory", MainThreadExecutor)
        self.patch(MainThreadExecutor, "c7n_async", False)
        bname = "custodian-policy-test"
        # The statement to be flagged as "matched" and removed.
        statement = {
            "Sid": "Zebra",
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::%s/*" % bname,
            "Condition": {
                "StringNotEquals": {
                    "s3:x-amz-server-side-encryption": ["AES256", "aws:kms"]
                }
            },
        }
        process_buckets = s3.RemovePolicyStatement.process
        def enrich(self, buckets):
            # Simulate an upstream filter having annotated the violation.
            buckets[0]["CrossAccountViolations"] = [statement]
            process_buckets(self, buckets)
        self.patch(s3.RemovePolicyStatement, "process", enrich)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_remove_policy")
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        client.put_bucket_policy(
            Bucket=bname,
            Policy=json.dumps({"Version": "2012-10-17", "Statement": [statement]}),
        )
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "remove-policy",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "remove-statements", "statement_ids": "matched"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Matched statement removed -> empty policy deleted -> lookup raises.
        self.assertRaises(ClientError, client.get_bucket_policy, Bucket=bname)
def test_attach_encrypt_requires_role(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "attach-encrypt",
"resource": "s3",
"actions": [{"type": "attach-encrypt"}],
},
)
@skip_if_not_validating
def test_attach_encrypt_accepts_topic(self):
p = self.load_policy(
{
"name": "attach-encrypt",
"resource": "s3",
"actions": [
{"type": "attach-encrypt", "role": "-", "topic": "default"}
],
}
)
self.assertEqual(p.data["actions"][0]["topic"], "default")
    def test_create_bucket_event(self):
        """A cloudtrail-mode policy pushed a CreateBucket event should apply
        encryption-policy to the new bucket, producing the expected deny
        statement for unencrypted PutObject."""
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_policy", "Policy", None, "Policy")]
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_create")
        bname = "custodian-create-bucket-v4"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "bucket-create-v2",
                "resource": "s3",
                "mode": {
                    "type": "cloudtrail",
                    "role": "arn:aws:iam::619193117841:role/CustodianDemoRole",
                    "events": ["CreateBucket"],
                },
                "actions": ["encryption-policy"],
            },
            session_factory=session_factory,
        )
        # Drive the policy via a canned CloudTrail event rather than p.run().
        p.push(event_data("event-cloud-trail-create-bucket.json"), None)
        try:
            result = client.get_bucket_policy(Bucket=bname)
        except Exception:
            self.fail("Could not get bucket policy")
        self.assertTrue("Policy" in result)
        policy = json.loads(result["Policy"])
        self.assertEqual(
            policy,
            {
                u"Statement": [
                    {
                        u"Action": u"s3:PutObject",
                        u"Condition": {
                            u"StringNotEquals": {
                                u"s3:x-amz-server-side-encryption": [
                                    u"AES256", u"aws:kms"
                                ]
                            }
                        },
                        u"Effect": u"Deny",
                        u"Principal": u"*",
                        u"Resource": u"arn:aws:s3:::custodian-create-bucket-v4/*",
                        u"Sid": u"RequireEncryptedPutObject",
                    }
                ],
                u"Version": u"2012-10-17",
            },
        )
    def test_attach_encrypt_via_bucket_notification(self):
        """attach-encrypt without a topic should wire the c7n-s3-encrypt
        lambda directly into the bucket's notification configuration, and
        subsequent puts should end up server-side encrypted."""
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_location", "Location", None, None)]
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_attach_encrypt_via_bucket_notification"
        )
        bname = "custodian-attach-encrypt-test"
        role = "arn:aws:iam::644160558196:role/custodian-mu"
        self.maxDiff = None
        session = session_factory(region="us-west-2")
        client = session.client("s3")
        client.create_bucket(
            Bucket=bname, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}
        )
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "attach-encrypt",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "attach-encrypt", "role": role}],
            },
            config=dict(region="us-west-2"),
            session_factory=session_factory,
        )
        # Ensure the lambda the action deploys is torn down after the test.
        self.addCleanup(
            LambdaManager(
                functools.partial(session_factory, region="us-west-2")
            ).remove,
            s3crypt.get_function(None, role),
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # time.sleep(10)
        notifications = client.get_bucket_notification_configuration(Bucket=bname)
        notifications.pop("ResponseMetadata")
        self.assertEqual(
            notifications,
            {
                "LambdaFunctionConfigurations": [
                    {
                        "Events": ["s3:ObjectCreated:*"],
                        "Id": "c7n-s3-encrypt",
                        "LambdaFunctionArn": (
                            "arn:aws:lambda:us-west-2:644160558196:function:c7n-s3-encrypt"
                        ),
                    }
                ]
            },
        )
        # An unencrypted put should be encrypted by the attached lambda.
        client.put_object(
            Bucket=bname,
            Key="hello-world.txt",
            Body="hello world",
            ContentType="text/plain",
        )
        # time.sleep(30)
        info = client.head_object(Bucket=bname, Key="hello-world.txt")
        self.assertTrue("ServerSideEncryption" in info)
    def test_attach_encrypt_via_new_topic(self):
        """attach-encrypt with topic: default should create an SNS topic,
        subscribe the encrypt lambda to it, and register the topic in the
        bucket's notification configuration."""
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_notification_configuration", "Notification", None, None)],
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_attach_encrypt_via_new_topic"
        )
        bname = "custodian-attach-encrypt-test"
        role = "arn:aws:iam::644160558196:role/custodian-mu"
        self.maxDiff = None
        session = session_factory(region="us-east-1")
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "attach-encrypt",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {"type": "attach-encrypt", "role": role, "topic": "default"}
                ],
            },
            session_factory=session_factory,
        )
        # Tear down the lambda, the auto-created topic, and its log group.
        self.addCleanup(
            LambdaManager(
                functools.partial(session_factory, region="us-east-1")
            ).remove,
            s3crypt.get_function(None, role),
        )
        arn = "arn:aws:sns:us-east-1:644160558196:custodian-attach-encrypt-test"
        self.addCleanup(session.client("sns").delete_topic, TopicArn=arn)
        self.addCleanup(
            session.client("logs").delete_log_group,
            logGroupName="/aws/lambda/c7n-s3-encrypt",
        )
        # Check that the policy sets stuff up properly.
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # time.sleep(10)
        topic_notifications = client.get_bucket_notification_configuration(
            Bucket=bname
        ).get(
            "TopicConfigurations", []
        )
        us = [t for t in topic_notifications if t.get("TopicArn") == arn]
        self.assertEqual(len(us), 1)
        # Check that the stuff behaves properly.
        client.put_object(
            Bucket=bname,
            Key="hello-world.txt",
            Body="hello world",
            ContentType="text/plain",
        )
        # time.sleep(30)
        info = client.head_object(Bucket=bname, Key="hello-world.txt")
        self.assertTrue("ServerSideEncryption" in info)
    def test_attach_encrypt_via_implicit_existing_topic(self):
        """attach-encrypt with topic: default must reuse topics already in
        the bucket's notification configuration (here two suffix-filtered
        topics) rather than replacing them."""
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_notification_configuration", "Notification", None, None)],
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_attach_encrypt_via_implicit_existing_topic"
        )
        bname = "custodian-attach-encrypt-test"
        role = "arn:aws:iam::644160558196:role/custodian-mu"
        self.maxDiff = None
        session = session_factory(region="us-east-1")
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        # Create two sns topics
        topic_configs = []
        for suffix in (".jpg", ".txt"):
            sns = session.client("sns")
            existing_topic_arn = sns.create_topic(
                Name="existing-{}-{}".format(bname, suffix[1:])
            )[
                "TopicArn"
            ]
            # Allow S3 to publish to the topic.
            policy = {
                "Statement": [
                    {
                        "Action": "SNS:Publish",
                        "Effect": "Allow",
                        "Resource": existing_topic_arn,
                        "Principal": {"Service": "s3.amazonaws.com"},
                    }
                ]
            }
            sns.set_topic_attributes(
                TopicArn=existing_topic_arn,
                AttributeName="Policy",
                AttributeValue=json.dumps(policy),
            )
            self.addCleanup(
                session.client("sns").delete_topic, TopicArn=existing_topic_arn
            )
            topic_configs.append(
                {
                    "TopicArn": existing_topic_arn,
                    "Events": ["s3:ObjectCreated:*"],
                    "Filter": {
                        "Key": {"FilterRules": [{"Name": "suffix", "Value": suffix}]}
                    },
                }
            )
        session.resource("s3").BucketNotification(bname).put(
            NotificationConfiguration={"TopicConfigurations": topic_configs}
        )
        # Now define the policy.
        p = self.load_policy(
            {
                "name": "attach-encrypt",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {"type": "attach-encrypt", "role": role, "topic": "default"}
                ],
            },
            session_factory=session_factory,
        )
        self.addCleanup(
            LambdaManager(
                functools.partial(session_factory, region="us-east-1")
            ).remove,
            s3crypt.get_function(None, role),
        )
        self.addCleanup(
            session.client("logs").delete_log_group,
            logGroupName="/aws/lambda/c7n-s3-encrypt",
        )
        # Check that the policy sets stuff up properly.
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # time.sleep(10)
        notifies = client.get_bucket_notification_configuration(Bucket=bname).get(
            "TopicConfigurations", []
        )
        # Both pre-existing topics must survive the action.
        existing = [t for t in notifies if "existing" in t["TopicArn"]]
        self.assertEqual(len(existing), 2)
        # Check that the stuff behaves properly.
        client.put_object(
            Bucket=bname,
            Key="hello-world.txt",
            Body="hello world",
            ContentType="text/plain",
        )
        # time.sleep(30)
        info = client.head_object(Bucket=bname, Key="hello-world.txt")
        self.assertTrue("ServerSideEncryption" in info)
    def test_attach_encrypt_via_explicit_existing_topic(self):
        """attach-encrypt with an explicit topic ARN must reuse that
        pre-existing topic instead of creating a new one."""
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_notification_configuration", "Notification", None, None)],
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_attach_encrypt_via_explicit_existing_topic"
        )
        bname = "custodian-attach-encrypt-test"
        role = "arn:aws:iam::644160558196:role/custodian-mu"
        self.maxDiff = None
        session = session_factory(region="us-east-1")
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        # Create an sns topic
        topic_configs = []
        sns = session.client("sns")
        existing_topic_arn = sns.create_topic(Name="preexisting-{}".format(bname))[
            "TopicArn"
        ]
        # Allow S3 to publish to the topic.
        policy = {
            "Statement": [
                {
                    "Action": "SNS:Publish",
                    "Effect": "Allow",
                    "Resource": existing_topic_arn,
                    "Principal": {"Service": "s3.amazonaws.com"},
                }
            ]
        }
        sns.set_topic_attributes(
            TopicArn=existing_topic_arn,
            AttributeName="Policy",
            AttributeValue=json.dumps(policy),
        )
        self.addCleanup(session.client("sns").delete_topic, TopicArn=existing_topic_arn)
        topic_configs.append(
            {"TopicArn": existing_topic_arn, "Events": ["s3:ObjectCreated:*"]}
        )
        session.resource("s3").BucketNotification(bname).put(
            NotificationConfiguration={"TopicConfigurations": topic_configs}
        )
        # Now define the policy.
        p = self.load_policy(
            {
                "name": "attach-encrypt",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "attach-encrypt",
                        "role": role,
                        "topic": existing_topic_arn,
                    }
                ],
            },
            session_factory=session_factory,
        )
        self.addCleanup(
            LambdaManager(
                functools.partial(session_factory, region="us-east-1")
            ).remove,
            s3crypt.get_function(None, role),
        )
        self.addCleanup(
            session.client("logs").delete_log_group,
            logGroupName="/aws/lambda/c7n-s3-encrypt",
        )
        # Check that the policy sets stuff up properly.
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # time.sleep(10)
        notifies = client.get_bucket_notification_configuration(Bucket=bname).get(
            "TopicConfigurations", []
        )
        # The one pre-existing topic must still be configured.
        existing = [t for t in notifies if "existing" in t["TopicArn"]]
        self.assertEqual(len(existing), 1)
        # Check that the stuff behaves properly.
        client.put_object(
            Bucket=bname,
            Key="hello-world.txt",
            Body="hello world",
            ContentType="text/plain",
        )
        # time.sleep(30)
        info = client.head_object(Bucket=bname, Key="hello-world.txt")
        self.assertTrue("ServerSideEncryption" in info)
    def test_encrypt_versioned_bucket(self):
        """encrypt-keys on a versioned bucket should encrypt objects in
        place without multiplying versions (3 objects -> 3 versions)."""
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_versioning", "Versioning", None, None)],
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3.EncryptExtantKeys, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_encrypt_versioned")
        bname = "custodian-encrypt-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        client.put_bucket_versioning(
            Bucket=bname, VersioningConfiguration={"Status": "Enabled"}
        )
        self.addCleanup(destroyVersionedBucket, client, bname)
        # Default fixture writes three objects, including home.txt.
        generateBucketContents(session.resource("s3"), bname)
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": ["encrypt-keys"],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        p.run()
        self.assertTrue(len(client.list_object_versions(Bucket=bname)["Versions"]) == 3)
        self.assertTrue(
            "ServerSideEncryption" in client.head_object(Bucket=bname, Key="home.txt")
        )
    @functional
    def test_encrypt_versioned_bucket_with_existing_keys(self):
        """encrypt-keys on a bucket that had objects before versioning was
        enabled: only the current versions are re-encrypted (two keys,
        data1.txt having been overwritten post-versioning)."""
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_versioning", "Versioning", None, None)],
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3.EncryptExtantKeys, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_encrypt_versioned_bucket_with_existing_keys"
        )
        bname = "custodian-encrypt-test-versioning"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        # Write two objects BEFORE versioning so they have null version ids.
        generateBucketContents(
            session.resource("s3"), bname, {"data1.txt": "one", "data2.txt": "two"}
        )
        client.put_bucket_versioning(
            Bucket=bname, VersioningConfiguration={"Status": "Enabled"}
        )
        self.addCleanup(destroyVersionedBucket, client, bname)
        # Overwrite data1.txt after versioning is on.
        generateBucketContents(session.resource("s3"), bname, {"data1.txt": "three"})
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": ["encrypt-keys"],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        p.run()
        self.assertTrue(len(client.list_object_versions(Bucket=bname)["Versions"]) == 2)
        self.assertTrue(
            "ServerSideEncryption" in client.head_object(Bucket=bname, Key="data1.txt")
        )
        self.assertTrue(
            "ServerSideEncryption" in client.head_object(Bucket=bname, Key="data2.txt")
        )
    def test_encrypt_key_empty_bucket(self):
        """encrypt-keys on a bucket with no objects should complete without
        error and still report the bucket as processed."""
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        self.patch(s3.EncryptExtantKeys, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_encrypt_empty")
        bname = "custodian-encrypt-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": ["encrypt-keys"],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_encrypt_keys(self):
        """encrypt-keys should encrypt all unencrypted objects; the
        report-only mode counts 3 candidates before and 0 after."""
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_encrypt")
        bname = "custodian-encrypt-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        generateBucketContents(session.resource("s3"), bname)
        # start with a report-only option since it doesn't modify the bucket
        report_policy = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "encrypt-keys", "report-only": True}],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        report_resources = report_policy.run()
        self.assertEqual(report_resources[0]["KeyRemediated"], 3)
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": ["encrypt-keys"],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        p.run()
        self.assertTrue(
            "ServerSideEncryption" in client.head_object(Bucket=bname, Key="home.txt")
        )
        # re-run the report policy after to ensure we have no items
        # needing remediation
        report_resources = report_policy.run()
        self.assertEqual(report_resources[0]["KeyRemediated"], 0)
    def test_encrypt_keys_aes256_sufficient(self):
        """encrypt-keys must treat both AES256 and aws:kms objects as
        already encrypted: neither object is touched (Remediated: 0) and
        each keeps its original encryption mode."""
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_encrypt_aes256_sufficient")
        bname = "custodian-encrypt-sufficient-test"
        session = session_factory()
        client = session.client("s3")
        kms = session.client("kms")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        # Resolve the account's aws/s3-managed KMS key via its alias.
        key_id = [
            k
            for k in kms.list_aliases().get("Aliases", ())
            if k["AliasName"] == "alias/aws/s3"
        ][
            0
        ][
            "AliasArn"
        ]
        client.put_object(
            Bucket=bname,
            Key="testing-abc",
            ServerSideEncryption="aws:kms",
            SSEKMSKeyId=key_id,
        )
        client.put_object(
            Bucket=bname, Key="testing-123", ServerSideEncryption="AES256"
        )
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "encrypt-keys"}],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        p.run()
        result = client.head_object(Bucket=bname, Key="testing-123")
        self.assertTrue(result["ServerSideEncryption"] == "AES256")
        result = client.head_object(Bucket=bname, Key="testing-abc")
        self.assertTrue(result["ServerSideEncryption"] == "aws:kms")
        # The action's on-disk report must show 2 keys seen, 0 remediated.
        data = json.load(
            open(os.path.join(p.ctx.output.root_dir, "action-encryptextantkeys"))
        )
        self.assertEqual([{"Count": 2, "Remediated": 0, "Bucket": bname}], data)
    def test_encrypt_keys_key_id_option(self):
        """encrypt-keys with crypto: aws:kms and an explicit key-id should
        encrypt with that key, and re-running with a different key-id
        should re-key the objects."""
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_encrypt_key_id_option")
        bname = "custodian-encrypt-test"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        generateBucketContents(session.resource("s3"), bname)
        key_one = "845ab6f1-744c-4edc-b702-efae6836818a"
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {"type": "encrypt-keys", "crypto": "aws:kms", "key-id": key_one}
                ],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        p.run()
        result = client.head_object(Bucket=bname, Key="home.txt")
        self.assertTrue("SSEKMSKeyId" in result)
        self.assertTrue(key_one in result["SSEKMSKeyId"])
        # Now test that we can re-key it to something else
        key_two = "5fd9f6d6-4294-4926-8719-1e85695e2ad6"
        p = self.load_policy(
            {
                "name": "encrypt-keys",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {"type": "encrypt-keys", "crypto": "aws:kms", "key-id": key_two}
                ],
            },
            output_dir=None,
            session_factory=session_factory,
        )
        p.run()
        result = client.head_object(Bucket=bname, Key="home.txt")
        self.assertTrue("SSEKMSKeyId" in result)
        self.assertTrue(key_two in result["SSEKMSKeyId"])
    def test_global_grants_filter_option(self):
        """The global-grants filter's permissions option must match only
        the granted permission: a bucket with a public WRITE grant matches
        permissions: [WRITE] but not permissions: [READ_ACP]."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [("get_bucket_acl", "Acl", None, None)])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_global_grants_filter")
        bname = "custodian-testing-grants"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        # Grant WRITE to the AllUsers (public) group.
        public = "http://acs.amazonaws.com/groups/global/AllUsers"
        client.put_bucket_acl(
            Bucket=bname,
            AccessControlPolicy={
                "Owner": {
                    "DisplayName": "k_vertigo",
                    "ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee",
                },
                "Grants": [
                    {"Grantee": {"Type": "Group", "URI": public}, "Permission": "WRITE"}
                ],
            },
        )
        # READ_ACP was not granted -> no match expected.
        p = self.load_policy(
            {
                "name": "s3-global-check",
                "resource": "s3",
                "filters": [
                    {"Name": "custodian-testing-grants"},
                    {"type": "global-grants", "permissions": ["READ_ACP"]},
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 0)
        # WRITE was granted -> the bucket must match.
        p = self.load_policy(
            {
                "name": "s3-global-check",
                "resource": "s3",
                "filters": [
                    {"Name": "custodian-testing-grants"},
                    {"type": "global-grants", "permissions": ["WRITE"]},
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_global_grants_filter_and_remove(self):
        """delete-global-grants should strip the public grantee from the
        bucket ACL, leaving an empty Grants list."""
        self.patch(s3, "S3_AUGMENT_TABLE", [("get_bucket_acl", "Acl", None, None)])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_grants")
        bname = "custodian-testing-grants"
        session = session_factory()
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        # Grant WRITE to the AllUsers (public) group.
        public = "http://acs.amazonaws.com/groups/global/AllUsers"
        client.put_bucket_acl(
            Bucket=bname,
            AccessControlPolicy={
                "Owner": {
                    "DisplayName": "k_vertigo",
                    "ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee",
                },
                "Grants": [
                    {"Grantee": {"Type": "Group", "URI": public}, "Permission": "WRITE"}
                ],
            },
        )
        p = self.load_policy(
            {
                "name": "s3-remove-global",
                "resource": "s3",
                "filters": [
                    {"Name": "custodian-testing-grants"}, {"type": "global-grants"}
                ],
                "actions": [{"type": "delete-global-grants", "grantees": [public]}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        # Read the ACL before deleting the bucket so the assertion can run
        # against the post-action state.
        grants = client.get_bucket_acl(Bucket=bname)
        client.delete_bucket(Bucket=bname)
        self.assertEqual(grants["Grants"], [])
        self.assertEqual(resources[0]["Name"], bname)
    def test_s3_mark_for_op(self):
        """mark-for-op should tag the bucket with a maid_status value that
        records the pending delete operation."""
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_tagging", "Tags", [], "TagSet")]
        )
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_mark_for_op")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-mark-test"
        p = self.load_policy(
            {
                "name": "s3-mark",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "mark-for-op", "days": 3, "op": "delete"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        tags = client.get_bucket_tagging(Bucket=bname)
        tag_map = {t["Key"]: t["Value"] for t in tags.get("TagSet", {})}
        self.assertTrue("maid_status" in tag_map)
        self.assertTrue("delete" in tag_map.get("maid_status"))
    def test_s3_remove_tag(self):
        """unmark should remove the maid_status tag from the bucket while
        the resource snapshot still shows the pre-removal tag."""
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_tagging", "Tags", [], "TagSet")]
        )
        self.patch(s3.RemoveTag, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_remove_tag")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-mark-test"
        p = self.load_policy(
            {
                "name": "s3-unmark",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": ["unmark"],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        tags = client.get_bucket_tagging(Bucket=bname)
        tag_map = {t["Key"]: t["Value"] for t in tags.get("TagSet", {})}
        self.assertTrue("maid_status" not in tag_map)
        # The resource dict captured before the action still has the tag.
        old_tags = {t["Key"]: t["Value"] for t in resources[0]["Tags"]}
        self.assertTrue("maid_status" in old_tags)
    def test_hosts_website(self):
        """A Website: not-null filter should match a bucket configured for
        static website hosting, and remove-website-hosting should clear
        that configuration (second run matches zero buckets)."""
        self.patch(
            s3, "S3_AUGMENT_TABLE", [("get_bucket_website", "Website", None, None)]
        )
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_hosts_website")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-static-website-test"
        client.create_bucket(Bucket=bname)
        client.put_bucket_website(
            Bucket=bname,
            WebsiteConfiguration={
                "ErrorDocument": {"Key": "error.html"},
                "IndexDocument": {"Suffix": "index.html"},
            },
        )
        self.addCleanup(client.delete_bucket, Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-website-hosting",
                "resource": "s3",
                "filters": [{"Website": "not-null"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        names = [b["Name"] for b in resources]
        self.assertTrue(bname in names)
        p = self.load_policy(
            {
                "name": "s3-website-hosting",
                "resource": "s3",
                "filters": [{"Website": "not-null"}],
                "actions": ["remove-website-hosting"],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        # NOTE: zero here relies on the flight data reflecting the removal
        # performed by the first policy run.
        self.assertEqual(len(resources), 0)
    def test_delete_bucket_notification(self):
        """delete-bucket-notification with statement_ids: matched should
        remove only the SNS configuration matched by the
        bucket-notification filter, leaving the other one in place."""
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_notification_configuration", "Notification", None, None)],
        )
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data("test_s3_delete_bucket_notification")
        bname = "custodian-delete-bucket-notification-test"
        # Id of the notification configuration the filter should match.
        config_id = "c7n-notify-1"
        self.maxDiff = None
        session = session_factory(region="us-east-1")
        client = session.client("s3")
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "s3-delete-bucket-notification",
                "resource": "s3",
                "filters": [
                    {"Name": bname},
                    {
                        "type": "bucket-notification",
                        "kind": "sns",
                        "key": "Id",
                        "value": config_id,
                        "op": "eq",
                    },
                ],
                "actions": [
                    {"type": "delete-bucket-notification", "statement_ids": "matched"}
                ],
            },
            session_factory=session_factory,
        )
        topic_arn = session.client("sns").create_topic(Name="bucket-notification-test")[
            "TopicArn"
        ]
        self.addCleanup(session.client("sns").delete_topic, TopicArn=topic_arn)
        # Allow S3 to publish to the topic.
        topic_policy = {
            "Statement": [
                {
                    "Action": "SNS:Publish",
                    "Effect": "Allow",
                    "Resource": topic_arn,
                    "Principal": {"Service": "s3.amazonaws.com"},
                }
            ]
        }
        session.client("sns").set_topic_attributes(
            TopicArn=topic_arn,
            AttributeName="Policy",
            AttributeValue=json.dumps(topic_policy),
        )
        # Two configurations: one matching config_id, one ("another1") not.
        client.put_bucket_notification_configuration(
            Bucket=bname,
            NotificationConfiguration={
                "TopicConfigurations": [
                    {
                        "TopicArn": topic_arn,
                        "Events": ["s3:ObjectCreated:*"],
                        "Id": config_id,
                    },
                    {
                        "TopicArn": topic_arn,
                        "Events": ["s3:ObjectRemoved:*"],
                        "Id": "another1",
                    },
                ]
            },
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # time.sleep(10)
        topic_notifications = client.get_bucket_notification_configuration(
            Bucket=bname
        ).get(
            "TopicConfigurations", []
        )
        # Only the non-matched configuration should remain.
        us = [t for t in topic_notifications if t.get("TopicArn") == topic_arn]
        self.assertEqual(len(us), 1)
    def test_enable_bucket_encryption_kms(self):
        """set-bucket-encryption with crypto: aws:kms and a key id should
        set default bucket encryption to that KMS key (reported back by
        AWS as the key's full ARN)."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_enable_bucket_encryption_kms"
        )
        session = session_factory()
        client = session.client("s3")
        kms_client = session.client("kms")
        bname = "custodian-enable-bucket-encryption-kms"
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        # Sanity check: a fresh bucket has no encryption configuration.
        with self.assertRaises(Exception):
            response = client.get_bucket_encryption(Bucket=bname)
        key = kms_client.list_keys()["Keys"][0]
        key_arn = kms_client.describe_key(KeyId=key["KeyId"])["KeyMetadata"]["Arn"]
        p = self.load_policy(
            {
                "name": "s3-enable-bucket-encryption",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "set-bucket-encryption",
                        "key": str(key["KeyId"]),
                        "crypto": "aws:kms",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        if self.recording:
            time.sleep(5)
        response = client.get_bucket_encryption(Bucket=bname)
        rules = response["ServerSideEncryptionConfiguration"]["Rules"][0][
            "ApplyServerSideEncryptionByDefault"
        ]
        self.assertEqual(rules["SSEAlgorithm"], "aws:kms")
        self.assertEqual(rules["KMSMasterKeyID"], key_arn)
    def test_enable_bucket_encryption_kms_alias(self):
        """set-bucket-encryption should resolve a KMS alias to its key ARN;
        a nonexistent alias must leave the bucket unencrypted (GetBucket-
        Encryption still raises)."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_enable_bucket_encryption_kms_alias"
        )
        session = session_factory()
        client = session.client("s3")
        kms_client = session.client("kms")
        bname = "custodian-enable-bucket-encryption-kms-alias"
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        kms_alias = "alias/some-key"
        # Expected ARN the alias resolves to.
        kms_alias_id = kms_client.describe_key(KeyId=kms_alias)["KeyMetadata"]["Arn"]
        p = self.load_policy(
            {
                "name": "s3-enable-bucket-encryption-alias",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "set-bucket-encryption",
                        "crypto": "aws:kms",
                        "key": kms_alias,
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        if self.recording:
            time.sleep(5)
        response = client.get_bucket_encryption(Bucket=bname)
        rules = response["ServerSideEncryptionConfiguration"]["Rules"][0][
            "ApplyServerSideEncryptionByDefault"
        ]
        self.assertEqual(rules["SSEAlgorithm"], "aws:kms")
        self.assertEqual(rules["KMSMasterKeyID"], kms_alias_id)
        # Second bucket: a bad alias should result in no encryption config.
        bname = "custodian-enable-bucket-encryption-kms-bad-alias"
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        p = self.load_policy(
            {
                "name": "s3-enable-bucket-encryption-bad-alias",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [
                    {
                        "type": "set-bucket-encryption",
                        "crypto": "aws:kms",
                        "key": "alias/some-nonexistant-alias",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        if self.recording:
            time.sleep(5)
        with self.assertRaises(ClientError):
            client.get_bucket_encryption(Bucket=bname)
    def test_enable_bucket_encryption_aes256(self):
        """set-bucket-encryption should default to AES256 when no crypto is
        given, and also accept crypto: AES256 explicitly; verified by
        enabling, deleting, and re-enabling the configuration."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        # Flight-data replay: API calls must occur in the recorded order.
        session_factory = self.replay_flight_data(
            "test_s3_enable_bucket_encryption_aes256"
        )
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-enable-bucket-encryption-aes256"
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        # Sanity check: a fresh bucket has no encryption configuration.
        with self.assertRaises(Exception):
            client.get_bucket_encryption(Bucket=bname)
        # No "crypto" key -> action defaults to AES256.
        p = self.load_policy(
            {
                "name": "s3-enable-bucket-encryption",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "set-bucket-encryption"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        if self.recording:
            time.sleep(5)
        response = client.get_bucket_encryption(Bucket=bname)
        rules = response["ServerSideEncryptionConfiguration"]["Rules"][0][
            "ApplyServerSideEncryptionByDefault"
        ]
        self.assertEqual(rules["SSEAlgorithm"], "AES256")
        # Reset, then set AES256 explicitly.
        client.delete_bucket_encryption(Bucket=bname)
        if self.recording:
            time.sleep(5)
        with self.assertRaises(Exception):
            client.get_bucket_encryption(Bucket=bname)
        p = self.load_policy(
            {
                "name": "s3-enable-bucket-encryption",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "set-bucket-encryption", "crypto": "AES256"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        if self.recording:
            time.sleep(5)
        response = client.get_bucket_encryption(Bucket=bname)
        rules = response["ServerSideEncryptionConfiguration"]["Rules"][0][
            "ApplyServerSideEncryptionByDefault"
        ]
        self.assertEqual(rules["SSEAlgorithm"], "AES256")
    def test_delete_bucket_encryption(self):
        """set-bucket-encryption with ``enabled: False`` removes the config.

        Puts an AES256 default-encryption rule on a bucket, runs the policy,
        and verifies the bucket no longer has an encryption configuration.
        """
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        session_factory = self.replay_flight_data("test_s3_delete_bucket_encryption")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-delete-bucket-encryption-aes256"
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        # Sanity: a fresh bucket has no encryption configuration.
        with self.assertRaises(Exception):
            client.get_bucket_encryption(Bucket=bname)
        client.put_bucket_encryption(
            Bucket=bname,
            ServerSideEncryptionConfiguration={
                "Rules": [
                    {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
                ]
            },
        )
        p = self.load_policy(
            {
                "name": "s3-delete-bucket-encryption",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "set-bucket-encryption", "enabled": False}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        if self.recording:
            time.sleep(5)
        # After the policy, the encryption config must be gone again.
        with self.assertRaises(Exception):
            client.get_bucket_encryption(Bucket=bname)
class S3LifecycleTest(BaseTest):
    """Tests for the ``configure-lifecycle`` S3 action."""
    def test_lifecycle(self):
        """Lifecycle rules can be added, merged, overwritten, and deleted."""
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(
            s3,
            "S3_AUGMENT_TABLE",
            [("get_bucket_lifecycle_configuration", "Lifecycle", None, None)],
        )
        session_factory = self.replay_flight_data("test_s3_lifecycle")
        session = session_factory()
        client = session.client("s3")
        bname = "custodian-lifecycle-test"
        # Make a bucket
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        buckets = {b["Name"] for b in client.list_buckets()["Buckets"]}
        self.assertIn(bname, buckets)
        def get_policy(**kwargs):
            # Build a configure-lifecycle policy with one rule; kwargs
            # override/extend the default rule (e.g. ID, Prefix, Status).
            rule = {
                "Status": "Enabled",
                "Prefix": "foo/",
                "Transitions": [{"Days": 60, "StorageClass": "GLACIER"}],
            }
            rule.update(**kwargs)
            policy = {
                "name": "s3-lifecycle",
                "resource": "s3",
                "filters": [{"Name": bname}],
                "actions": [{"type": "configure-lifecycle", "rules": [rule]}],
            }
            return policy
        def run_policy(policy):
            # Load and run the policy, asserting exactly one matched bucket.
            p = self.load_policy(policy, session_factory=session_factory)
            resources = p.run()
            self.assertEqual(len(resources), 1)
            if self.recording:
                time.sleep(5)
        #
        # Add the first lifecycle
        #
        lifecycle_id1 = "test-lifecycle"
        policy = get_policy(ID=lifecycle_id1)
        run_policy(policy)
        lifecycle = client.get_bucket_lifecycle_configuration(Bucket=bname)
        self.assertEqual(lifecycle["Rules"][0]["ID"], lifecycle_id1)
        #
        # Now add another lifecycle rule to ensure it doesn't clobber the first one
        #
        lifecycle_id2 = "test-lifecycle-two"
        policy = get_policy(ID=lifecycle_id2, Prefix="bar/")
        run_policy(policy)
        # Verify the lifecycle
        lifecycle = client.get_bucket_lifecycle_configuration(Bucket=bname)
        self.assertEqual(len(lifecycle["Rules"]), 2)
        self.assertSetEqual(
            {x["ID"] for x in lifecycle["Rules"]},
            {lifecycle_id1, lifecycle_id2},
        )
        #
        # Next, overwrite one of the lifecycles and make sure it changed
        #
        policy = get_policy(ID=lifecycle_id2, Prefix="baz/")
        run_policy(policy)
        # Verify the lifecycle
        lifecycle = client.get_bucket_lifecycle_configuration(Bucket=bname)
        self.assertEqual(len(lifecycle["Rules"]), 2)
        self.assertSetEqual(
            {x["ID"] for x in lifecycle["Rules"]},
            {lifecycle_id1, lifecycle_id2},
        )
        for rule in lifecycle["Rules"]:
            if rule["ID"] == lifecycle_id2:
                self.assertEqual(rule["Prefix"], "baz/")
        #
        # Test deleting a lifecycle
        #
        policy = get_policy(ID=lifecycle_id1, Status="absent")
        run_policy(policy)
        lifecycle = client.get_bucket_lifecycle_configuration(Bucket=bname)
        self.assertEqual(len(lifecycle["Rules"]), 1)
        self.assertEqual(lifecycle["Rules"][0]["ID"], lifecycle_id2)
@terraform('aws_s3_encryption_audit')
def test_s3_encryption_audit(test, aws_s3_encryption_audit):
    """bucket-encryption filter matches unencrypted, KMS, and AES256 buckets.

    The terraform fixture provisions three buckets (one per encryption
    state); an ``or`` of three bucket-encryption filters should match all
    three of them.
    """
    test.patch(s3.S3, "executor_factory", MainThreadExecutor)
    test.patch(s3.BucketEncryption, "executor_factory", MainThreadExecutor)
    test.patch(s3, "S3_AUGMENT_TABLE", [])
    session_factory = test.replay_flight_data("test_s3_encryption_audit")
    p = test.load_policy(
        {
            "name": "s3-audit",
            "resource": "s3",
            "filters": [
                {
                    "or": [
                        {
                            "type": "bucket-encryption",
                            "state": False,
                        },
                        {
                            "type": "bucket-encryption",
                            "crypto": "aws:kms",
                            "state": True,
                        },
                        {
                            "type": "bucket-encryption",
                            "crypto": "AES256",
                            "state": True,
                        },
                    ]
                },
            ],
        },
        session_factory=session_factory,
    )
    resources = p.run()
    assert len(resources) == 3
    expected_names = [
        'c7n-aws-s3-encryption-audit-test-a',
        'c7n-aws-s3-encryption-audit-test-b',
        'c7n-aws-s3-encryption-audit-test-c',
    ]
    actual_names = sorted([r.get('Name') for r in resources])
    assert actual_names == expected_names
@terraform('s3_ownership', scope='class')
class TestBucketOwnership:
    """Tests for the S3 ``ownership`` filter against terraform fixtures."""
    def test_s3_ownership_empty(self, test, s3_ownership):
        """ownership value "empty" matches only the bucket with no controls."""
        test.patch(s3.S3, "executor_factory", MainThreadExecutor)
        test.patch(s3.BucketOwnershipControls, "executor_factory", MainThreadExecutor)
        test.patch(
            s3, "S3_AUGMENT_TABLE", []
        )
        session_factory = test.replay_flight_data("test_s3_ownership_empty")
        bucket_name = s3_ownership['aws_s3_bucket.no_ownership_controls.bucket']
        p = test.load_policy(
            {
                "name": "s3-ownership-empty",
                "resource": "s3",
                "filters": [
                    {"type": "value",
                    "op": "glob",
                    "key": "Name",
                    "value": "c7ntest*"},
                    {"type": "ownership",
                    "value": "empty"},
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        assert len(resources) == 1
        assert resources[0]["Name"] == bucket_name
    def test_s3_ownership_defined(self, test, s3_ownership):
        """ownership "in" matches both Preferred and Enforced buckets."""
        test.patch(s3.S3, "executor_factory", MainThreadExecutor)
        test.patch(s3.BucketOwnershipControls, "executor_factory", MainThreadExecutor)
        test.patch(
            s3, "S3_AUGMENT_TABLE", []
        )
        session_factory = test.replay_flight_data("test_s3_ownership_defined")
        bucket_names = {s3_ownership[f'aws_s3_bucket.{r}.bucket']
                        for r in ('owner_preferred', 'owner_enforced')}
        p = test.load_policy(
            {
                "name": "s3-ownership-defined",
                "resource": "s3",
                "filters": [
                    {"type": "value",
                    "op": "glob",
                    "key": "Name",
                    "value": "c7ntest*"},
                    {"type": "ownership",
                    "op": "in",
                    "value": ["BucketOwnerPreferred", "BucketOwnerEnforced"]},
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        assert len(resources) == 2
        assert {r["Name"] for r in resources} == bucket_names
| 35.932647 | 99 | 0.502922 |
64633dc6d86239b5c5b82889229d40323f6f4018 | 31,339 | py | Python | lazypredict/Supervised.py | dataprofessor/lazypredict | 563a4982547f0a5fba399b0920c30e9d8dc2ed32 | [
"MIT"
] | 6 | 2021-01-16T17:11:39.000Z | 2021-09-28T11:22:28.000Z | lazypredict/Supervised.py | dataprofessor/lazypredict | 563a4982547f0a5fba399b0920c30e9d8dc2ed32 | [
"MIT"
] | null | null | null | lazypredict/Supervised.py | dataprofessor/lazypredict | 563a4982547f0a5fba399b0920c30e9d8dc2ed32 | [
"MIT"
] | 2 | 2021-01-29T11:11:17.000Z | 2021-09-28T11:22:29.000Z | """
Supervised Models
"""
# Author: Shankar Rao Pandala <shankar.pandala@live.com>
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime
import time
import sklearn
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.utils.testing import all_estimators
from sklearn.base import RegressorMixin
from sklearn.base import ClassifierMixin
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
roc_auc_score,
f1_score,
r2_score,
mean_squared_error,
)
import warnings
import xgboost
# import catboost
import lightgbm
warnings.filterwarnings("ignore")
pd.set_option("display.precision", 2)
pd.set_option("display.float_format", lambda x: "%.2f" % x)
CLASSIFIERS = [est for est in all_estimators() if issubclass(est[1], ClassifierMixin)]
REGRESSORS = [est for est in all_estimators() if issubclass(est[1], RegressorMixin)]
removed_classifiers = [
("CheckingClassifier", sklearn.utils._mocking.CheckingClassifier),
("ClassifierChain", sklearn.multioutput.ClassifierChain),
("ComplementNB", sklearn.naive_bayes.ComplementNB),
(
"GradientBoostingClassifier",
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier,
),
(
"GaussianProcessClassifier",
sklearn.gaussian_process.gpc.GaussianProcessClassifier,
),
(
"HistGradientBoostingClassifier",
sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier,
),
("MLPClassifier", sklearn.neural_network.multilayer_perceptron.MLPClassifier),
("LogisticRegressionCV", sklearn.linear_model.logistic.LogisticRegressionCV),
("MultiOutputClassifier", sklearn.multioutput.MultiOutputClassifier),
("MultinomialNB", sklearn.naive_bayes.MultinomialNB),
("OneVsOneClassifier", sklearn.multiclass.OneVsOneClassifier),
("OneVsRestClassifier", sklearn.multiclass.OneVsRestClassifier),
("OutputCodeClassifier", sklearn.multiclass.OutputCodeClassifier),
(
"RadiusNeighborsClassifier",
sklearn.neighbors.classification.RadiusNeighborsClassifier,
),
("VotingClassifier", sklearn.ensemble.voting.VotingClassifier),
]
removed_regressors = [
("TheilSenRegressor", sklearn.linear_model.theil_sen.TheilSenRegressor),
("ARDRegression", sklearn.linear_model.ARDRegression),
("CCA", sklearn.cross_decomposition.CCA),
("IsotonicRegression", sklearn.isotonic.IsotonicRegression),
("StackingRegressor",sklearn.ensemble.StackingRegressor),
("MultiOutputRegressor", sklearn.multioutput.MultiOutputRegressor),
("MultiTaskElasticNet", sklearn.linear_model.MultiTaskElasticNet),
("MultiTaskElasticNetCV", sklearn.linear_model.MultiTaskElasticNetCV),
("MultiTaskLasso", sklearn.linear_model.MultiTaskLasso),
("MultiTaskLassoCV", sklearn.linear_model.MultiTaskLassoCV),
("PLSCanonical", sklearn.cross_decomposition.PLSCanonical),
("PLSRegression", sklearn.cross_decomposition.PLSRegression),
("RadiusNeighborsRegressor", sklearn.neighbors.RadiusNeighborsRegressor),
("RegressorChain", sklearn.multioutput.RegressorChain),
("VotingRegressor", sklearn.ensemble.VotingRegressor),
("_SigmoidCalibration", sklearn.calibration._SigmoidCalibration),
]
# Drop estimators that are unsuitable for the benchmark loop (meta-estimators,
# multi-output wrappers, very slow fitters, ...).
# NOTE(review): list.index raises ValueError if an entry is absent, so this
# is fragile across scikit-learn versions -- confirm the pinned version.
for i in removed_regressors:
    REGRESSORS.pop(REGRESSORS.index(i))
for i in removed_classifiers:
    CLASSIFIERS.pop(CLASSIFIERS.index(i))
# Add the third-party gradient-boosting estimators by hand since they are
# not discovered by sklearn's all_estimators().
REGRESSORS.append(("XGBRegressor", xgboost.XGBRegressor))
REGRESSORS.append(("LGBMRegressor", lightgbm.LGBMRegressor))
# REGRESSORS.append(('CatBoostRegressor',catboost.CatBoostRegressor))
CLASSIFIERS.append(("XGBClassifier", xgboost.XGBClassifier))
CLASSIFIERS.append(("LGBMClassifier", lightgbm.LGBMClassifier))
# CLASSIFIERS.append(('CatBoostClassifier',catboost.CatBoostClassifier))
# Shared preprocessing sub-pipelines used by both Lazy estimators:
# numeric columns are mean-imputed and standardized.
numeric_transformer = Pipeline(
    steps=[("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler())]
)
# Low-cardinality categoricals: constant-impute then one-hot encode.
categorical_transformer_low = Pipeline(
    steps=[
        ("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
        ("encoding", OneHotEncoder(handle_unknown="ignore", sparse=False)),
    ]
)
# High-cardinality categoricals: constant-impute then ordinal encode.
categorical_transformer_high = Pipeline(
    steps=[
        ("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
        # 'OrdinalEncoder' raises a ValueError when it encounters an unknown value. Check https://github.com/scikit-learn/scikit-learn/pull/13423
        ("encoding", OrdinalEncoder()),
    ]
)
# Helper function
def get_card_split(df, cols, n=11):
    """Partition categorical columns by cardinality (number of unique values).

    Parameters
    ----------
    df : Pandas DataFrame
        Frame from which the columns' cardinalities are computed.
    cols : list-like (Pandas Index)
        Categorical columns to partition.
    n : int, optional (default=11)
        Cardinality threshold used for the split.

    Returns
    -------
    card_low : list-like
        Columns with at most ``n`` unique values.
    card_high : list-like
        Columns with more than ``n`` unique values.
    """
    is_high_card = df[cols].nunique().gt(n)
    return cols[~is_high_card], cols[is_high_card]
# Helper class for performing classification
class LazyClassifier:
"""
This module helps in fitting to all the classification algorithms that are available in Scikit-learn
Parameters
----------
verbose : int, optional (default=0)
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
ignore_warnings : bool, optional (default=True)
When set to True, the warning related to algorigms that are not able to run are ignored.
custom_metric : function, optional (default=None)
When function is provided, models are evaluated based on the custom evaluation metric provided.
prediction : bool, optional (default=False)
When set to True, the predictions of all the models models are returned as dataframe.
classifiers : list, optional (default="all")
When function is provided, trains the chosen classifier(s).
Examples
--------
>>> from lazypredict.Supervised import LazyClassifier
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.model_selection import train_test_split
>>> data = load_breast_cancer()
>>> X = data.data
>>> y= data.target
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.5,random_state =123)
>>> clf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)
>>> models,predictions = clf.fit(X_train, X_test, y_train, y_test)
>>> model_dictionary = clf.provide_models(X_train,X_test,y_train,y_test)
>>> models
| Model | Accuracy | Balanced Accuracy | ROC AUC | F1 Score | Time Taken |
|:-------------------------------|-----------:|--------------------:|----------:|-----------:|-------------:|
| LinearSVC | 0.989474 | 0.987544 | 0.987544 | 0.989462 | 0.0150008 |
| SGDClassifier | 0.989474 | 0.987544 | 0.987544 | 0.989462 | 0.0109992 |
| MLPClassifier | 0.985965 | 0.986904 | 0.986904 | 0.985994 | 0.426 |
| Perceptron | 0.985965 | 0.984797 | 0.984797 | 0.985965 | 0.0120046 |
| LogisticRegression | 0.985965 | 0.98269 | 0.98269 | 0.985934 | 0.0200036 |
| LogisticRegressionCV | 0.985965 | 0.98269 | 0.98269 | 0.985934 | 0.262997 |
| SVC | 0.982456 | 0.979942 | 0.979942 | 0.982437 | 0.0140011 |
| CalibratedClassifierCV | 0.982456 | 0.975728 | 0.975728 | 0.982357 | 0.0350015 |
| PassiveAggressiveClassifier | 0.975439 | 0.974448 | 0.974448 | 0.975464 | 0.0130005 |
| LabelPropagation | 0.975439 | 0.974448 | 0.974448 | 0.975464 | 0.0429988 |
| LabelSpreading | 0.975439 | 0.974448 | 0.974448 | 0.975464 | 0.0310006 |
| RandomForestClassifier | 0.97193 | 0.969594 | 0.969594 | 0.97193 | 0.033 |
| GradientBoostingClassifier | 0.97193 | 0.967486 | 0.967486 | 0.971869 | 0.166998 |
| QuadraticDiscriminantAnalysis | 0.964912 | 0.966206 | 0.966206 | 0.965052 | 0.0119994 |
| HistGradientBoostingClassifier | 0.968421 | 0.964739 | 0.964739 | 0.968387 | 0.682003 |
| RidgeClassifierCV | 0.97193 | 0.963272 | 0.963272 | 0.971736 | 0.0130029 |
| RidgeClassifier | 0.968421 | 0.960525 | 0.960525 | 0.968242 | 0.0119977 |
| AdaBoostClassifier | 0.961404 | 0.959245 | 0.959245 | 0.961444 | 0.204998 |
| ExtraTreesClassifier | 0.961404 | 0.957138 | 0.957138 | 0.961362 | 0.0270066 |
| KNeighborsClassifier | 0.961404 | 0.95503 | 0.95503 | 0.961276 | 0.0560005 |
| BaggingClassifier | 0.947368 | 0.954577 | 0.954577 | 0.947882 | 0.0559971 |
| BernoulliNB | 0.950877 | 0.951003 | 0.951003 | 0.951072 | 0.0169988 |
| LinearDiscriminantAnalysis | 0.961404 | 0.950816 | 0.950816 | 0.961089 | 0.0199995 |
| GaussianNB | 0.954386 | 0.949536 | 0.949536 | 0.954337 | 0.0139935 |
| NuSVC | 0.954386 | 0.943215 | 0.943215 | 0.954014 | 0.019989 |
| DecisionTreeClassifier | 0.936842 | 0.933693 | 0.933693 | 0.936971 | 0.0170023 |
| NearestCentroid | 0.947368 | 0.933506 | 0.933506 | 0.946801 | 0.0160074 |
| ExtraTreeClassifier | 0.922807 | 0.912168 | 0.912168 | 0.922462 | 0.0109999 |
| CheckingClassifier | 0.361404 | 0.5 | 0.5 | 0.191879 | 0.0170043 |
| DummyClassifier | 0.512281 | 0.489598 | 0.489598 | 0.518924 | 0.0119965 |
"""
    def __init__(
        self,
        verbose=0,
        ignore_warnings=True,
        custom_metric=None,
        predictions=False,
        random_state=42,
        classifiers = "all"
    ):
        # Store configuration; nothing is fitted until fit() is called.
        self.verbose = verbose
        self.ignore_warnings = ignore_warnings
        self.custom_metric = custom_metric
        self.predictions = predictions
        # name -> fitted sklearn Pipeline, populated by fit().
        self.models = {}
        self.random_state = random_state
        # "all" selects the module-level CLASSIFIERS registry; otherwise a
        # user-supplied list of estimators.
        self.classifiers = classifiers
def fit(self, X_train, X_test, y_train, y_test):
"""Fit Classification algorithms to X_train and y_train, predict and score on X_test, y_test.
Parameters
----------
X_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
X_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
y_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
y_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
Returns
-------
scores : Pandas DataFrame
Returns metrics of all the models in a Pandas DataFrame.
predictions : Pandas DataFrame
Returns predictions of all the models in a Pandas DataFrame.
"""
Accuracy = []
B_Accuracy = []
ROC_AUC = []
F1 = []
names = []
TIME = []
predictions = {}
if self.custom_metric is not None:
CUSTOM_METRIC = []
if isinstance(X_train, np.ndarray):
X_train = pd.DataFrame(X_train)
X_test = pd.DataFrame(X_test)
numeric_features = X_train.select_dtypes(include=[np.number]).columns
categorical_features = X_train.select_dtypes(include=["object"]).columns
categorical_low, categorical_high = get_card_split(
X_train, categorical_features
)
preprocessor = ColumnTransformer(
transformers=[
("numeric", numeric_transformer, numeric_features),
("categorical_low", categorical_transformer_low, categorical_low),
("categorical_high", categorical_transformer_high, categorical_high),
]
)
if self.classifiers == "all":
self.classifiers = CLASSIFIERS
else:
try:
temp_list = []
for classifier in self.classifiers:
full_name = (classifier.__class__.__name__, classifier)
temp_list.append(full_name)
self.classifiers = temp_list
except Exception as exception:
print(exception)
print("Invalid Classifier(s)")
for name, model in tqdm(self.classifiers):
start = time.time()
try:
if "random_state" in model().get_params().keys():
pipe = Pipeline(
steps=[
("preprocessor", preprocessor),
("classifier", model(random_state=self.random_state)),
]
)
else:
pipe = Pipeline(
steps=[("preprocessor", preprocessor), ("classifier", model())]
)
pipe.fit(X_train, y_train)
self.models[name] = pipe
y_pred = pipe.predict(X_test)
accuracy = accuracy_score(y_test, y_pred, normalize=True)
b_accuracy = balanced_accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred, average="weighted")
try:
roc_auc = roc_auc_score(y_test, y_pred)
except Exception as exception:
roc_auc = None
if self.ignore_warnings is False:
print("ROC AUC couldn't be calculated for " + name)
print(exception)
names.append(name)
Accuracy.append(accuracy)
B_Accuracy.append(b_accuracy)
ROC_AUC.append(roc_auc)
F1.append(f1)
TIME.append(time.time() - start)
if self.custom_metric is not None:
custom_metric = self.custom_metric(y_test, y_pred)
CUSTOM_METRIC.append(custom_metric)
if self.verbose > 0:
if self.custom_metric is not None:
print(
{
"Model": name,
"Accuracy": accuracy,
"Balanced Accuracy": b_accuracy,
"ROC AUC": roc_auc,
"F1 Score": f1,
self.custom_metric.__name__: custom_metric,
"Time taken": time.time() - start,
}
)
else:
print(
{
"Model": name,
"Accuracy": accuracy,
"Balanced Accuracy": b_accuracy,
"ROC AUC": roc_auc,
"F1 Score": f1,
"Time taken": time.time() - start,
}
)
if self.predictions:
predictions[name] = y_pred
except Exception as exception:
if self.ignore_warnings is False:
print(name + " model failed to execute")
print(exception)
if self.custom_metric is None:
scores = pd.DataFrame(
{
"Model": names,
"Accuracy": Accuracy,
"Balanced Accuracy": B_Accuracy,
"ROC AUC": ROC_AUC,
"F1 Score": F1,
"Time Taken": TIME,
}
)
else:
scores = pd.DataFrame(
{
"Model": names,
"Accuracy": Accuracy,
"Balanced Accuracy": B_Accuracy,
"ROC AUC": ROC_AUC,
"F1 Score": F1,
self.custom_metric.__name__: CUSTOM_METRIC,
"Time Taken": TIME,
}
)
scores = scores.sort_values(by="Balanced Accuracy", ascending=False).set_index(
"Model"
)
if self.predictions:
predictions_df = pd.DataFrame.from_dict(predictions)
return scores, predictions_df if self.predictions is True else scores
def provide_models(self, X_train, X_test, y_train, y_test):
"""
This function returns all the model objects trained in fit function.
If fit is not called already, then we call fit and then return the models.
Parameters
----------
X_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
X_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
y_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
y_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
Returns
-------
models: dict-object,
Returns a dictionary with each model pipeline as value
with key as name of models.
"""
if len(self.models.keys()) == 0:
self.fit(X_train,X_test,y_train,y_test)
return self.models
def adjusted_rsquared(r2, n, p):
    """Return the adjusted R-squared for ``n`` samples and ``p`` predictors.

    Applies the standard correction ``1 - (1 - r2) * (n - 1) / (n - p - 1)``,
    which penalizes R-squared for the number of explanatory variables.
    """
    penalty = (n - 1) / (n - p - 1)
    return 1 - (1 - r2) * penalty
# Helper class for performing classification
class LazyRegressor:
"""
This module helps in fitting regression models that are available in Scikit-learn
Parameters
----------
verbose : int, optional (default=0)
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
ignore_warnings : bool, optional (default=True)
When set to True, the warning related to algorigms that are not able to run are ignored.
custom_metric : function, optional (default=None)
When function is provided, models are evaluated based on the custom evaluation metric provided.
prediction : bool, optional (default=False)
When set to True, the predictions of all the models models are returned as dataframe.
regressors : list, optional (default="all")
When function is provided, trains the chosen regressor(s).
Examples
--------
>>> from lazypredict.Supervised import LazyRegressor
>>> from sklearn import datasets
>>> from sklearn.utils import shuffle
>>> import numpy as np
>>> boston = datasets.load_boston()
>>> X, y = shuffle(boston.data, boston.target, random_state=13)
>>> X = X.astype(np.float32)
>>> offset = int(X.shape[0] * 0.9)
>>> X_train, y_train = X[:offset], y[:offset]
>>> X_test, y_test = X[offset:], y[offset:]
>>> reg = LazyRegressor(verbose=0, ignore_warnings=False, custom_metric=None)
>>> models, predictions = reg.fit(X_train, X_test, y_train, y_test)
>>> model_dictionary = reg.provide_models(X_train, X_test, y_train, y_test)
>>> models
| Model | Adjusted R-Squared | R-Squared | RMSE | Time Taken |
|:------------------------------|-------------------:|----------:|------:|-----------:|
| SVR | 0.83 | 0.88 | 2.62 | 0.01 |
| BaggingRegressor | 0.83 | 0.88 | 2.63 | 0.03 |
| NuSVR | 0.82 | 0.86 | 2.76 | 0.03 |
| RandomForestRegressor | 0.81 | 0.86 | 2.78 | 0.21 |
| XGBRegressor | 0.81 | 0.86 | 2.79 | 0.06 |
| GradientBoostingRegressor | 0.81 | 0.86 | 2.84 | 0.11 |
| ExtraTreesRegressor | 0.79 | 0.84 | 2.98 | 0.12 |
| AdaBoostRegressor | 0.78 | 0.83 | 3.04 | 0.07 |
| HistGradientBoostingRegressor | 0.77 | 0.83 | 3.06 | 0.17 |
| PoissonRegressor | 0.77 | 0.83 | 3.11 | 0.01 |
| LGBMRegressor | 0.77 | 0.83 | 3.11 | 0.07 |
| KNeighborsRegressor | 0.77 | 0.83 | 3.12 | 0.01 |
| DecisionTreeRegressor | 0.65 | 0.74 | 3.79 | 0.01 |
| MLPRegressor | 0.65 | 0.74 | 3.80 | 1.63 |
| HuberRegressor | 0.64 | 0.74 | 3.84 | 0.01 |
| GammaRegressor | 0.64 | 0.73 | 3.88 | 0.01 |
| LinearSVR | 0.62 | 0.72 | 3.96 | 0.01 |
| RidgeCV | 0.62 | 0.72 | 3.97 | 0.01 |
| BayesianRidge | 0.62 | 0.72 | 3.97 | 0.01 |
| Ridge | 0.62 | 0.72 | 3.97 | 0.01 |
| TransformedTargetRegressor | 0.62 | 0.72 | 3.97 | 0.01 |
| LinearRegression | 0.62 | 0.72 | 3.97 | 0.01 |
| ElasticNetCV | 0.62 | 0.72 | 3.98 | 0.04 |
| LassoCV | 0.62 | 0.72 | 3.98 | 0.06 |
| LassoLarsIC | 0.62 | 0.72 | 3.98 | 0.01 |
| LassoLarsCV | 0.62 | 0.72 | 3.98 | 0.02 |
| Lars | 0.61 | 0.72 | 3.99 | 0.01 |
| LarsCV | 0.61 | 0.71 | 4.02 | 0.04 |
| SGDRegressor | 0.60 | 0.70 | 4.07 | 0.01 |
| TweedieRegressor | 0.59 | 0.70 | 4.12 | 0.01 |
| GeneralizedLinearRegressor | 0.59 | 0.70 | 4.12 | 0.01 |
| ElasticNet | 0.58 | 0.69 | 4.16 | 0.01 |
| Lasso | 0.54 | 0.66 | 4.35 | 0.02 |
| RANSACRegressor | 0.53 | 0.65 | 4.41 | 0.04 |
| OrthogonalMatchingPursuitCV | 0.45 | 0.59 | 4.78 | 0.02 |
| PassiveAggressiveRegressor | 0.37 | 0.54 | 5.09 | 0.01 |
| GaussianProcessRegressor | 0.23 | 0.43 | 5.65 | 0.03 |
| OrthogonalMatchingPursuit | 0.16 | 0.38 | 5.89 | 0.01 |
| ExtraTreeRegressor | 0.08 | 0.32 | 6.17 | 0.01 |
| DummyRegressor | -0.38 | -0.02 | 7.56 | 0.01 |
| LassoLars | -0.38 | -0.02 | 7.56 | 0.01 |
| KernelRidge | -11.50 | -8.25 | 22.74 | 0.01 |
"""
    def __init__(
        self,
        verbose=0,
        ignore_warnings=True,
        custom_metric=None,
        predictions=False,
        random_state=42,
        regressors="all",
    ):
        # Store configuration; nothing is fitted until fit() is called.
        self.verbose = verbose
        self.ignore_warnings = ignore_warnings
        self.custom_metric = custom_metric
        self.predictions = predictions
        # name -> fitted sklearn Pipeline, populated by fit().
        self.models = {}
        self.random_state = random_state
        # "all" selects the module-level REGRESSORS registry; otherwise a
        # user-supplied list of estimators.
        self.regressors = regressors
def fit(self, X_train, X_test, y_train, y_test):
"""Fit Regression algorithms to X_train and y_train, predict and score on X_test, y_test.
Parameters
----------
X_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
X_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
y_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
y_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
Returns
-------
scores : Pandas DataFrame
Returns metrics of all the models in a Pandas DataFrame.
predictions : Pandas DataFrame
Returns predictions of all the models in a Pandas DataFrame.
"""
R2 = []
ADJR2 = []
RMSE = []
# WIN = []
names = []
TIME = []
predictions = {}
if self.custom_metric:
CUSTOM_METRIC = []
if isinstance(X_train, np.ndarray):
X_train = pd.DataFrame(X_train)
X_test = pd.DataFrame(X_test)
numeric_features = X_train.select_dtypes(include=[np.number]).columns
categorical_features = X_train.select_dtypes(include=["object"]).columns
categorical_low, categorical_high = get_card_split(
X_train, categorical_features
)
preprocessor = ColumnTransformer(
transformers=[
("numeric", numeric_transformer, numeric_features),
("categorical_low", categorical_transformer_low, categorical_low),
("categorical_high", categorical_transformer_high, categorical_high),
]
)
if self.regressors == "all":
self.regressors = REGRESSORS
else:
try:
temp_list = []
for regressor in self.regressors:
full_name = (regressor.__class__.__name__, regressor)
temp_list.append(full_name)
self.regressors = temp_list
except Exception as exception:
print(exception)
print("Invalid Regressor(s)")
for name, model in tqdm(self.regressors):
start = time.time()
try:
if "random_state" in model().get_params().keys():
pipe = Pipeline(
steps=[
("preprocessor", preprocessor),
("regressor", model(random_state=self.random_state)),
]
)
else:
pipe = Pipeline(
steps=[("preprocessor", preprocessor), ("regressor", model())]
)
pipe.fit(X_train, y_train)
self.models[name] = pipe
y_pred = pipe.predict(X_test)
r_squared = r2_score(y_test, y_pred)
adj_rsquared = adjusted_rsquared(r_squared, X_test.shape[0], X_test.shape[1])
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
names.append(name)
R2.append(r_squared)
ADJR2.append(adj_rsquared)
RMSE.append(rmse)
TIME.append(time.time() - start)
if self.custom_metric:
custom_metric = self.custom_metric(y_test, y_pred)
CUSTOM_METRIC.append(custom_metric)
if self.verbose > 0:
scores_verbose = {
"Model": name,
"R-Squared": r_squared,
"Adjusted R-Squared": adj_rsquared,
"RMSE": rmse,
"Time taken": time.time() - start,
}
if self.custom_metric:
scores_verbose[self.custom_metric.__name__] = custom_metric
print(scores_verbose)
if self.predictions:
predictions[name] = y_pred
except Exception as exception:
if self.ignore_warnings is False:
print(name + " model failed to execute")
print(exception)
scores = {
"Model": names,
"Adjusted R-Squared": ADJR2,
"R-Squared": R2,
"RMSE": RMSE,
"Time Taken": TIME
}
if self.custom_metric:
scores[self.custom_metric.__name__] = CUSTOM_METRIC
scores = pd.DataFrame(scores)
scores = scores.sort_values(by="Adjusted R-Squared", ascending=False).set_index("Model")
if self.predictions:
predictions_df = pd.DataFrame.from_dict(predictions)
return scores, predictions_df if self.predictions is True else scores
def provide_models(self, X_train, X_test, y_train, y_test):
"""
This function returns all the model objects trained in fit function.
If fit is not called already, then we call fit and then return the models.
Parameters
----------
X_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
X_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
y_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
y_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
Returns
-------
models: dict-object,
Returns a dictionary with each model pipeline as value
with key as name of models.
"""
if len(self.models.keys()) == 0:
self.fit(X_train,X_test,y_train,y_test)
return self.models
# Backwards-compatible aliases for the historical public names.
Regression = LazyRegressor
Classification = LazyClassifier
| 44.64245 | 141 | 0.535371 |
57a23e28061d1bd231cafed7f1e1bf33d964fd10 | 19,483 | py | Python | async_http/protocol.py | denismakogon/async-http | 508e3d694598708d1abae9f50aac18b7b84bd523 | [
"Apache-2.0"
] | null | null | null | async_http/protocol.py | denismakogon/async-http | 508e3d694598708d1abae9f50aac18b7b84bd523 | [
"Apache-2.0"
] | null | null | null | async_http/protocol.py | denismakogon/async-http | 508e3d694598708d1abae9f50aac18b7b84bd523 | [
"Apache-2.0"
] | null | null | null | import asyncio
import traceback
from httptools import HttpRequestParser
from httptools.parser.errors import HttpParserError
from .exceptions import (
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from .request import Request, StreamBuffer
from .response import HTTPResponse
import logging
logger = logging.getLogger(__name__)
# Prefer uvloop's faster event loop when it is installed; fall back to the
# stdlib asyncio loop otherwise.
try:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    pass
# Module-level clock value shared by the protocol's timeout bookkeeping.
# NOTE(review): presumably refreshed periodically elsewhere in this module
# (not visible here) -- it is only initialized to None at import time.
current_time = None
class HttpProtocol(asyncio.Protocol):
"""
This class provides a basic HTTP implementation.
"""
__slots__ = (
# event loop, connection
"loop",
"transport",
"connections",
"signal",
# request params
"parser",
"request",
"url",
"headers",
# request config
"request_handler",
"request_timeout",
"response_timeout",
"keep_alive_timeout",
"request_max_size",
"request_buffer_queue_size",
"request_class",
"is_request_stream",
"router",
"error_handler",
# enable or disable access log purpose
"access_log",
# connection management
"_total_request_size",
"_request_timeout_handler",
"_response_timeout_handler",
"_keep_alive_timeout_handler",
"_last_request_time",
"_last_response_time",
"_is_stream_handler",
"_not_paused",
"_request_handler_task",
"_request_stream_task",
"_keep_alive",
"_header_fragment",
"state",
"_debug",
)
    def __init__(
        self,
        *,
        loop,
        request_handler,
        error_handler,
        signal=None,
        connections=None,
        request_timeout=60,
        response_timeout=60,
        keep_alive_timeout=5,
        request_max_size=None,
        request_buffer_queue_size=100,
        request_class=None,
        access_log=True,
        keep_alive=True,
        is_request_stream=False,
        router=None,
        state=None,
        debug=False,
        **kwargs
    ):
        """Initialize per-connection state; all arguments are keyword-only.

        Timeouts are in seconds.  ``connections`` is a shared set the server
        uses to track live protocol instances; ``state`` is a shared dict
        in which a running ``requests_count`` is maintained.
        """
        self.loop = loop
        # Populated by connection_made() / during request parsing.
        self.transport = None
        self.request = None
        self.parser = None
        self.url = None
        self.headers = None
        self.router = router
        self.signal = signal
        self.access_log = access_log
        self.connections = connections or set()
        self.request_handler = request_handler
        self.error_handler = error_handler
        self.request_timeout = request_timeout
        self.request_buffer_queue_size = request_buffer_queue_size
        self.response_timeout = response_timeout
        self.keep_alive_timeout = keep_alive_timeout
        self.request_max_size = request_max_size
        self.request_class = request_class or Request
        self.is_request_stream = is_request_stream
        self._is_stream_handler = False
        # Event used for write back-pressure (see pause/resume_writing).
        self._not_paused = asyncio.Event(loop=loop)
        self._total_request_size = 0
        # Timer handles and tasks, cancelled on connection_lost().
        self._request_timeout_handler = None
        self._response_timeout_handler = None
        self._keep_alive_timeout_handler = None
        self._last_request_time = None
        self._last_response_time = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._keep_alive = keep_alive
        self._header_fragment = b""
        self.state = state if state else {}
        if "requests_count" not in self.state:
            self.state["requests_count"] = 0
        self._debug = debug
        # Start in the "writable" state.
        self._not_paused.set()
@property
def keep_alive(self):
    """
    Decide whether this connection should be kept open after the
    current response, combining the configured ``_keep_alive`` flag,
    :attr:`Signal.stopped` and the parser's own keep-alive verdict.

    :return: ``True`` if the connection is to be kept alive, else ``False``
    """
    # Guard clauses preserve the original short-circuit order: the
    # parser is only consulted when the first two checks pass.
    if not self._keep_alive:
        return False
    if self.signal.stopped:
        return False
    return self.parser.should_keep_alive()
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
    """asyncio callback: register the connection and arm the request timer.

    :param transport: the asyncio transport for this connection
    """
    self.connections.add(self)
    # The request-timeout clock starts as soon as the socket is open.
    self._request_timeout_handler = self.loop.call_later(
        self.request_timeout, self.request_timeout_callback
    )
    self.transport = transport
    # NOTE: `current_time` is a module-level clock value maintained
    # elsewhere in this file — presumably refreshed periodically; verify.
    self._last_request_time = current_time
def connection_lost(self, exc):
    """asyncio callback: deregister and cancel all outstanding work."""
    self.connections.discard(self)
    # Cancel handler tasks first, then every armed timer, matching the
    # original cancellation order.
    cancellables = (
        self._request_handler_task,
        self._request_stream_task,
        self._request_timeout_handler,
        self._response_timeout_handler,
        self._keep_alive_timeout_handler,
    )
    for pending in cancellables:
        if pending:
            pending.cancel()
def pause_writing(self):
    """asyncio flow control: mark the transport as not writable."""
    self._not_paused.clear()
def resume_writing(self):
    """asyncio flow control: mark the transport writable again."""
    self._not_paused.set()
def request_timeout_callback(self):
    """Timer callback: abort the request if it took too long to arrive.

    Re-arms itself with the remaining time if the deadline has not yet
    passed (the timer may fire early relative to `_last_request_time`).
    """
    # See the docstring in the RequestTimeout exception, to see
    # exactly what this timeout is checking for.
    # Check if elapsed time since request initiated exceeds our
    # configured maximum request timeout value
    time_elapsed = current_time - self._last_request_time
    if time_elapsed < self.request_timeout:
        time_left = self.request_timeout - time_elapsed
        self._request_timeout_handler = self.loop.call_later(
            time_left, self.request_timeout_callback
        )
    else:
        # Deadline passed: cancel any in-flight work and report 408.
        if self._request_stream_task:
            self._request_stream_task.cancel()
        if self._request_handler_task:
            self._request_handler_task.cancel()
        self.write_error(RequestTimeout("Request Timeout"))
def response_timeout_callback(self):
    """Timer callback: abort if the handler took too long to respond.

    Measured from ``_last_request_time``, which ``execute_request_handler``
    resets when handling starts. Re-arms itself if time remains.
    """
    # Check if elapsed time since response was initiated exceeds our
    # configured maximum request timeout value
    time_elapsed = current_time - self._last_request_time
    if time_elapsed < self.response_timeout:
        time_left = self.response_timeout - time_elapsed
        self._response_timeout_handler = self.loop.call_later(
            time_left, self.response_timeout_callback
        )
    else:
        # Deadline passed: cancel in-flight work and report 503.
        if self._request_stream_task:
            self._request_stream_task.cancel()
        if self._request_handler_task:
            self._request_handler_task.cancel()
        self.write_error(ServiceUnavailable("Response Timeout"))
def keep_alive_timeout_callback(self):
    """
    Check if elapsed time since last response exceeds our configured
    maximum keep alive timeout value and if so, close the transport
    pipe and let the response writer handle the error.

    Re-arms itself with the remaining time if the idle deadline has not
    yet passed.

    :return: None
    """
    time_elapsed = current_time - self._last_response_time
    if time_elapsed < self.keep_alive_timeout:
        time_left = self.keep_alive_timeout - time_elapsed
        self._keep_alive_timeout_handler = self.loop.call_later(
            time_left, self.keep_alive_timeout_callback
        )
    else:
        logger.info("KeepAlive Timeout. Closing connection.")
        self.transport.close()
        self.transport = None
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
    """asyncio callback: feed raw bytes into the HTTP parser.

    :param data: bytes received from the socket
    """
    # Check for the request itself getting too large and exceeding
    # memory limits
    self._total_request_size += len(data)
    # NOTE(review): the running total-size check is disabled in this
    # fork; only the Content-Length check in on_header() remains.
    # if self._total_request_size > self.request_max_size:
    #     self.write_error(PayloadTooLarge("Payload Too Large"))
    # Create parser if this is the first time we're receiving data
    if self.parser is None:
        assert self.request is None
        self.headers = []
        self.parser = HttpRequestParser(self)
        # requests count
        self.state["requests_count"] = self.state["requests_count"] + 1
    # Parse request chunk or close connection
    try:
        self.parser.feed_data(data)
    except HttpParserError:
        message = "Bad Request"
        if self._debug:
            message += "\n" + traceback.format_exc()
        self.write_error(InvalidUsage(message))
def on_url(self, url):
    """Parser callback: accumulate URL bytes (may arrive in chunks)."""
    self.url = url if not self.url else self.url + url
def on_header(self, name, value):
    """Parser callback: collect one header; reject oversized bodies.

    The header name can arrive fragmented, so it is accumulated in
    ``_header_fragment`` until a value is seen.
    """
    self._header_fragment += name
    if value is not None:
        # Early Payload-Too-Large rejection based on Content-Length,
        # before the body is ever received. Note this does not return:
        # the header is still recorded after write_error().
        if (
            self._header_fragment == b"Content-Length"
            and int(value) > self.request_max_size
        ):
            self.write_error(PayloadTooLarge("Payload Too Large"))
        try:
            value = value.decode()
        except UnicodeDecodeError:
            # Fall back for non-UTF-8 header bytes.
            value = value.decode("latin_1")
        # Header names are case-folded for case-insensitive lookup.
        self.headers.append(
            (self._header_fragment.decode().casefold(), value)
        )
        self._header_fragment = b""
def on_headers_complete(self):
    """Parser callback: build the Request object; start stream handlers
    immediately (before the body arrives)."""
    self.request = self.request_class(
        url_bytes=self.url,
        headers=dict(self.headers),
        version=self.parser.get_http_version(),
        method=self.parser.get_method().decode(),
        transport=self.transport,
    )
    # Remove any existing KeepAlive handler here,
    # It will be recreated if required on the new request.
    if self._keep_alive_timeout_handler:
        self._keep_alive_timeout_handler.cancel()
        self._keep_alive_timeout_handler = None
    if self.is_request_stream:
        # Ask the router whether this route consumes the body as a stream.
        self._is_stream_handler = self.router.is_stream_handler(
            self.request
        )
        if self._is_stream_handler:
            self.request.stream = StreamBuffer(
                self.request_buffer_queue_size
            )
            # Stream handlers run concurrently with body reception.
            self.execute_request_handler()
def on_body(self, body):
    """Parser callback: route a body chunk to the stream buffer or the
    request's body accumulator.

    :param body: bytes of the next body chunk
    """
    if self.is_request_stream and self._is_stream_handler:
        # Feed the concurrent stream handler (may apply back-pressure).
        self._request_stream_task = self.loop.create_task(
            self.body_append(body)
        )
    else:
        self.request.body_push(body)
async def body_append(self, body):
    """Enqueue a body chunk for the stream handler, pausing the socket
    while the buffer is full (back-pressure)."""
    buffer_full = self.request.stream.is_full()
    if buffer_full:
        self.transport.pause_reading()
    await self.request.stream.put(body)
    if buffer_full:
        self.transport.resume_reading()
def on_message_complete(self):
    """Parser callback: the full request (headers + body) has arrived."""
    # Entire request (headers and whole body) is received.
    # We can cancel and remove the request timeout handler now.
    if self._request_timeout_handler:
        self._request_timeout_handler.cancel()
        self._request_timeout_handler = None
    if self.is_request_stream and self._is_stream_handler:
        # Signal end-of-stream to the already-running handler; it was
        # started in on_headers_complete, so nothing else to do here.
        self._request_stream_task = self.loop.create_task(
            self.request.stream.put(None)
        )
        return
    self.request.body_finish()
    self.execute_request_handler()
def execute_request_handler(self):
    """
    Invoke the configured request handler as a task and arm the
    response timeout.

    :return: None
    """
    self._response_timeout_handler = self.loop.call_later(
        self.response_timeout, self.response_timeout_callback
    )
    # response_timeout_callback measures from this timestamp.
    self._last_request_time = current_time
    self._request_handler_task = self.loop.create_task(
        self.request_handler(
            self.request, self.write_response, self.stream_response
        )
    )
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def log_response(self, response):
    """
    Helper method provided to enable the logging of responses in case if
    the :attr:`HttpProtocol.access_log` is enabled.

    :param response: Response generated for the current request
    :type response: :class:`async_http.response.HTTPResponse` or
        :class:`async_http.response.StreamingHTTPResponse`
    :return: None
    """
    if self.access_log:
        extra = {"status": getattr(response, "status", 0)}
        if isinstance(response, HTTPResponse):
            extra["byte"] = len(response.body)
        else:
            # Streaming responses have no fixed length.
            extra["byte"] = -1
        # NOTE(review): `extra` is built but never passed to a logger in
        # this fork — the access-log emit appears to have been removed;
        # confirm whether this is intentional dead code.
def write_response(self, response):
    """
    Writes response content synchronously to the transport, then either
    closes the connection or arms the keep-alive timer and resets state
    for the next request.

    :param response: HTTPResponse produced by the request handler
    :return: None
    """
    if self._response_timeout_handler:
        self._response_timeout_handler.cancel()
        self._response_timeout_handler = None
    # Default to closing the connection. Binding the name before the
    # ``try`` fixes a latent UnboundLocalError: if ``self.keep_alive``
    # itself raised (e.g. AttributeError because the parser was already
    # reset), the ``finally`` block below referenced an unbound local.
    keep_alive = False
    try:
        keep_alive = self.keep_alive
        self.transport.write(
            response.output(
                self.request.version, keep_alive, self.keep_alive_timeout
            )
        )
        self.log_response(response)
    except AttributeError:
        # Handler returned something that is not a response object.
        logger.error(
            "Invalid response object for url %s, "
            "Expected Type: HTTPResponse, Actual Type: %s" %
            (self.url, type(response)), exc_info=1
        )
        self.write_error(ServerError("Invalid response type"))
    except RuntimeError:
        if self._debug:
            logger.error(
                "Connection lost before response written @ %s" %
                self.request.ip, exc_info=1
            )
        keep_alive = False
    except Exception as e:
        self.bail_out(
            "Writing response failed, connection closed {}".format(repr(e))
        )
    finally:
        if not keep_alive:
            self.transport.close()
            self.transport = None
        else:
            # Arm the idle timer and reset per-request state so the
            # connection can serve the next request.
            self._keep_alive_timeout_handler = self.loop.call_later(
                self.keep_alive_timeout, self.keep_alive_timeout_callback
            )
            self._last_response_time = current_time
            self.cleanup()
async def drain(self):
    """Wait until the transport is writable again (flow control)."""
    await self._not_paused.wait()
def push_data(self, data):
    """Write raw bytes straight to the transport (used by streaming)."""
    self.transport.write(data)
async def stream_response(self, response):
    """
    Streams a response to the client asynchronously. Attaches
    the transport to the response so the response consumer can
    write to the response as needed. Afterwards either closes the
    connection or arms the keep-alive timer.

    :param response: streaming response object produced by the handler
    :return: None
    """
    if self._response_timeout_handler:
        self._response_timeout_handler.cancel()
        self._response_timeout_handler = None
    # Default to closing the connection. Binding the name before the
    # ``try`` fixes a latent UnboundLocalError: if ``self.keep_alive``
    # itself raised (e.g. AttributeError because the parser was already
    # reset), the ``finally`` block below referenced an unbound local.
    keep_alive = False
    try:
        keep_alive = self.keep_alive
        response.protocol = self
        await response.stream(
            self.request.version, keep_alive, self.keep_alive_timeout
        )
        self.log_response(response)
    except AttributeError:
        # Handler returned something that is not a response object.
        logger.error(
            "Invalid response object for url %s, "
            "Expected Type: HTTPResponse, Actual Type: %s" %
            (self.url, type(response)), exc_info=1
        )
        self.write_error(ServerError("Invalid response type"))
    except RuntimeError:
        if self._debug:
            logger.error(
                "Connection lost before response written @ %s" %
                self.request.ip, exc_info=1
            )
        keep_alive = False
    except Exception as e:
        self.bail_out(
            "Writing response failed, connection closed {}".format(repr(e))
        )
    finally:
        if not keep_alive:
            self.transport.close()
            self.transport = None
        else:
            # Arm the idle timer and reset per-request state so the
            # connection can serve the next request.
            self._keep_alive_timeout_handler = self.loop.call_later(
                self.keep_alive_timeout, self.keep_alive_timeout_callback
            )
            self._last_response_time = current_time
            self.cleanup()
def write_error(self, exception):
    """Render *exception* through the error handler, write it to the
    transport, then always close the connection.

    :param exception: the exception to turn into an HTTP error response
    """
    # An error _is_ a response.
    # Don't throw a response timeout, when a response _is_ given.
    if self._response_timeout_handler:
        self._response_timeout_handler.cancel()
        self._response_timeout_handler = None
    response = None
    try:
        response = self.error_handler.response(self.request, exception)
        # The request may be None if the error occurred before parsing
        # completed; default the HTTP version in that case.
        version = self.request.version if self.request else "1.1"
        self.transport.write(response.output(version))
    except RuntimeError:
        if self._debug:
            logger.error(
                "Connection lost before error written @ %s" %
                (self.request.ip if self.request else "Unknown"),
                exc_info=1
            )
    except Exception as e:
        self.bail_out(
            "Writing error failed, connection closed {}".format(repr(e)),
            from_error=True,
        )
    finally:
        # Log only when a request was actually parsed and the connection
        # was either keep-alive or the error is a 408 request timeout.
        if self.parser and (
            self.keep_alive or getattr(response, "status", 0) == 408
        ):
            self.log_response(response)
        try:
            self.transport.close()
        except AttributeError:
            # Transport already gone (set to None elsewhere).
            logger.error("Connection lost before server could close it.")
def bail_out(self, message, from_error=False):
    """
    In case if the transport pipes are closed and the app encounters
    an error while writing data to the transport pipe, we log the error
    with proper details.

    :param message: Error message to display
    :param from_error: If the bail out was invoked while handling an
        exception scenario.
    :type message: str
    :type from_error: bool
    :return: None
    """
    if from_error or self.transport.is_closing():
        # Already inside error handling (or socket closing): only log,
        # to avoid recursing through write_error -> bail_out.
        logger.error(
            "Transport closed @ %s and exception "
            "experienced during error handling" %
            self.transport.get_extra_info("peername"),
            exc_info=1
        )
    else:
        self.write_error(ServerError(message))
        logger.error(message)
def cleanup(self):
    """This is called when KeepAlive feature is used,
    it resets the connection in order for it to be able
    to handle receiving another request on the same connection."""
    for attr in ("parser", "request", "url", "headers",
                 "_request_handler_task", "_request_stream_task"):
        setattr(self, attr, None)
    self._total_request_size = 0
    self._is_stream_handler = False
def close_if_idle(self):
    """Close the connection if a request is not being sent or received

    :return: boolean - True if closed, false if staying open
    """
    # A live parser means a request is in flight; leave the socket open.
    if self.parser:
        return False
    self.transport.close()
    return True
def close(self):
    """
    Force close the connection.
    """
    # Detach first, then close, so self.transport is never a closed
    # transport. No-op when already detached.
    transport, self.transport = self.transport, None
    if transport is not None:
        transport.close()
| 34.361552 | 79 | 0.587281 |
0796c7ba171d094a2a11344f1dbdb9dc3a9be5eb | 1,365 | py | Python | train_net.py | TanviAgg/AVT-local | 8f31cc4b85da2b0b08345ac507331f69fbf6da02 | [
"Apache-2.0"
] | null | null | null | train_net.py | TanviAgg/AVT-local | 8f31cc4b85da2b0b08345ac507331f69fbf6da02 | [
"Apache-2.0"
] | null | null | null | train_net.py | TanviAgg/AVT-local | 8f31cc4b85da2b0b08345ac507331f69fbf6da02 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
"""Main training entry."""
import os
import logging
import random
import subprocess
import torch
import hydra
from omegaconf import DictConfig, OmegaConf
import func
# Custom resolvers usable inside the Hydra/OmegaConf YAML configs,
# e.g. ${minus:${a},${b}}.
OmegaConf.register_new_resolver('minus', lambda x, y: x - y)
# Multiply and cast to integer
OmegaConf.register_new_resolver('times_int', lambda x, y: int(x * y))
@hydra.main(config_path='conf', config_name='config')
def main(cfg: DictConfig) -> None:
    """Hydra entry point: back up the run config, seed RNGs, and dispatch
    to the training function named by ``cfg.train.fn`` in ``func``."""
    # Since future runs might corrupt the stored hydra config, copy it over
    # for backup.
    if not os.path.exists('.hydra.orig'):
        subprocess.call('cp -r .hydra .hydra.orig', shell=True)
    random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)
    # try:
    #     print(subprocess.check_output('nvidia-smi'))
    # except subprocess.CalledProcessError:
    #     print('Could not run nvidia-smi..')
    # cudnn.deterministic = True  # Makes it slow..
    # Resolve e.g. func.train.main(cfg) dynamically from the config.
    getattr(func, cfg.train.fn).main(cfg)
if __name__ == "__main__":
    # Configure verbose logging before any worker processes exist.
    logging.basicConfig(format=('%(asctime)s %(levelname)-8s'
                                ' {%(module)s:%(lineno)d} %(message)s'),
                        level=logging.DEBUG,
                        datefmt='%Y-%m-%d %H:%M:%S')
    # 'spawn' is required for CUDA-safe multiprocessing workers.
    torch.multiprocessing.set_start_method('spawn')
    main()  # pylint: disable=no-value-for-parameter  # Uses hydra
40400739e203e99f4dfd3746ea32ae3b017c9917 | 65,139 | py | Python | v2/shell.py | hybrid-storage-dev/cinder-client-fs-111t-hybrid-cherry | dad5632c91190c179479dcec58c13c655b839122 | [
"Apache-2.0"
] | null | null | null | v2/shell.py | hybrid-storage-dev/cinder-client-fs-111t-hybrid-cherry | dad5632c91190c179479dcec58c13c655b839122 | [
"Apache-2.0"
] | null | null | null | v2/shell.py | hybrid-storage-dev/cinder-client-fs-111t-hybrid-cherry | dad5632c91190c179479dcec58c13c655b839122 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import copy
import os
import sys
import time
import six
from cinderclient import exceptions
from cinderclient import utils
from cinderclient.openstack.common import strutils
from cinderclient.v2 import availability_zones
def _poll_for_status(poll_fn, obj_id, action, final_ok_states,
poll_period=5, show_progress=True):
"""Blocks while an action occurs. Periodically shows progress."""
def print_progress(progress):
if show_progress:
msg = ('\rInstance %(action)s... %(progress)s%% complete'
% dict(action=action, progress=progress))
else:
msg = '\rInstance %(action)s...' % dict(action=action)
sys.stdout.write(msg)
sys.stdout.flush()
print()
while True:
obj = poll_fn(obj_id)
status = obj.status.lower()
progress = getattr(obj, 'progress', None) or 0
if status in final_ok_states:
print_progress(100)
print("\nFinished")
break
elif status == "error":
print("\nError %(action)s instance" % {'action': action})
break
else:
print_progress(progress)
time.sleep(poll_period)
def _find_volume_snapshot(cs, snapshot):
    """Resolve *snapshot* (name or ID) via ``utils.find_resource``."""
    return utils.find_resource(cs.volume_snapshots, snapshot)
def _find_backup(cs, backup):
    """Resolve *backup* (name or ID) via ``utils.find_resource``."""
    return utils.find_resource(cs.backups, backup)
def _find_consistencygroup(cs, consistencygroup):
    """Resolve *consistencygroup* (name or ID) via ``utils.find_resource``."""
    return utils.find_resource(cs.consistencygroups, consistencygroup)
def _find_cgsnapshot(cs, cgsnapshot):
    """Resolve *cgsnapshot* (name or ID) via ``utils.find_resource``."""
    return utils.find_resource(cs.cgsnapshots, cgsnapshot)
def _find_transfer(cs, transfer):
    """Resolve *transfer* (name or ID) via ``utils.find_resource``."""
    return utils.find_resource(cs.transfers, transfer)
def _find_qos_specs(cs, qos_specs):
    """Resolve a qos-specs entry by ID via ``utils.find_resource``."""
    return utils.find_resource(cs.qos_specs, qos_specs)
def _print_volume_snapshot(snapshot):
    """Print a snapshot's raw ``_info`` dict as a table."""
    utils.print_dict(snapshot._info)
def _print_volume_image(image):
    """Print an upload-to-image result.

    ``image`` is indexed as a (response, body) pair — presumably the
    tuple returned by the upload call; verify against caller.
    """
    utils.print_dict(image[1]['os-volume_upload_image'])
def _translate_keys(collection, convert):
for item in collection:
keys = item.__dict__
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
def _translate_volume_keys(collection):
    """Normalize API volume attribute names for display."""
    convert = [('volumeType', 'volume_type'),
               ('os-vol-tenant-attr:tenant_id', 'tenant_id')]
    _translate_keys(collection, convert)
def _translate_volume_snapshot_keys(collection):
    """Normalize API snapshot attribute names for display."""
    convert = [('volumeId', 'volume_id')]
    _translate_keys(collection, convert)
def _translate_availability_zone_keys(collection):
    """Normalize API availability-zone attribute names for display."""
    convert = [('zoneName', 'name'), ('zoneState', 'status')]
    _translate_keys(collection, convert)
def _extract_metadata(args):
metadata = {}
for metadatum in args.metadata:
# unset doesn't require a val, so we have the if/else
if '=' in metadatum:
(key, value) = metadatum.split('=', 1)
else:
key = metadatum
value = None
metadata[key] = value
return metadata
@utils.arg('--all-tenants',
           dest='all_tenants',
           metavar='<0|1>',
           nargs='?',
           type=int,
           const=1,
           default=0,
           help='Shows details for all tenants. Admin only.')
@utils.arg('--all_tenants',
           nargs='?',
           type=int,
           const=1,
           help=argparse.SUPPRESS)
@utils.arg('--name',
           metavar='<name>',
           default=None,
           help='Filters results by a name. OPTIONAL: Default=None.')
@utils.arg('--display-name',
           help=argparse.SUPPRESS)
@utils.arg('--status',
           metavar='<status>',
           default=None,
           help='Filters results by a status. OPTIONAL: Default=None.')
@utils.arg('--metadata',
           type=str,
           nargs='*',
           metavar='<key=value>',
           help='Filters results by a metadata key and value pair. '
                'OPTIONAL: Default=None.',
           default=None)
@utils.arg('--marker',
           metavar='<marker>',
           default=None,
           help='Begin returning volumes that appear later in the volume '
                'list than that represented by this volume id. '
                'OPTIONAL: Default=None.')
@utils.arg('--limit',
           metavar='<limit>',
           default=None,
           help='Maximum number of volumes to return. OPTIONAL: Default=None.')
@utils.arg('--sort_key',
           metavar='<sort_key>',
           default=None,
           help='Key to be sorted, should be (`id`, `status`, `size`, '
                '`availability_zone`, `name`, `bootable`, `created_at`). '
                'OPTIONAL: Default=None.')
@utils.arg('--sort_dir',
           metavar='<sort_dir>',
           default=None,
           help='Sort direction, should be `desc` or `asc`. '
                'OPTIONAL: Default=None.')
@utils.arg('--availability-zone',
           metavar='<availability-zone>',
           help='Filters list by availability zone.')
@utils.service_type('volumev2')
def do_list(cs, args):
    """Lists all volumes."""
    # NOTE(thingee): Backwards-compatibility with v1 args
    if args.display_name is not None:
        args.name = args.display_name
    # The ALL_TENANTS environment variable overrides the CLI flag.
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'name': args.name,
        'status': args.status,
        'availability-zone': args.availability_zone,
        'metadata': _extract_metadata(args) if args.metadata else None,
    }
    volumes = cs.volumes.list(search_opts=search_opts, marker=args.marker,
                              limit=args.limit, sort_key=args.sort_key,
                              sort_dir=args.sort_dir)
    _translate_volume_keys(volumes)

    # Create a list of servers to which the volume is attached
    for vol in volumes:
        servers = [s.get('server_id') for s in vol.attachments]
        setattr(vol, 'attached_to', ','.join(map(str, servers)))

    # Admins listing across tenants also see the owning tenant ID.
    if all_tenants:
        key_list = ['ID', 'Tenant ID', 'Status', 'Name',
                    'Size', 'Volume Type', 'Bootable', 'Shareable',
                    'Attached to']
    else:
        key_list = ['ID', 'Status', 'Name',
                    'Size', 'Volume Type', 'Bootable', 'Shareable',
                    'Attached to']
    utils.print_list(volumes, key_list)
@utils.arg('volume',
           metavar='<volume>',
           help='Name or ID of volume.')
@utils.service_type('volumev2')
def do_show(cs, args):
    """Shows volume details."""
    volume = utils.find_volume(cs, args.volume)
    # Copy the raw info and drop the HATEOAS links before printing.
    info = dict(volume._info)
    info.pop('links', None)
    utils.print_dict(info)
class CheckSizeArgForCreate(argparse.Action):
    """argparse action: a size is mandatory unless the volume is created
    from a snapshot, another volume, or a replica."""

    def __call__(self, parser, args, values, option_string=None):
        size_or_source = (values or args.snapshot_id or args.source_volid
                          or args.source_replica)
        if size_or_source is None:
            parser.error('Size is a required parameter if snapshot '
                         'or source volume is not specified.')
        setattr(args, self.dest, values)
@utils.arg('size',
           metavar='<size>',
           nargs='?',
           type=int,
           action=CheckSizeArgForCreate,
           help='Size of volume, in GBs. (Required unless '
                'snapshot-id/source-volid is specified).')
@utils.arg('--consisgroup-id',
           metavar='<consistencygroup-id>',
           default=None,
           help='ID of a consistency group where the new volume belongs to. '
                'Default=None.')
@utils.arg('--snapshot-id',
           metavar='<snapshot-id>',
           default=None,
           help='Creates volume from snapshot ID. Default=None.')
@utils.arg('--snapshot_id',
           help=argparse.SUPPRESS)
@utils.arg('--source-volid',
           metavar='<source-volid>',
           default=None,
           help='Creates volume from volume ID. Default=None.')
@utils.arg('--source_volid',
           help=argparse.SUPPRESS)
@utils.arg('--source-replica',
           metavar='<source-replica>',
           default=None,
           help='Creates volume from replicated volume ID. Default=None.')
@utils.arg('--image-id',
           metavar='<image-id>',
           default=None,
           help='Creates volume from image ID. Default=None.')
@utils.arg('--image_id',
           help=argparse.SUPPRESS)
@utils.arg('--name',
           metavar='<name>',
           default=None,
           help='Volume name. Default=None.')
@utils.arg('--display-name',
           help=argparse.SUPPRESS)
@utils.arg('--display_name',
           help=argparse.SUPPRESS)
@utils.arg('--description',
           metavar='<description>',
           default=None,
           help='Volume description. Default=None.')
@utils.arg('--display-description',
           help=argparse.SUPPRESS)
@utils.arg('--display_description',
           help=argparse.SUPPRESS)
@utils.arg('--volume-type',
           metavar='<volume-type>',
           default=None,
           help='Volume type. Default=None.')
@utils.arg('--volume_type',
           help=argparse.SUPPRESS)
@utils.arg('--availability-zone',
           metavar='<availability-zone>',
           default=None,
           help='Availability zone for volume. Default=None.')
@utils.arg('--availability_zone',
           help=argparse.SUPPRESS)
@utils.arg('--metadata',
           type=str,
           nargs='*',
           metavar='<key=value>',
           help='Metadata key and value pairs. Default=None.',
           default=None)
@utils.arg('--hint',
           metavar='<key=value>',
           dest='scheduler_hints',
           action='append',
           default=[],
           help='Scheduler hint, like in nova.')
@utils.arg('--shareable',
           metavar="<'T'|'F'>",
           help=('Allow volume to be attached more than once'
                 '(Optional, Default=False)'),
           default=False)
@utils.service_type('volumev2')
def do_create(cs, args):
    """Creates a volume."""
    # NOTE(thingee): Backwards-compatibility with v1 args
    if args.display_name is not None:
        args.name = args.display_name

    if args.display_description is not None:
        args.description = args.display_description

    volume_metadata = None
    if args.metadata is not None:
        volume_metadata = _extract_metadata(args)

    #NOTE(N.S.): take this piece from novaclient
    hints = {}
    if args.scheduler_hints:
        for hint in args.scheduler_hints:
            key, _sep, value = hint.partition('=')
            # NOTE(vish): multiple copies of same hint will
            # result in a list of values
            if key in hints:
                if isinstance(hints[key], six.string_types):
                    hints[key] = [hints[key]]
                hints[key] += [value]
            else:
                hints[key] = value
    #NOTE(N.S.): end of taken piece

    volume = cs.volumes.create(args.size,
                               args.consisgroup_id,
                               args.snapshot_id,
                               args.source_volid,
                               args.name,
                               args.description,
                               args.volume_type,
                               availability_zone=args.availability_zone,
                               imageRef=args.image_id,
                               metadata=volume_metadata,
                               scheduler_hints=hints,
                               source_replica=args.source_replica,
                               shareable=strutils.bool_from_string(
                                   args.shareable))

    info = dict()
    # Re-fetch so printed details include server-assigned fields.
    volume = cs.volumes.get(volume.id)
    info.update(volume._info)

    info.pop('links', None)
    utils.print_dict(info)
@utils.arg('volume',
           metavar='<volume>', nargs='+',
           help='Name or ID of volume or volumes to delete.')
@utils.service_type('volumev2')
def do_delete(cs, args):
    """Removes one or more volumes."""
    failures = 0
    for name_or_id in args.volume:
        try:
            utils.find_volume(cs, name_or_id).delete()
        except Exception as exc:
            failures += 1
            print("Delete for volume %s failed: %s" % (name_or_id, exc))
    # Only error out when every requested deletion failed.
    if failures == len(args.volume):
        raise exceptions.CommandError("Unable to delete any of specified "
                                      "volumes.")
@utils.arg('volume',
           metavar='<volume>', nargs='+',
           help='Name or ID of volume or volumes to delete.')
@utils.service_type('volumev2')
def do_force_delete(cs, args):
    """Attempts force-delete of volume, regardless of state."""
    failures = 0
    for name_or_id in args.volume:
        try:
            utils.find_volume(cs, name_or_id).force_delete()
        except Exception as exc:
            failures += 1
            print("Delete for volume %s failed: %s" % (name_or_id, exc))
    # Only error out when every requested deletion failed.
    if failures == len(args.volume):
        raise exceptions.CommandError("Unable to force delete any of "
                                      "specified volumes.")
@utils.arg('volume', metavar='<volume>', nargs='+',
           help='Name or ID of volume to modify.')
@utils.arg('--state', metavar='<state>', default='available',
           help=('The state to assign to the volume. Valid values are '
                 '"available," "error," "creating," "deleting," and '
                 '"error_deleting." '
                 'Default=available.'))
@utils.service_type('volumev2')
def do_reset_state(cs, args):
    """Explicitly updates the volume state."""
    failure_flag = False

    for volume in args.volume:
        try:
            utils.find_volume(cs, volume).reset_state(args.state)
        except Exception as e:
            # Keep going; report each failure and raise once at the end.
            failure_flag = True
            msg = "Reset state for volume %s failed: %s" % (volume, e)
            print(msg)

    # Unlike do_delete, a single failure is enough to exit non-zero here.
    if failure_flag:
        msg = "Unable to reset the state for the specified volume(s)."
        raise exceptions.CommandError(msg)
@utils.arg('volume',
           metavar='<volume>',
           help='Name or ID of volume to rename.')
@utils.arg('name',
           nargs='?',
           metavar='<name>',
           help='New name for volume.')
@utils.arg('--description', metavar='<description>',
           help='Volume description. Default=None.',
           default=None)
@utils.arg('--display-description',
           help=argparse.SUPPRESS)
@utils.arg('--display_description',
           help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_rename(cs, args):
    """Renames a volume."""
    kwargs = {}

    if args.name is not None:
        kwargs['name'] = args.name
    # The deprecated v1 --display-description takes precedence here.
    if args.display_description is not None:
        kwargs['description'] = args.display_description
    elif args.description is not None:
        kwargs['description'] = args.description

    if not any(kwargs):
        msg = 'Must supply either name or description.'
        raise exceptions.ClientException(code=1, message=msg)

    utils.find_volume(cs, args.volume).update(**kwargs)
@utils.arg('volume',
           metavar='<volume>',
           help='Name or ID of volume for which to update metadata.')
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help='Metadata key and value pair to set or unset. '
                'For unset, specify only the key.')
@utils.service_type('volumev2')
def do_metadata(cs, args):
    """Sets or deletes volume metadata."""
    volume = utils.find_volume(cs, args.volume)
    metadata = _extract_metadata(args)

    if args.action == 'set':
        cs.volumes.set_metadata(volume, metadata)
    elif args.action == 'unset':
        # NOTE(zul): Make sure py2/py3 sorting is the same
        cs.volumes.delete_metadata(volume, sorted(metadata.keys(),
                                   reverse=True))
@utils.arg('--all-tenants',
           dest='all_tenants',
           metavar='<0|1>',
           nargs='?',
           type=int,
           const=1,
           default=0,
           help='Shows details for all tenants. Admin only.')
@utils.arg('--all_tenants',
           nargs='?',
           type=int,
           const=1,
           help=argparse.SUPPRESS)
@utils.arg('--name',
           metavar='<name>',
           default=None,
           help='Filters results by a name. Default=None.')
@utils.arg('--display-name',
           help=argparse.SUPPRESS)
@utils.arg('--display_name',
           help=argparse.SUPPRESS)
@utils.arg('--status',
           metavar='<status>',
           default=None,
           help='Filters results by a status. Default=None.')
@utils.arg('--volume-id',
           metavar='<volume-id>',
           default=None,
           help='Filters results by a volume ID. Default=None.')
@utils.arg('--volume_id',
           help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_snapshot_list(cs, args):
    """Lists all snapshots."""
    # The ALL_TENANTS environment variable overrides the CLI flag.
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))

    # Backwards-compatibility with the deprecated v1 --display-name flag.
    if args.display_name is not None:
        args.name = args.display_name

    search_opts = {
        'all_tenants': all_tenants,
        'display_name': args.name,
        'status': args.status,
        'volume_id': args.volume_id,
    }

    snapshots = cs.volume_snapshots.list(search_opts=search_opts)
    _translate_volume_snapshot_keys(snapshots)
    utils.print_list(snapshots,
                     ['ID', 'Volume ID', 'Status', 'Name', 'Size'])
@utils.arg('snapshot',
           metavar='<snapshot>',
           help='Name or ID of snapshot.')
@utils.service_type('volumev2')
def do_snapshot_show(cs, args):
    """Shows snapshot details."""
    snapshot = _find_volume_snapshot(cs, args.snapshot)
    _print_volume_snapshot(snapshot)
@utils.arg('volume',
           metavar='<volume>',
           help='Name or ID of volume to snapshot.')
@utils.arg('--force',
           metavar='<True|False>',
           help='Allows or disallows snapshot of '
                'a volume when the volume is attached to an instance. '
                'If set to True, ignores the current status of the '
                'volume when attempting to snapshot it rather '
                'than forcing it to be available. '
                'Default=False.',
           default=False)
@utils.arg('--name',
           metavar='<name>',
           default=None,
           help='Snapshot name. Default=None.')
@utils.arg('--display-name',
           help=argparse.SUPPRESS)
@utils.arg('--display_name',
           help=argparse.SUPPRESS)
@utils.arg('--description',
           metavar='<description>',
           default=None,
           help='Snapshot description. Default=None.')
@utils.arg('--display-description',
           help=argparse.SUPPRESS)
@utils.arg('--display_description',
           help=argparse.SUPPRESS)
@utils.arg('--metadata',
           type=str,
           nargs='*',
           metavar='<key=value>',
           help='Snapshot metadata key and value pairs. Default=None.',
           default=None)
@utils.service_type('volumev2')
def do_snapshot_create(cs, args):
    """Creates a snapshot."""
    # Backwards-compatibility with the deprecated v1 flags.
    if args.display_name is not None:
        args.name = args.display_name

    if args.display_description is not None:
        args.description = args.display_description

    snapshot_metadata = None
    if args.metadata is not None:
        snapshot_metadata = _extract_metadata(args)

    volume = utils.find_volume(cs, args.volume)
    snapshot = cs.volume_snapshots.create(volume.id,
                                          args.force,
                                          args.name,
                                          args.description,
                                          metadata=snapshot_metadata)
    _print_volume_snapshot(snapshot)
@utils.arg('snapshot',
           metavar='<snapshot>', nargs='+',
           help='Name or ID of the snapshot(s) to delete.')
@utils.service_type('volumev2')
def do_snapshot_delete(cs, args):
    """Removes one or more snapshots."""
    failure_count = 0

    for snapshot in args.snapshot:
        try:
            _find_volume_snapshot(cs, snapshot).delete()
        except Exception as e:
            failure_count += 1
            print("Delete for snapshot %s failed: %s" % (snapshot, e))
    # Only error out when every requested deletion failed.
    if failure_count == len(args.snapshot):
        raise exceptions.CommandError("Unable to delete any of the specified "
                                      "snapshots.")
@utils.arg('snapshot', metavar='<snapshot>',
           help='Name or ID of snapshot.')
@utils.arg('name', nargs='?', metavar='<name>',
           help='New name for snapshot.')
@utils.arg('--description', metavar='<description>',
           help='Snapshot description. Default=None.',
           default=None)
@utils.arg('--display-description',
           help=argparse.SUPPRESS)
@utils.arg('--display_description',
           help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_snapshot_rename(cs, args):
    """Renames a snapshot."""
    kwargs = {}

    if args.name is not None:
        kwargs['name'] = args.name
    # NOTE(review): here --description wins over the deprecated
    # --display-description — the opposite precedence of do_rename.
    if args.description is not None:
        kwargs['description'] = args.description
    elif args.display_description is not None:
        kwargs['description'] = args.display_description

    if not any(kwargs):
        msg = 'Must supply either name or description.'
        raise exceptions.ClientException(code=1, message=msg)

    _find_volume_snapshot(cs, args.snapshot).update(**kwargs)
@utils.arg('snapshot', metavar='<snapshot>', nargs='+',
           help='Name or ID of snapshot to modify.')
@utils.arg('--state', metavar='<state>',
           default='available',
           help=('The state to assign to the snapshot. Valid values are '
                 '"available," "error," "creating," "deleting," and '
                 '"error_deleting." '
                 'Default is "available."'))
@utils.service_type('volumev2')
def do_snapshot_reset_state(cs, args):
    """Explicitly updates the snapshot state."""
    failure_count = 0

    single = (len(args.snapshot) == 1)

    for snapshot in args.snapshot:
        try:
            _find_volume_snapshot(cs, snapshot).reset_state(args.state)
        except Exception as e:
            failure_count += 1
            msg = "Reset state for snapshot %s failed: %s" % (snapshot, e)
            # With multiple snapshots, report per-item failures as we go;
            # with a single snapshot, the message surfaces in the raise.
            if not single:
                print(msg)

    if failure_count == len(args.snapshot):
        if not single:
            msg = ("Unable to reset the state for any of the specified "
                   "snapshots.")
        raise exceptions.CommandError(msg)
def _print_volume_type_list(vtypes):
    """Prints a table of volume types showing their ID and Name."""
    columns = ['ID', 'Name']
    utils.print_list(vtypes, columns)
@utils.service_type('volumev2')
def do_type_list(cs, args):
    """Lists available 'volume types'."""
    _print_volume_type_list(cs.volume_types.list())
@utils.service_type('volumev2')
def do_extra_specs_list(cs, args):
    """Lists current volume types and extra specs."""
    vtypes = cs.volume_types.list()
    columns = ['ID', 'Name', 'extra_specs']
    utils.print_list(vtypes, columns)
@utils.arg('name',
           metavar='<name>',
           help="Name of new volume type.")
@utils.service_type('volumev2')
def do_type_create(cs, args):
    """Creates a volume type."""
    new_type = cs.volume_types.create(args.name)
    _print_volume_type_list([new_type])
@utils.arg('id',
           metavar='<id>',
           help="ID of volume type to delete.")
@utils.service_type('volumev2')
def do_type_delete(cs, args):
    """Deletes a volume type."""
    # Deletion is by ID only; no name lookup is performed here.
    cs.volume_types.delete(args.id)
@utils.arg('vtype',
           metavar='<vtype>',
           help="Name or ID of volume type.")
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help='The extra specs key and value pair to set or unset. '
                'For unset, specify only the key.')
@utils.service_type('volumev2')
def do_type_key(cs, args):
    """Sets or unsets extra_spec for a volume type."""
    vtype = _find_volume_type(cs, args.vtype)
    specs = _extract_metadata(args)
    # argparse restricts action to exactly 'set' or 'unset'.
    if args.action == 'unset':
        vtype.unset_keys(list(specs))
    elif args.action == 'set':
        vtype.set_keys(specs)
@utils.service_type('volumev2')
def do_endpoints(cs, args):
    """Discovers endpoints registered by authentication service."""
    catalog = cs.client.service_catalog.catalog
    # Only the first endpoint of each service entry is displayed.
    for service in catalog['serviceCatalog']:
        utils.print_dict(service['endpoints'][0], service['name'])
@utils.service_type('volumev2')
def do_credentials(cs, args):
    """Shows user credentials returned from auth."""
    # Pull the raw service catalog and print its user/token sections.
    catalog = cs.client.service_catalog.catalog
    for key, title in (('user', "User Credentials"), ('token', "Token")):
        utils.print_dict(catalog[key], title)
# Quota resource-name prefixes recognized by the quota display helpers below.
_quota_resources = ['volumes', 'snapshots', 'gigabytes']
# Column headers used when printing per-resource quota usage tables.
_quota_infos = ['Type', 'In_use', 'Reserved', 'Limit']
def _quota_show(quotas):
    """Prints quota values for the recognized quota resources.

    Entries whose name does not start with one of ``_quota_resources``
    are filtered out.
    """
    shown = {}
    for resource in quotas._info:
        if any(resource.startswith(name) for name in _quota_resources):
            shown[resource] = getattr(quotas, resource, None)
    utils.print_dict(shown)
def _quota_usage_show(quotas):
    """Prints a usage table for the recognized quota resources."""
    rows = []
    for resource in quotas._info.keys():
        # Skip entries that are not one of the known quota resources.
        if not any(resource.startswith(name) for name in _quota_resources):
            continue
        usage = getattr(quotas, resource, None)
        usage['Type'] = resource
        rows.append(dict((k.capitalize(), v) for k, v in usage.items()))
    utils.print_list(rows, _quota_infos)
def _quota_update(manager, identifier, args):
    """Applies quota values from *args* via *manager* and prints the result.

    When ``args.volume_type`` is set, each resource key is suffixed with
    the volume type name (per-type quotas).
    """
    updates = {}
    for resource in _quota_resources:
        value = getattr(args, resource, None)
        if value is not None:
            key = resource
            if args.volume_type:
                key = resource + '_%s' % args.volume_type
            updates[key] = value
    # Nothing to do when no quota options were supplied.
    if updates:
        _quota_show(manager.update(identifier, **updates))
@utils.arg('tenant',
           metavar='<tenant_id>',
           help='ID of tenant for which to list quotas.')
@utils.service_type('volumev2')
def do_quota_show(cs, args):
    """Lists quotas for a tenant."""
    quotas = cs.quotas.get(args.tenant)
    _quota_show(quotas)
@utils.arg('tenant', metavar='<tenant_id>',
           help='ID of tenant for which to list quota usage.')
@utils.service_type('volumev2')
def do_quota_usage(cs, args):
    """Lists quota usage for a tenant."""
    quotas = cs.quotas.get(args.tenant, usage=True)
    _quota_usage_show(quotas)
@utils.arg('tenant',
           metavar='<tenant_id>',
           help='ID of tenant for which to list quota defaults.')
@utils.service_type('volumev2')
def do_quota_defaults(cs, args):
    """Lists default quotas for a tenant."""
    defaults = cs.quotas.defaults(args.tenant)
    _quota_show(defaults)
@utils.arg('tenant',
           metavar='<tenant_id>',
           help='ID of tenant for which to set quotas.')
@utils.arg('--volumes',
           metavar='<volumes>',
           type=int, default=None,
           help='The new "volumes" quota value. Default=None.')
@utils.arg('--snapshots',
           metavar='<snapshots>',
           type=int, default=None,
           help='The new "snapshots" quota value. Default=None.')
@utils.arg('--gigabytes',
           metavar='<gigabytes>',
           type=int, default=None,
           help='The new "gigabytes" quota value. Default=None.')
@utils.arg('--volume-type',
           metavar='<volume_type_name>',
           default=None,
           help='Volume type. Default=None.')
@utils.service_type('volumev2')
def do_quota_update(cs, args):
    """Updates quotas for a tenant."""
    # Delegate option extraction and printing to the shared helper.
    manager = cs.quotas
    _quota_update(manager, args.tenant, args)
@utils.arg('tenant', metavar='<tenant_id>',
           help='UUID of tenant to delete the quotas for.')
@utils.service_type('volumev2')
def do_quota_delete(cs, args):
    """Delete the quotas for a tenant."""
    # NOTE: was service_type('volume'); every other v2 command in this
    # module uses 'volumev2', and 'volume' routes to the v1 catalog entry.
    # Resets the tenant's quotas back to the defaults.
    cs.quotas.delete(args.tenant)
@utils.arg('class_name',
           metavar='<class>',
           help='Name of quota class for which to list quotas.')
@utils.service_type('volumev2')
def do_quota_class_show(cs, args):
    """Lists quotas for a quota class."""
    quotas = cs.quota_classes.get(args.class_name)
    _quota_show(quotas)
@utils.arg('class-name',
           metavar='<class-name>',
           help='Name of quota class for which to set quotas.')
@utils.arg('--volumes',
           metavar='<volumes>',
           type=int, default=None,
           help='The new "volumes" quota value. Default=None.')
@utils.arg('--snapshots',
           metavar='<snapshots>',
           type=int, default=None,
           help='The new "snapshots" quota value. Default=None.')
@utils.arg('--gigabytes',
           metavar='<gigabytes>',
           type=int, default=None,
           help='The new "gigabytes" quota value. Default=None.')
@utils.arg('--volume-type',
           metavar='<volume_type_name>',
           default=None,
           help='Volume type. Default=None.')
@utils.service_type('volumev2')
def do_quota_class_update(cs, args):
    """Updates quotas for a quota class."""
    # argparse converts the 'class-name' positional to args.class_name.
    manager = cs.quota_classes
    _quota_update(manager, args.class_name, args)
@utils.service_type('volumev2')
def do_absolute_limits(cs, args):
    """Lists absolute limits for a user."""
    absolute = cs.limits.get().absolute
    utils.print_list(absolute, ['Name', 'Value'])
@utils.service_type('volumev2')
def do_rate_limits(cs, args):
    """Lists rate limits for a user."""
    rate = cs.limits.get().rate
    utils.print_list(rate,
                     ['Verb', 'URI', 'Value', 'Remain', 'Unit',
                      'Next_Available'])
def _find_volume_type(cs, vtype):
    """Gets a volume type by name or ID."""
    manager = cs.volume_types
    return utils.find_resource(manager, vtype)
@utils.arg('volume',
           metavar='<volume>',
           help='Name or ID of volume to snapshot.')
@utils.arg('--force',
           metavar='<True|False>',
           help='Enables or disables upload of '
           'a volume that is attached to an instance. '
           'Default=False.',
           default=False)
@utils.arg('--container-format',
           metavar='<container-format>',
           help='Container format type. '
           'Default is bare.',
           default='bare')
@utils.arg('--container_format',
           help=argparse.SUPPRESS)
@utils.arg('--disk-format',
           metavar='<disk-format>',
           help='Disk format type. '
           'Default is raw.',
           default='raw')
@utils.arg('--disk_format',
           help=argparse.SUPPRESS)
@utils.arg('image_name',
           metavar='<image-name>',
           help='The new image name.')
@utils.arg('--image_name',
           help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_upload_to_image(cs, args):
    """Uploads volume to Image Service as an image."""
    volume = utils.find_volume(cs, args.volume)
    image = volume.upload_to_image(args.force,
                                   args.image_name,
                                   args.container_format,
                                   args.disk_format)
    _print_volume_image(image)
@utils.arg('volume', metavar='<volume>', help='ID of volume to migrate.')
@utils.arg('host', metavar='<host>', help='Destination host.')
@utils.arg('--force-host-copy', metavar='<True|False>',
           choices=['True', 'False'], required=False,
           help='Enables or disables generic host-based '
           'force-migration, which bypasses driver '
           'optimizations. Default=False.',
           default=False)
@utils.service_type('volumev2')
def do_migrate(cs, args):
    """Migrates volume to a new host."""
    target = utils.find_volume(cs, args.volume)
    target.migrate_volume(args.host, args.force_host_copy)
@utils.arg('volume', metavar='<volume>',
           help='Name or ID of volume for which to modify type.')
@utils.arg('new_type', metavar='<volume-type>', help='New volume type.')
@utils.arg('--migration-policy', metavar='<never|on-demand>', required=False,
           choices=['never', 'on-demand'], default='never',
           help='Migration policy during retype of volume.')
@utils.service_type('volumev2')
def do_retype(cs, args):
    """Changes the volume type for a volume."""
    target = utils.find_volume(cs, args.volume)
    target.retype(args.new_type, args.migration_policy)
@utils.arg('volume', metavar='<volume>',
           help='Name or ID of volume to backup.')
@utils.arg('--container', metavar='<container>',
           help='Backup container name. Default=None.',
           default=None)
@utils.arg('--display-name',
           help=argparse.SUPPRESS)
@utils.arg('--name', metavar='<name>',
           help='Backup name. Default=None.',
           default=None)
@utils.arg('--display-description',
           help=argparse.SUPPRESS)
@utils.arg('--description',
           metavar='<description>',
           default=None,
           help='Backup description. Default=None.')
@utils.arg('--force',
           metavar='<True|False>',
           default=False,
           help='force to backup in-use volume or not.')
@utils.service_type('volumev2')
def do_backup_create(cs, args):
    """Creates a volume backup."""
    # Deprecated --display-* options take effect as the modern ones.
    if args.display_name is not None:
        args.name = args.display_name
    if args.display_description is not None:
        args.description = args.display_description

    volume = utils.find_volume(cs, args.volume)
    backup = cs.backups.create(volume.id,
                               args.container,
                               args.name,
                               args.description,
                               args.force)

    info = {"volume_id": volume.id}
    info.update(backup._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.arg('backup', metavar='<backup>', help='Name or ID of backup.')
@utils.service_type('volumev2')
def do_backup_show(cs, args):
    """Shows backup details."""
    backup = _find_backup(cs, args.backup)
    info = dict(backup._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.service_type('volumev2')
def do_backup_list(cs, args):
    """Lists all backups."""
    columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'Object Count',
               'Container']
    utils.print_list(cs.backups.list(), columns)
@utils.arg('backup', metavar='<backup>',
           help='Name or ID of backup to delete.')
@utils.service_type('volumev2')
def do_backup_delete(cs, args):
    """Removes a backup."""
    _find_backup(cs, args.backup).delete()
@utils.arg('backup', metavar='<backup>',
           help='ID of backup to restore.')
@utils.arg('--volume-id', metavar='<volume>',
           help=argparse.SUPPRESS,
           default=None)
@utils.arg('--volume', metavar='<volume>',
           help='Name or ID of volume to which to restore. '
           'Default=None.',
           default=None)
@utils.service_type('volumev2')
def do_backup_restore(cs, args):
    """Restores a backup."""
    # --volume takes precedence over the hidden legacy --volume-id.
    target = args.volume or args.volume_id
    volume_id = utils.find_volume(cs, target).id if target else None
    cs.restores.restore(args.backup, volume_id)
@utils.arg('backup', metavar='<backup>',
           help='ID of the backup to export.')
@utils.service_type('volumev2')
def do_backup_export(cs, args):
    """Export backup metadata record."""
    record = cs.backups.export_record(args.backup)
    utils.print_dict(record)
@utils.arg('backup_service', metavar='<backup_service>',
           help='Backup service to use for importing the backup.')
@utils.arg('backup_url', metavar='<backup_url>',
           help='Backup URL for importing the backup metadata.')
@utils.service_type('volumev2')
def do_backup_import(cs, args):
    """Import backup metadata record."""
    record = cs.backups.import_record(args.backup_service, args.backup_url)
    record.pop('links', None)
    utils.print_dict(record)
@utils.arg('volume', metavar='<volume>',
           help='Name or ID of volume to transfer.')
@utils.arg('--name',
           metavar='<name>',
           default=None,
           help='Transfer name. Default=None.')
@utils.arg('--display-name',
           help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_transfer_create(cs, args):
    """Creates a volume transfer."""
    # The deprecated --display-name alias feeds the modern --name option.
    if args.display_name is not None:
        args.name = args.display_name

    volume = utils.find_volume(cs, args.volume)
    transfer = cs.transfers.create(volume.id,
                                   args.name)
    info = dict(transfer._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.arg('transfer', metavar='<transfer>',
           help='Name or ID of transfer to delete.')
@utils.service_type('volumev2')
def do_transfer_delete(cs, args):
    """Undoes a transfer."""
    _find_transfer(cs, args.transfer).delete()
@utils.arg('transfer', metavar='<transfer>',
           help='ID of transfer to accept.')
@utils.arg('auth_key', metavar='<auth_key>',
           help='Authentication key of transfer to accept.')
@utils.service_type('volumev2')
def do_transfer_accept(cs, args):
    """Accepts a volume transfer."""
    transfer = cs.transfers.accept(args.transfer, args.auth_key)
    info = dict(transfer._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.service_type('volumev2')
def do_transfer_list(cs, args):
    """Lists all transfers."""
    columns = ['ID', 'Volume ID', 'Name']
    utils.print_list(cs.transfers.list(), columns)
@utils.arg('transfer', metavar='<transfer>',
           # Fixed copy-pasted help text that said "to accept".
           help='Name or ID of transfer to show.')
@utils.service_type('volumev2')
def do_transfer_show(cs, args):
    """Shows transfer details."""
    transfer = _find_transfer(cs, args.transfer)
    info = dict(transfer._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.arg('volume', metavar='<volume>',
           help='Name or ID of volume to extend.')
@utils.arg('new_size',
           metavar='<new_size>',
           type=int,
           help='New size of volume, in GBs.')
@utils.service_type('volumev2')
def do_extend(cs, args):
    """Attempts to extend size of an existing volume."""
    target = utils.find_volume(cs, args.volume)
    cs.volumes.extend(target, args.new_size)
@utils.arg('--host', metavar='<hostname>', default=None,
           help='Host name. Default=None.')
@utils.arg('--binary', metavar='<binary>', default=None,
           help='Service binary. Default=None.')
@utils.service_type('volumev2')
def do_service_list(cs, args):
    """Lists all services. Filter by host and service binary."""
    services = cs.services.list(host=args.host, binary=args.binary)
    columns = ["Binary", "Host", "Zone", "Status", "State", "Updated_at"]
    # NOTE(jay-lau-513): we check if the response has disabled_reason
    # so as not to add the column when the extended ext is not enabled.
    if services and hasattr(services[0], 'disabled_reason'):
        columns.append("Disabled Reason")
    utils.print_list(services, columns)
@utils.arg('host', metavar='<hostname>', help='Host name.')
@utils.arg('binary', metavar='<binary>', help='Service binary.')
@utils.service_type('volumev2')
def do_service_enable(cs, args):
    """Enables the service."""
    svc = cs.services.enable(args.host, args.binary)
    utils.print_list([svc], ["Host", "Binary", "Status"])
@utils.arg('host', metavar='<hostname>', help='Host name.')
@utils.arg('binary', metavar='<binary>', help='Service binary.')
@utils.arg('--reason', metavar='<reason>',
           help='Reason for disabling service.')
@utils.service_type('volumev2')
def do_service_disable(cs, args):
    """Disables the service."""
    columns = ["Host", "Binary", "Status"]
    if args.reason:
        # Record the operator-supplied reason and show it in the output.
        columns.append('Disabled Reason')
        svc = cs.services.disable_log_reason(args.host, args.binary,
                                             args.reason)
    else:
        svc = cs.services.disable(args.host, args.binary)
    utils.print_list([svc], columns)
def _treeizeAvailabilityZone(zone):
    """Builds a tree view for availability zones.

    Returns a flat list of AvailabilityZone rows: the zone itself, then
    one '|-' row per host and one '| |-' row per service under it.
    """
    AvailabilityZone = availability_zones.AvailabilityZone

    def _make_row(name, state):
        # Each tree row is a fresh copy of the zone object with the
        # display fields overridden.
        row = AvailabilityZone(zone.manager,
                               copy.deepcopy(zone._info), zone._loaded)
        row.zoneName = name
        row.zoneState = state
        row._info['zoneName'] = name
        row._info['zoneState'] = state
        return row

    result = [_make_row(
        zone.zoneName,
        'available' if zone.zoneState['available'] else 'not available')]

    if getattr(zone, "hosts", None):
        for (host, services) in zone.hosts.items():
            result.append(_make_row('|- %s' % host, ''))
            for (svc, state) in services.items():
                state_text = '%s %s %s' % (
                    'enabled' if state['active'] else 'disabled',
                    ':-)' if state['available'] else 'XXX',
                    state['updated_at'])
                result.append(_make_row('| |- %s' % svc, state_text))
    return result
@utils.service_type('volumev2')
def do_availability_zone_list(cs, _args):
    """Lists all availability zones."""
    try:
        availability_zones = cs.availability_zones.list()
    except exceptions.Forbidden as e:  # policy doesn't allow probably
        # Fall back to the non-detailed listing; if that also fails,
        # surface the original Forbidden error.
        try:
            availability_zones = cs.availability_zones.list(detailed=False)
        except Exception:
            raise e

    result = []
    for zone in availability_zones:
        result.extend(_treeizeAvailabilityZone(zone))
    _translate_availability_zone_keys(result)
    utils.print_list(result, ['Name', 'Status'])
def _print_volume_encryption_type_list(encryption_types):
    """
    Lists volume encryption types.

    :param encryption_types: a list of :class: VolumeEncryptionType instances
    """
    columns = ['Volume Type ID', 'Provider', 'Cipher', 'Key Size',
               'Control Location']
    utils.print_list(encryption_types, columns)
@utils.service_type('volumev2')
def do_encryption_type_list(cs, args):
    """Shows encryption type details for volume types. Admin only."""
    result = cs.volume_encryption_types.list()
    # Reuse the shared printer instead of duplicating the column list, so
    # the columns cannot drift from the other encryption-type commands.
    _print_volume_encryption_type_list(result)
@utils.arg('volume_type',
           metavar='<volume_type>',
           type=str,
           help="Name or ID of volume type.")
@utils.service_type('volumev2')
def do_encryption_type_show(cs, args):
    """Shows encryption type details for a volume type. Admin only."""
    volume_type = _find_volume_type(cs, args.volume_type)
    result = cs.volume_encryption_types.get(volume_type)
    # Display result or an empty table if no result
    rows = [result] if hasattr(result, 'volume_type_id') else []
    _print_volume_encryption_type_list(rows)
@utils.arg('volume_type',
           metavar='<volume_type>',
           type=str,
           help="Name or ID of volume type.")
@utils.arg('provider',
           metavar='<provider>',
           type=str,
           help='The class that provides encryption support. '
                'For example, LuksEncryptor.')
@utils.arg('--cipher',
           metavar='<cipher>',
           type=str,
           required=False,
           default=None,
           help='The encryption algorithm or mode. '
                'For example, aes-xts-plain64. Default=None.')
@utils.arg('--key_size',
           metavar='<key_size>',
           type=int,
           required=False,
           default=None,
           help='Size of encryption key, in bits. '
                'For example, 128 or 256. Default=None.')
@utils.arg('--control_location',
           metavar='<control_location>',
           choices=['front-end', 'back-end'],
           type=str,
           required=False,
           default='front-end',
           help='Notional service where encryption is performed. '
                'Valid values are "front-end" or "back-end." '
                'For example, front-end=Nova. Default is "front-end."')
@utils.service_type('volumev2')
def do_encryption_type_create(cs, args):
    """Creates encryption type for a volume type. Admin only."""
    volume_type = _find_volume_type(cs, args.volume_type)
    # Assemble the request body from the CLI options.
    body = {'provider': args.provider,
            'cipher': args.cipher,
            'key_size': args.key_size,
            'control_location': args.control_location}
    result = cs.volume_encryption_types.create(volume_type, body)
    _print_volume_encryption_type_list([result])
@utils.arg('volume_type',
           metavar='<volume_type>',
           type=str,
           help="Name or ID of volume type.")
@utils.service_type('volumev2')
def do_encryption_type_delete(cs, args):
    """Deletes encryption type for a volume type. Admin only."""
    vtype = _find_volume_type(cs, args.volume_type)
    cs.volume_encryption_types.delete(vtype)
def _print_qos_specs(specs):
    """Prints the properties of a single QoS specs object."""
    utils.print_dict(specs._info)
def _print_qos_specs_list(q_specs):
    """Prints a table of QoS specs."""
    columns = ['ID', 'Name', 'Consumer', 'specs']
    utils.print_list(q_specs, columns)
def _print_qos_specs_and_associations_list(q_specs):
    """Prints QoS specs together with their associations.

    Currently shows the same columns as _print_qos_specs_list, so delegate
    to it instead of duplicating the column list — the two helpers were
    byte-identical and could silently drift apart.
    """
    _print_qos_specs_list(q_specs)
def _print_associations_list(associations):
    """Prints a table of QoS associations."""
    columns = ['Association_Type', 'Name', 'ID']
    utils.print_list(associations, columns)
@utils.arg('name',
           metavar='<name>',
           help="Name of new QoS specifications.")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help="QoS specifications.")
@utils.service_type('volumev2')
def do_qos_create(cs, args):
    """Creates a qos specs."""
    specs = _extract_metadata(args) if args.metadata is not None else None
    _print_qos_specs(cs.qos_specs.create(args.name, specs))
@utils.service_type('volumev2')
def do_qos_list(cs, args):
    """Lists qos specs."""
    _print_qos_specs_list(cs.qos_specs.list())
@utils.arg('qos_specs', metavar='<qos_specs>',
           help="ID of QoS specifications to show.")
@utils.service_type('volumev2')
def do_qos_show(cs, args):
    """Shows qos specs details."""
    _print_qos_specs(_find_qos_specs(cs, args.qos_specs))
@utils.arg('qos_specs', metavar='<qos_specs>',
           help="ID of QoS specifications to delete.")
@utils.arg('--force',
           metavar='<True|False>',
           default=False,
           help='Enables or disables deletion of in-use '
                'QoS specifications. Default=False.')
@utils.service_type('volumev2')
def do_qos_delete(cs, args):
    """Deletes a specified qos specs."""
    # --force arrives as a string; coerce it to a real boolean.
    force = strutils.bool_from_string(args.force)
    cs.qos_specs.delete(_find_qos_specs(cs, args.qos_specs), force)
@utils.arg('qos_specs', metavar='<qos_specs>',
           help='ID of QoS specifications.')
@utils.arg('vol_type_id', metavar='<volume_type_id>',
           help='ID of volume type with which to associate '
                'QoS specifications.')
@utils.service_type('volumev2')
def do_qos_associate(cs, args):
    """Associates qos specs with specified volume type."""
    cs.qos_specs.associate(args.qos_specs, args.vol_type_id)
@utils.arg('qos_specs', metavar='<qos_specs>',
           help='ID of QoS specifications.')
@utils.arg('vol_type_id', metavar='<volume_type_id>',
           # Fixed copy-pasted help that said "associate" on the
           # disassociate command.
           help='ID of volume type from which to disassociate '
                'QoS specifications.')
@utils.service_type('volumev2')
def do_qos_disassociate(cs, args):
    """Disassociates qos specs from specified volume type."""
    cs.qos_specs.disassociate(args.qos_specs, args.vol_type_id)
@utils.arg('qos_specs', metavar='<qos_specs>',
           help='ID of QoS specifications on which to operate.')
@utils.service_type('volumev2')
def do_qos_disassociate_all(cs, args):
    """Disassociates qos specs from all its associations."""
    specs_id = args.qos_specs
    cs.qos_specs.disassociate_all(specs_id)
@utils.arg('qos_specs', metavar='<qos_specs>',
           help='ID of QoS specifications.')
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata', metavar='key=value',
           nargs='+',
           default=[],
           help='Metadata key and value pair to set or unset. '
                'For unset, specify only the key.')
def do_qos_key(cs, args):
    """Sets or unsets specifications for a qos spec."""
    specs = _extract_metadata(args)
    # argparse restricts action to exactly 'set' or 'unset'.
    if args.action == 'unset':
        cs.qos_specs.unset_keys(args.qos_specs, list(specs))
    elif args.action == 'set':
        cs.qos_specs.set_keys(args.qos_specs, specs)
@utils.arg('qos_specs', metavar='<qos_specs>',
           help='ID of QoS specifications.')
@utils.service_type('volumev2')
def do_qos_get_association(cs, args):
    """Lists all associations for specified qos specs."""
    _print_associations_list(cs.qos_specs.get_associations(args.qos_specs))
@utils.arg('snapshot',
           metavar='<snapshot>',
           help='ID of snapshot for which to update metadata.')
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help='Metadata key and value pair to set or unset. '
                'For unset, specify only the key.')
@utils.service_type('volumev2')
def do_snapshot_metadata(cs, args):
    """Sets or deletes snapshot metadata."""
    snapshot = _find_volume_snapshot(cs, args.snapshot)
    metadata = _extract_metadata(args)
    if args.action == 'set':
        # Show the resulting metadata after the update.
        utils.print_dict(snapshot.set_metadata(metadata)._info)
    elif args.action == 'unset':
        snapshot.delete_metadata(list(metadata.keys()))
@utils.arg('snapshot', metavar='<snapshot>',
           help='ID of snapshot.')
@utils.service_type('volumev2')
def do_snapshot_metadata_show(cs, args):
    """Shows snapshot metadata."""
    snap = _find_volume_snapshot(cs, args.snapshot)
    utils.print_dict(snap._info['metadata'], 'Metadata-property')
@utils.arg('volume', metavar='<volume>',
           help='ID of volume.')
@utils.service_type('volumev2')
def do_metadata_show(cs, args):
    """Shows volume metadata."""
    vol = utils.find_volume(cs, args.volume)
    utils.print_dict(vol._info['metadata'], 'Metadata-property')
@utils.arg('volume',
           metavar='<volume>',
           help='ID of volume for which to update metadata.')
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help='Metadata key and value pair or pairs to update.')
@utils.service_type('volumev2')
def do_metadata_update_all(cs, args):
    """Updates volume metadata."""
    volume = utils.find_volume(cs, args.volume)
    updated = volume.update_all_metadata(_extract_metadata(args))
    utils.print_dict(updated)
@utils.arg('snapshot',
           metavar='<snapshot>',
           help='ID of snapshot for which to update metadata.')
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help='Metadata key and value pair to update.')
@utils.service_type('volumev2')
def do_snapshot_metadata_update_all(cs, args):
    """Updates snapshot metadata."""
    snapshot = _find_volume_snapshot(cs, args.snapshot)
    updated = snapshot.update_all_metadata(_extract_metadata(args))
    utils.print_dict(updated)
@utils.arg('volume', metavar='<volume>', help='ID of volume to update.')
@utils.arg('read_only',
           metavar='<True|true|False|false>',
           choices=['True', 'true', 'False', 'false'],
           help='Enables or disables update of volume to '
                'read-only access mode.')
@utils.service_type('volumev2')
def do_readonly_mode_update(cs, args):
    """Updates volume read-only access-mode flag."""
    volume = utils.find_volume(cs, args.volume)
    # The flag arrives as a string; coerce it to a real boolean.
    flag = strutils.bool_from_string(args.read_only)
    cs.volumes.update_readonly_flag(volume, flag)
@utils.arg('volume', metavar='<volume>', help='ID of the volume to update.')
@utils.arg('bootable',
           metavar='<True|true|False|false>',
           choices=['True', 'true', 'False', 'false'],
           help='Flag to indicate whether volume is bootable.')
@utils.service_type('volumev2')
def do_set_bootable(cs, args):
    """Update bootable status of a volume."""
    volume = utils.find_volume(cs, args.volume)
    # The flag arrives as a string; coerce it to a real boolean.
    flag = strutils.bool_from_string(args.bootable)
    cs.volumes.set_bootable(volume, flag)
@utils.arg('host',
           metavar='<host>',
           help='Cinder host on which the existing volume resides; '
                'takes the form: host@backend-name#pool')
@utils.arg('identifier',
           metavar='<identifier>',
           help='Name or other Identifier for existing volume')
@utils.arg('--id-type',
           metavar='<id-type>',
           default='source-name',
           help='Type of backend device identifier provided, '
                'typically source-name or source-id (Default=source-name)')
@utils.arg('--name',
           metavar='<name>',
           help='Volume name (Default=None)')
@utils.arg('--description',
           metavar='<description>',
           help='Volume description (Default=None)')
@utils.arg('--volume-type',
           metavar='<volume-type>',
           help='Volume type (Default=None)')
@utils.arg('--availability-zone',
           metavar='<availability-zone>',
           help='Availability zone for volume (Default=None)')
@utils.arg('--metadata',
           type=str,
           nargs='*',
           metavar='<key=value>',
           help='Metadata key=value pairs (Default=None)')
@utils.arg('--bootable',
           action='store_true',
           help='Specifies that the newly created volume should be'
                ' marked as bootable')
@utils.service_type('volumev2')
def do_manage(cs, args):
    """Manage an existing volume."""
    volume_metadata = None
    if args.metadata is not None:
        volume_metadata = _extract_metadata(args)

    # Build a dictionary of key/value pairs to pass to the API.
    ref_dict = {args.id_type: args.identifier}

    # The recommended way to specify an existing volume is by ID or name,
    # and have the Cinder driver look for 'source-name' or 'source-id'
    # elements in the ref structure. Special --source-name / --source-id
    # CLI options (when present on args) add the matching element; the
    # keys keep their hyphens to mirror what the user typed.
    if getattr(args, 'source_name', None) is not None:
        ref_dict['source-name'] = args.source_name
    if getattr(args, 'source_id', None) is not None:
        ref_dict['source-id'] = args.source_id

    volume = cs.volumes.manage(host=args.host,
                               ref=ref_dict,
                               name=args.name,
                               description=args.description,
                               volume_type=args.volume_type,
                               availability_zone=args.availability_zone,
                               metadata=volume_metadata,
                               bootable=args.bootable)

    # Re-fetch so the printed details reflect the server-side state.
    info = dict(cs.volumes.get(volume.id)._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.arg('volume', metavar='<volume>',
           help='Name or ID of the volume to unmanage.')
@utils.service_type('volumev2')
def do_unmanage(cs, args):
    """Stop managing a volume."""
    target = utils.find_volume(cs, args.volume)
    cs.volumes.unmanage(target.id)
@utils.arg('volume', metavar='<volume>',
           help='Name or ID of the volume to promote.')
@utils.service_type('volumev2')
def do_replication_promote(cs, args):
    """Promote a secondary volume to primary for a relationship."""
    target = utils.find_volume(cs, args.volume)
    cs.volumes.promote(target.id)
@utils.arg('volume', metavar='<volume>',
           help='Name or ID of the volume to reenable replication.')
@utils.service_type('volumev2')
def do_replication_reenable(cs, args):
    """Sync the secondary volume with primary for a relationship."""
    target = utils.find_volume(cs, args.volume)
    cs.volumes.reenable(target.id)
@utils.arg('--all-tenants',
           dest='all_tenants',
           metavar='<0|1>',
           nargs='?',
           type=int,
           const=1,
           default=0,
           help='Shows details for all tenants. Admin only.')
@utils.service_type('volumev2')
def do_consisgroup_list(cs, args):
    """Lists all consistencygroups."""
    # Honor the declared --all-tenants flag (and the ALL_TENANTS
    # environment variable) instead of silently ignoring it, matching
    # the behavior of do_cgsnapshot_list.
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    consistencygroups = cs.consistencygroups.list(
        search_opts={'all_tenants': all_tenants})
    columns = ['ID', 'Status', 'Name']
    utils.print_list(consistencygroups, columns)
@utils.arg('consistencygroup',
           metavar='<consistencygroup>',
           help='Name or ID of a consistency group.')
@utils.service_type('volumev2')
def do_consisgroup_show(cs, args):
    """Shows details of a consistency group."""
    consistencygroup = _find_consistencygroup(cs, args.consistencygroup)
    info = dict(consistencygroup._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.arg('volumetypes',
           metavar='<volume-types>',
           help='Volume types.')
@utils.arg('--name',
           metavar='<name>',
           help='Name of a consistency group.')
@utils.arg('--description',
           metavar='<description>',
           default=None,
           help='Description of a consistency group. Default=None.')
@utils.arg('--availability-zone',
           metavar='<availability-zone>',
           default=None,
           help='Availability zone for volume. Default=None.')
@utils.service_type('volumev2')
def do_consisgroup_create(cs, args):
    """Creates a consistency group."""
    created = cs.consistencygroups.create(
        args.volumetypes,
        args.name,
        args.description,
        availability_zone=args.availability_zone)

    # Re-fetch so the printed details reflect the server-side state.
    info = dict(cs.consistencygroups.get(created.id)._info)
    info.pop('links', None)
    utils.print_dict(info)
@utils.arg('consistencygroup',
           metavar='<consistencygroup>', nargs='+',
           help='Name or ID of one or more consistency groups '
                'to be deleted.')
@utils.arg('--force',
           action='store_true',
           help='Allows or disallows consistency groups '
                'to be deleted. If the consistency group is empty, '
                'it can be deleted without the force flag. '
                'If the consistency group is not empty, the force '
                'flag is required for it to be deleted.',
           default=False)
@utils.service_type('volumev2')
def do_consisgroup_delete(cs, args):
    """Removes one or more consistency groups."""
    # Best-effort deletion: report every failure, but only abort with a
    # CommandError when *none* of the requested groups could be deleted.
    failures = 0
    for group_ref in args.consistencygroup:
        try:
            group = _find_consistencygroup(cs, group_ref)
            group.delete(args.force)
        except Exception as exc:
            failures += 1
            print("Delete for consistency group %s failed: %s" %
                  (group_ref, exc))
    if failures == len(args.consistencygroup):
        raise exceptions.CommandError("Unable to delete any of specified "
                                      "consistency groups.")
@utils.arg('--all-tenants',
           dest='all_tenants',
           metavar='<0|1>',
           nargs='?',
           type=int,
           const=1,
           default=0,
           help='Shows details for all tenants. Admin only.')
@utils.arg('--status',
           metavar='<status>',
           default=None,
           help='Filters results by a status. Default=None.')
@utils.arg('--consistencygroup-id',
           metavar='<consistencygroup_id>',
           default=None,
           help='Filters results by a consistency group ID. Default=None.')
@utils.service_type('volumev2')
def do_cgsnapshot_list(cs, args):
    """Lists all cgsnapshots."""
    # Fix: the original issued an extra, unfiltered cs.cgsnapshots.list()
    # call here whose result was immediately discarded - one wasted REST
    # round trip per invocation.  Only the filtered query below is needed.
    # The ALL_TENANTS environment variable overrides the --all-tenants flag.
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'status': args.status,
        'consistencygroup_id': args.consistencygroup_id,
    }
    cgsnapshots = cs.cgsnapshots.list(search_opts=search_opts)
    columns = ['ID', 'Status', 'Name']
    utils.print_list(cgsnapshots, columns)
@utils.arg('cgsnapshot',
           metavar='<cgsnapshot>',
           help='Name or ID of cgsnapshot.')
@utils.service_type('volumev2')
def do_cgsnapshot_show(cs, args):
    """Shows cgsnapshot details."""
    # Resolve and print the snapshot's raw info without the 'links' noise.
    snapshot = _find_cgsnapshot(cs, args.cgsnapshot)
    details = dict(snapshot._info)
    details.pop('links', None)
    utils.print_dict(details)
@utils.arg('consistencygroup',
           metavar='<consistencygroup>',
           help='Name or ID of a consistency group.')
@utils.arg('--name',
           metavar='<name>',
           default=None,
           help='Cgsnapshot name. Default=None.')
@utils.arg('--description',
           metavar='<description>',
           default=None,
           help='Cgsnapshot description. Default=None.')
@utils.service_type('volumev2')
def do_cgsnapshot_create(cs, args):
    """Creates a cgsnapshot."""
    # Snapshots are taken against a whole consistency group, so resolve
    # the group first; re-read the snapshot afterwards so the printed
    # details reflect the server-side state.
    group = _find_consistencygroup(cs, args.consistencygroup)
    created = cs.cgsnapshots.create(
        group.id,
        args.name,
        args.description)
    refreshed = cs.cgsnapshots.get(created.id)
    details = dict(refreshed._info)
    details.pop('links', None)
    utils.print_dict(details)
@utils.arg('cgsnapshot',
           metavar='<cgsnapshot>', nargs='+',
           help='Name or ID of one or more cgsnapshots to be deleted.')
@utils.service_type('volumev2')
def do_cgsnapshot_delete(cs, args):
    """Removes one or more cgsnapshots."""
    # Best-effort deletion: report each failure but only raise when
    # every single requested snapshot failed to delete.
    failures = 0
    for snapshot_ref in args.cgsnapshot:
        try:
            _find_cgsnapshot(cs, snapshot_ref).delete()
        except Exception as exc:
            failures += 1
            print("Delete for cgsnapshot %s failed: %s" % (snapshot_ref, exc))
    if failures == len(args.cgsnapshot):
        raise exceptions.CommandError("Unable to delete any of specified "
                                      "cgsnapshots.")
| 33.838442 | 83 | 0.611907 |
5cf2599bffc5fecdce3aa41431d633a282b39396 | 913 | py | Python | migrations/versions/375bc72d1423_.py | Zarkantho/placebo.gov | 829f3e2c45b7d0454e7245d1dc688687b2ffeb54 | [
"BSD-3-Clause"
] | null | null | null | migrations/versions/375bc72d1423_.py | Zarkantho/placebo.gov | 829f3e2c45b7d0454e7245d1dc688687b2ffeb54 | [
"BSD-3-Clause"
] | null | null | null | migrations/versions/375bc72d1423_.py | Zarkantho/placebo.gov | 829f3e2c45b7d0454e7245d1dc688687b2ffeb54 | [
"BSD-3-Clause"
] | null | null | null | """empty message
Revision ID: 375bc72d1423
Revises: 4cd5bb2931c
Create Date: 2015-05-09 22:02:15.181823
"""
# revision identifiers, used by Alembic.
revision = '375bc72d1423'
down_revision = '4cd5bb2931c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: drop the 'posts' table entirely.
    # (Irreversible data loss - downgrade() recreates the schema only.)
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('posts')
    ### end Alembic commands ###
def downgrade():
    # Reverse migration: recreate the 'posts' table schema.  Row contents
    # removed by upgrade() are NOT restored.
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('posts',
    sa.Column('id', sa.INTEGER(), nullable=False),
    sa.Column('name', sa.VARCHAR(length=80), nullable=False),
    sa.Column('title', sa.TEXT(), nullable=False),
    sa.Column('content', sa.TEXT(), nullable=False),
    sa.Column('user_id', sa.INTEGER(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], [u'users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
| 26.085714 | 63 | 0.670318 |
6be5bebe5ada0f086eeb10ef2597c8cb31f31db1 | 23,124 | py | Python | evennia/utils/dbserialize.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | null | null | null | evennia/utils/dbserialize.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | null | null | null | evennia/utils/dbserialize.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | null | null | null | """
This module handles serialization of arbitrary python structural data,
intended primarily to be stored in the database. It also supports
storing Django model instances (which plain pickle cannot do).
This serialization is used internally by the server, notably for
storing data in Attributes and for piping data to process pools.
The purpose of dbserialize is to handle all forms of data. For
well-structured non-arbitrary exchange, such as communicating with a
rich web client, a simpler JSON serialization makes more sense.
This module also implements the `SaverList`, `SaverDict` and `SaverSet`
classes. These are iterables that track their position in a nested
structure and makes sure to send updates up to their root. This is
used by Attributes - without it, one would not be able to update mutables
in-situ, e.g `obj.db.mynestedlist[3][5] = 3` would never be saved and
be out of sync with the database.
"""
from builtins import object, int
from functools import update_wrapper
from collections import defaultdict, MutableSequence, MutableSet, MutableMapping
from collections import OrderedDict, deque
try:
from pickle import dumps, loads
except ImportError:
from pickle import dumps, loads
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import SafeString, SafeBytes
from evennia.utils.utils import uses_database, is_iter, to_str, to_bytes
from evennia.utils import logger
__all__ = ("to_pickle", "from_pickle", "do_pickle", "do_unpickle",
"dbserialize", "dbunserialize")
PICKLE_PROTOCOL = 2
# message to send if editing an already deleted Attribute in a savermutable
_ERROR_DELETED_ATTR = (
"{cls_name} {obj} has had its root Attribute deleted. "
"It must be cast to a {non_saver_name} before it can be modified further.")
def _get_mysql_db_version():
"""
This is a helper method for specifically getting the version
string of a MySQL database.
Returns:
mysql_version (str): The currently used mysql database
version.
"""
from django.db import connection
conn = connection.cursor()
conn.execute("SELECT VERSION()")
version = conn.fetchone()
return version and str(version[0]) or ""
# initialization and helpers
_GA = object.__getattribute__
_SA = object.__setattr__
_FROM_MODEL_MAP = None
_TO_MODEL_MAP = None
_IGNORE_DATETIME_MODELS = None
_SESSION_HANDLER = None
def _IS_PACKED_DBOBJ(o):
return isinstance(o, tuple) and len(o) == 4 and o[0] == '__packed_dbobj__'
def _IS_PACKED_SESSION(o):
return isinstance(o, tuple) and len(o) == 3 and o[0] == '__packed_session__'
if uses_database("mysql") and _get_mysql_db_version() < '5.6.4':
# mysql <5.6.4 don't support millisecond precision
_DATESTRING = "%Y:%m:%d-%H:%M:%S:000000"
else:
_DATESTRING = "%Y:%m:%d-%H:%M:%S:%f"
def _TO_DATESTRING(obj):
"""
Creates datestring hash.
Args:
obj (Object): Database object.
Returns:
datestring (str): A datestring hash.
"""
try:
return _GA(obj, "db_date_created").strftime(_DATESTRING)
except AttributeError:
# this can happen if object is not yet saved - no datestring is then set
try:
obj.save()
except AttributeError:
# we have received a None object, for example due to an erroneous save.
return None
return _GA(obj, "db_date_created").strftime(_DATESTRING)
def _init_globals():
"""Lazy importing to avoid circular import issues"""
global _FROM_MODEL_MAP, _TO_MODEL_MAP, _SESSION_HANDLER, _IGNORE_DATETIME_MODELS
if not _FROM_MODEL_MAP:
_FROM_MODEL_MAP = defaultdict(str)
_FROM_MODEL_MAP.update(dict((c.model, c.natural_key()) for c in ContentType.objects.all()))
if not _TO_MODEL_MAP:
from django.conf import settings
_TO_MODEL_MAP = defaultdict(str)
_TO_MODEL_MAP.update(dict((c.natural_key(), c.model_class()) for c in ContentType.objects.all()))
_IGNORE_DATETIME_MODELS = []
for src_key, dst_key in settings.ATTRIBUTE_STORED_MODEL_RENAME:
_TO_MODEL_MAP[src_key] = _TO_MODEL_MAP.get(dst_key, None)
_IGNORE_DATETIME_MODELS.append(src_key)
if not _SESSION_HANDLER:
from evennia.server.sessionhandler import SESSION_HANDLER as _SESSION_HANDLER
#
# SaverList, SaverDict, SaverSet - Attribute-specific helper classes and functions
#
def _save(method):
"""method decorator that saves data to Attribute"""
def save_wrapper(self, *args, **kwargs):
self.__doc__ = method.__doc__
ret = method(self, *args, **kwargs)
self._save_tree()
return ret
return update_wrapper(save_wrapper, method)
class _SaverMutable(object):
"""
Parent class for properly handling of nested mutables in
an Attribute. If not used something like
obj.db.mylist[1][2] = "test" (allocation to a nested list)
will not save the updated value to the database.
"""
def __init__(self, *args, **kwargs):
"""store all properties for tracking the tree"""
self._parent = kwargs.pop("_parent", None)
self._db_obj = kwargs.pop("_db_obj", None)
self._data = None
def __bool__(self):
"""Make sure to evaluate as False if empty"""
return bool(self._data)
def _save_tree(self):
"""recursively traverse back up the tree, save when we reach the root"""
if self._parent:
self._parent._save_tree()
elif self._db_obj:
if not self._db_obj.pk:
cls_name = self.__class__.__name__
try:
non_saver_name = cls_name.split("_Saver", 1)[1].lower()
except IndexError:
non_saver_name = cls_name
raise ValueError(_ERROR_DELETED_ATTR.format(cls_name=cls_name, obj=self,
non_saver_name=non_saver_name))
self._db_obj.value = self
else:
logger.log_err("_SaverMutable %s has no root Attribute to save to." % self)
def _convert_mutables(self, data):
"""converts mutables to Saver* variants and assigns ._parent property"""
def process_tree(item, parent):
"""recursively populate the tree, storing parents"""
dtype = type(item)
if dtype in (str, int, float, bool, tuple):
return item
elif dtype == list:
dat = _SaverList(_parent=parent)
dat._data.extend(process_tree(val, dat) for val in item)
return dat
elif dtype == dict:
dat = _SaverDict(_parent=parent)
dat._data.update((key, process_tree(val, dat)) for key, val in item.items())
return dat
elif dtype == set:
dat = _SaverSet(_parent=parent)
dat._data.update(process_tree(val, dat) for val in item)
return dat
return item
return process_tree(data, self)
def __repr__(self):
return self._data.__repr__()
def __len__(self):
return self._data.__len__()
def __iter__(self):
return self._data.__iter__()
def __getitem__(self, key):
return self._data.__getitem__(key)
def __eq__(self, other):
return self._data == other
def __ne__(self, other):
return self._data != other
@_save
def __setitem__(self, key, value):
self._data.__setitem__(key, self._convert_mutables(value))
@_save
def __delitem__(self, key):
self._data.__delitem__(key)
class _SaverList(_SaverMutable, MutableSequence):
    """
    A list that saves itself to an Attribute when updated.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._data = list()
    @_save
    def __iadd__(self, otherlist):
        # in-place add replaces the backing list and triggers a save
        self._data = self._data.__add__(otherlist)
        return self._data
    def __add__(self, otherlist):
        # non-mutating add returns a plain (non-saving) list
        return list(self._data) + otherlist
    @_save
    def insert(self, index, value):
        self._data.insert(index, self._convert_mutables(value))
    def __eq__(self, other):
        # compare by content; uncomparable types are simply unequal
        try:
            return list(self._data) == list(other)
        except TypeError:
            return False
    def __ne__(self, other):
        try:
            return list(self._data) != list(other)
        except TypeError:
            return True
    def index(self, value, *args):
        return self._data.index(value, *args)
class _SaverDict(_SaverMutable, MutableMapping):
    """
    A dict whose mutations are pushed back to the owning Attribute.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._data = {}
    def has_key(self, key):
        # legacy py2-style membership helper
        return key in self._data
class _SaverSet(_SaverMutable, MutableSet):
    """
    A set that saves to an Attribute when updated
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._data = set()
    def __contains__(self, value):
        return self._data.__contains__(value)
    # mutating operations persist via the _save decorator
    @_save
    def add(self, value):
        self._data.add(self._convert_mutables(value))
    @_save
    def discard(self, value):
        self._data.discard(value)
class _SaverOrderedDict(_SaverMutable, MutableMapping):
    """
    An ordereddict that can be saved and operated on.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._data = OrderedDict()
    def has_key(self, key):
        # legacy py2-style membership helper
        return key in self._data
class _SaverDeque(_SaverMutable):
    """
    A deque that can be saved and operated on.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._data = deque()
    @_save
    def append(self, *args, **kwargs):
        self._data.append(*args, **kwargs)
    @_save
    def appendleft(self, *args, **kwargs):
        self._data.appendleft(*args, **kwargs)
    @_save
    def clear(self):
        self._data.clear()
    @_save
    def extendleft(self, *args, **kwargs):
        self._data.extendleft(*args, **kwargs)
    # maxlen property
    def _getmaxlen(self):
        return self._data.maxlen
    @_save
    def _setmaxlen(self, value):
        # Fix: deque.maxlen is a read-only attribute in CPython, so the
        # previous `self._data.maxlen = value` raised AttributeError.
        # A new deque must be built to change the bound (this may
        # truncate, hence the @_save to persist the result).
        self._data = deque(self._data, maxlen=value)
    @_save
    def _delmaxlen(self):
        # Fix: `del self._data.maxlen` also raised AttributeError;
        # rebuilding without a maxlen removes the length bound.
        self._data = deque(self._data)
    maxlen = property(_getmaxlen, _setmaxlen, _delmaxlen)
    @_save
    def pop(self, *args, **kwargs):
        return self._data.pop(*args, **kwargs)
    @_save
    def popleft(self, *args, **kwargs):
        return self._data.popleft(*args, **kwargs)
    @_save
    def reverse(self):
        self._data.reverse()
    @_save
    def rotate(self, *args):
        self._data.rotate(*args)
_DESERIALIZE_MAPPING = {_SaverList.__name__: list, _SaverDict.__name__: dict,
_SaverSet.__name__: set, _SaverOrderedDict.__name__: OrderedDict,
_SaverDeque.__name__: deque}
def deserialize(obj):
    """
    Make sure to *fully* decouple a structure from the database, by turning all _Saver*-mutables
    inside it back into their normal Python forms.
    """
    def _iter(obj):
        typ = type(obj)
        tname = typ.__name__
        if tname in ('_SaverDict', 'dict'):
            return {_iter(key): _iter(val) for key, val in obj.items()}
        elif tname in ('_SaverOrderedDict', 'OrderedDict'):
            # Fix: mappings iterate over keys only, so rebuilding an
            # (ordered) dict from plain iteration (the generic branches
            # below) loses the values / raises TypeError.  Rebuild from
            # items instead, mirroring the dict branch above.
            return OrderedDict((_iter(key), _iter(val)) for key, val in obj.items())
        elif tname in _DESERIALIZE_MAPPING:
            return _DESERIALIZE_MAPPING[tname](_iter(val) for val in obj)
        elif is_iter(obj):
            return typ(_iter(val) for val in obj)
        return obj
    return _iter(obj)
#
# serialization helpers
def pack_dbobj(item):
    """
    Check and convert django database objects to an internal representation.
    Args:
        item (any): A database entity to pack
    Returns:
        packed (any or tuple): Either returns the original input item
        or the packing tuple `("__packed_dbobj__", key, creation_time, id)`.
    """
    _init_globals()
    obj = item
    # the hasattr-chain evaluates to False for non-db objects; looking up
    # False in the defaultdict yields "" (falsy), so non-db items pass
    # through unchanged via the `natural_key and ... or item` below
    natural_key = _FROM_MODEL_MAP[hasattr(obj, "id") and hasattr(obj, "db_date_created") and
                                  hasattr(obj, '__dbclass__') and obj.__dbclass__.__name__.lower()]
    # build the internal representation as a tuple
    #  ("__packed_dbobj__", key, creation_time, id)
    return natural_key and ('__packed_dbobj__', natural_key,
                            _TO_DATESTRING(obj), _GA(obj, "id")) or item
def unpack_dbobj(item):
    """
    Check and convert internal representations back to Django database
    models.
    Args:
        item (packed_dbobj): The fact that item is a packed dbobj
            should be checked before this call.
    Returns:
        unpacked (any): Either the original input or converts the
            internal store back to a database representation (its
            typeclass is returned if applicable).
    """
    _init_globals()
    try:
        # item is ("__packed_dbobj__", natural_key, datestring, id)
        obj = item[3] and _TO_MODEL_MAP[item[1]].objects.get(id=item[3])
    except ObjectDoesNotExist:
        return None
    except TypeError:
        if hasattr(item, "pk"):
            # this happens if item is already an obj
            return item
        return None
    if item[1] in _IGNORE_DATETIME_MODELS:
        # if we are replacing models we ignore the datetime check
        return obj
    else:
        # even if we got back a match, check the sanity of the date (some
        # databases may 're-use' the id)
        return _TO_DATESTRING(obj) == item[2] and obj or None
def pack_session(item):
    """
    Handle the safe serializion of Sessions objects (these contain
    hidden references to database objects (accounts, puppets) so they
    can't be safely serialized).
    Args:
        item (Session)): This item must have all properties of a session
            before entering this call.
    Returns:
        packed (tuple or None): A session-packed tuple on the form
            `(__packed_session__, sessid, conn_time)`. If this sessid
            does not match a session in the Session handler, None is returned.
    """
    _init_globals()
    session = _SESSION_HANDLER.get(item.sessid)
    if session and session.conn_time == item.conn_time:
        # we require connection times to be identical for the Session
        # to be accepted as actually being a session (sessids gets
        # reused all the time).
        return item.conn_time and item.sessid and ('__packed_session__',
                                                   _GA(item, "sessid"),
                                                   _GA(item, "conn_time"))
    return None
def unpack_session(item):
    """
    Check and convert internal representations back to Sessions.
    Args:
        item (packed_session): The fact that item is a packed session
            should be checked before this call.
    Returns:
        unpacked (any): Either the original input or converts the
            internal store back to a Session. If Session no longer
            exists, None will be returned.
    """
    _init_globals()
    # item is ("__packed_session__", sessid, conn_time)
    session = _SESSION_HANDLER.get(item[1])
    if session and session.conn_time == item[2]:
        # we require connection times to be identical for the Session
        # to be accepted as the same as the one stored (sessids gets
        # reused all the time).
        return session
    return None
#
# Access methods
def to_pickle(data):
    """
    This prepares data on arbitrary form to be pickled. It handles any
    nested structure and returns data on a form that is safe to pickle
    (including having converted any database models to their internal
    representation). We also convert any Saver*-type objects back to
    their normal representations, they are not pickle-safe.
    Args:
        data (any): Data to pickle.
    Returns:
        data (any): Pickled data.
    """
    def process_item(item):
        """Recursive processor and identification of data"""
        dtype = type(item)
        # immutable leaf types pass through untouched
        if dtype in (str, int, float, bool, bytes, SafeString, SafeBytes):
            return item
        elif dtype == tuple:
            return tuple(process_item(val) for val in item)
        # Saver*-variants are converted back to their plain counterparts
        elif dtype in (list, _SaverList):
            return [process_item(val) for val in item]
        elif dtype in (dict, _SaverDict):
            return dict((process_item(key), process_item(val)) for key, val in item.items())
        elif dtype in (set, _SaverSet):
            return set(process_item(val) for val in item)
        elif dtype in (OrderedDict, _SaverOrderedDict):
            return OrderedDict((process_item(key), process_item(val)) for key, val in item.items())
        elif dtype in (deque, _SaverDeque):
            return deque(process_item(val) for val in item)
        elif hasattr(item, '__iter__'):
            # we try to conserve the iterable class, if not convert to list
            try:
                return item.__class__([process_item(val) for val in item])
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        elif hasattr(item, "sessid") and hasattr(item, "conn_time"):
            # duck-typed Session object
            return pack_session(item)
        # anything else is treated as a potential database object
        return pack_dbobj(item)
    return process_item(data)
# @transaction.autocommit
def from_pickle(data, db_obj=None):
    """
    This should be fed a just de-pickled data object. It will be converted back
    to a form that may contain database objects again. Note that if a database
    object was removed (or changed in-place) in the database, None will be
    returned.
    Args:
        data (any): Pickled data to unpickle.
        db_obj (Attribute, any): This is the model instance (normally
            an Attribute) that _Saver*-type iterables (_SaverList etc)
            will save to when they update. It must have a 'value' property
            that saves assigned data to the database. Skip if not
            serializing onto a given object. If db_obj is given, this
            function will convert lists, dicts and sets to their
            _SaverList, _SaverDict and _SaverSet counterparts.
    Returns:
        data (any): Unpickled data.
    """
    def process_item(item):
        """Recursive processor and identification of data"""
        dtype = type(item)
        if dtype in (str, int, float, bool, bytes, SafeString, SafeBytes):
            return item
        elif _IS_PACKED_DBOBJ(item):
            # this must be checked before tuple
            return unpack_dbobj(item)
        elif _IS_PACKED_SESSION(item):
            return unpack_session(item)
        elif dtype == tuple:
            return tuple(process_item(val) for val in item)
        elif dtype == dict:
            return dict((process_item(key), process_item(val)) for key, val in item.items())
        elif dtype == set:
            return set(process_item(val) for val in item)
        elif dtype == OrderedDict:
            return OrderedDict((process_item(key), process_item(val)) for key, val in item.items())
        elif dtype == deque:
            return deque(process_item(val) for val in item)
        elif hasattr(item, '__iter__'):
            try:
                # we try to conserve the iterable class if
                # it accepts an iterator
                return item.__class__(process_item(val) for val in item)
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        return item
    def process_tree(item, parent):
        """Recursive processor, building a parent-tree from iterable data"""
        dtype = type(item)
        if dtype in (str, int, float, bool, bytes, SafeString, SafeBytes):
            return item
        elif _IS_PACKED_DBOBJ(item):
            # this must be checked before tuple
            return unpack_dbobj(item)
        elif dtype == tuple:
            return tuple(process_tree(val, item) for val in item)
        elif dtype == list:
            dat = _SaverList(_parent=parent)
            dat._data.extend(process_tree(val, dat) for val in item)
            return dat
        elif dtype == dict:
            dat = _SaverDict(_parent=parent)
            dat._data.update((process_item(key), process_tree(val, dat))
                             for key, val in item.items())
            return dat
        elif dtype == set:
            dat = _SaverSet(_parent=parent)
            dat._data.update(set(process_tree(val, dat) for val in item))
            return dat
        elif dtype == OrderedDict:
            dat = _SaverOrderedDict(_parent=parent)
            dat._data.update((process_item(key), process_tree(val, dat))
                             for key, val in item.items())
            return dat
        elif dtype == deque:
            dat = _SaverDeque(_parent=parent)
            dat._data.extend(process_item(val) for val in item)
            return dat
        elif hasattr(item, '__iter__'):
            try:
                # we try to conserve the iterable class if it
                # accepts an iterator
                return item.__class__(process_tree(val, parent) for val in item)
            except (AttributeError, TypeError):
                dat = _SaverList(_parent=parent)
                dat._data.extend(process_tree(val, dat) for val in item)
                return dat
        return item
    if db_obj:
        # convert lists, dicts and sets to their Saved* counterparts. It
        # is only relevant if the "root" is an iterable of the right type.
        dtype = type(data)
        if dtype == list:
            dat = _SaverList(_db_obj=db_obj)
            dat._data.extend(process_tree(val, dat) for val in data)
            return dat
        elif dtype == dict:
            dat = _SaverDict(_db_obj=db_obj)
            dat._data.update((process_item(key), process_tree(val, dat))
                             for key, val in data.items())
            return dat
        elif dtype == set:
            dat = _SaverSet(_db_obj=db_obj)
            dat._data.update(process_tree(val, dat) for val in data)
            return dat
        elif dtype == OrderedDict:
            dat = _SaverOrderedDict(_db_obj=db_obj)
            dat._data.update((process_item(key), process_tree(val, dat))
                             for key, val in data.items())
            return dat
        elif dtype == deque:
            dat = _SaverDeque(_db_obj=db_obj)
            dat._data.extend(process_item(val) for val in data)
            return dat
    # no db_obj (or non-iterable root): plain non-saving conversion
    return process_item(data)
def do_pickle(data):
    """Pickle `data` into a byte string using the module-wide protocol."""
    pickled = dumps(data, protocol=PICKLE_PROTOCOL)
    return pickled
def do_unpickle(data):
    """Recover a python structure from a pickled (byte) string."""
    raw = to_bytes(data)
    return loads(raw)
def dbserialize(data):
    """Convert `data` to a db-safe form and pickle it in a single step."""
    packed = to_pickle(data)
    return do_pickle(packed)
def dbunserialize(data, db_obj=None):
    """Unpickle and restore database objects in one step. See from_pickle for help on db_obj."""
    unpickled = do_unpickle(data)
    return from_pickle(unpickled, db_obj=db_obj)
| 33.367965 | 105 | 0.628697 |
7eb4dae8bbb90e790e8622722c072a8f540dad44 | 6,297 | py | Python | src/agent.py | jhonsonlee/ddpg-in-reacher-env | f49b3dc219c89dc5adffbd96580849b36c66e4af | [
"MIT"
] | null | null | null | src/agent.py | jhonsonlee/ddpg-in-reacher-env | f49b3dc219c89dc5adffbd96580849b36c66e4af | [
"MIT"
] | 7 | 2019-12-16T22:20:20.000Z | 2022-02-10T01:31:07.000Z | src/agent.py | jhonsonlee/ddpg-in-reacher-env | f49b3dc219c89dc5adffbd96580849b36c66e4af | [
"MIT"
] | null | null | null | import numpy as np
import random
import copy
from collections import namedtuple, deque
from src.model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 128 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 2e-4 # learning rate of the actor
LR_CRITIC = 2e-4 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
GRAD_CLIPPING = 1 # Gradient clipping
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """DDPG agent: actor-critic pair with target networks and OU exploration noise."""
    def __init__(self, state_size, action_size, random_seed=42):
        """Build local/target actor and critic networks and their optimizers."""
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        self.actor_local = Actor(state_size, action_size, random_seed).to(device)
        self.actor_target = Actor(state_size, action_size, random_seed).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
        self.critic_local = Critic(state_size, action_size, random_seed).to(device)
        self.critic_target = Critic(state_size, action_size, random_seed).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
        # start the targets as exact copies of the local networks
        self.hard_copy_weights(self.actor_target, self.actor_local)
        self.hard_copy_weights(self.critic_target, self.critic_local)
        self.noise = OUNoise(action_size, random_seed)
        #self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
    def step(self, memory, state, action, reward, next_state, done):
        """Record one transition in `memory` and learn once enough samples exist."""
        memory.add(state, action, reward, next_state, done)
        if len(memory) > BATCH_SIZE:
            experiences = memory.sample()
            self.learn(experiences, GAMMA)
    def act(self, state, add_noise=True):
        """Return a clipped action for `state`, optionally perturbed by OU noise."""
        state = torch.from_numpy(state).float().to(device)
        self.actor_local.eval()
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        self.actor_local.train()
        if add_noise:
            action += self.noise.sample()
        # actions are assumed bounded to [-1, 1] by the environment
        return np.clip(action, -1, 1)
    def reset(self):
        """Reset the exploration noise process (call at episode start)."""
        self.noise.reset()
    def learn(self, experiences, gamma):
        """One DDPG update from a batch: critic via TD target, actor via policy gradient."""
        states, actions, rewards, next_states, dones = experiences
        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic_local(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        if GRAD_CLIPPING > 0.0:
            torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), GRAD_CLIPPING)
        self.critic_optimizer.step()
        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss
        actions_pred = self.actor_local(states)
        actor_loss = -self.critic_local(states, actions_pred).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic_local, self.critic_target, TAU)
        self.soft_update(self.actor_local, self.actor_target, TAU)
    def soft_update(self, local_model, target_model, tau):
        """Polyak averaging: target <- tau*local + (1-tau)*target."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
    def hard_copy_weights(self, target, source):
        """Copy all parameters from `source` into `target` verbatim."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(param.data)
class OUNoise:
    """Ornstein-Uhlenbeck process producing temporally correlated exploration noise."""
    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.1):
        """Initialize parameters and the noise state."""
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.seed = random.seed(seed)
        self.reset()
    def reset(self):
        """Reset the internal state (= noise) back to the mean mu."""
        self.state = copy.copy(self.mu)
    def sample(self):
        """Advance the process one step and return the new state as noise."""
        prev = self.state
        # NOTE: draws are uniform via random.random(), as in the original.
        draws = np.array([random.random() for _ in range(len(prev))])
        delta = self.theta * (self.mu - prev) + self.sigma * draws
        self.state = prev + delta
        return self.state
class ReplayBuffer:
    """Fixed-size buffer storing experience tuples for off-policy learning."""
    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Create an empty buffer bounded at `buffer_size` entries."""
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)  # internal memory (deque)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)
    def add(self, state, action, reward, next_state, done):
        """Append a new experience tuple to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))
    def sample(self):
        """Draw a random batch and return it as float tensors on `device`."""
        drawn = random.sample(self.memory, k=self.batch_size)
        batch = [e for e in drawn if e is not None]
        states = torch.from_numpy(np.vstack([e.state for e in batch])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in batch])).float().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in batch])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in batch])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in batch]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)
    def __len__(self):
        return len(self.memory)
| 39.111801 | 127 | 0.631412 |
fe6db1f388ee0b5fa2c6aa9b1e6807333c82341b | 1,762 | py | Python | solutions/542.updateMatrix.py | lim1202/LeetCodeProblems | 26d8f799296da846dec284ccea07097d5cdcc1f1 | [
"MIT"
] | null | null | null | solutions/542.updateMatrix.py | lim1202/LeetCodeProblems | 26d8f799296da846dec284ccea07097d5cdcc1f1 | [
"MIT"
] | null | null | null | solutions/542.updateMatrix.py | lim1202/LeetCodeProblems | 26d8f799296da846dec284ccea07097d5cdcc1f1 | [
"MIT"
] | null | null | null | """
542. 01 Matrix
Given an m x n binary matrix mat, return the distance of the nearest 0 for each cell.
The distance between two adjacent cells is 1.
Example 1:
Input: mat = [[0,0,0],[0,1,0],[0,0,0]]
Output: [[0,0,0],[0,1,0],[0,0,0]]
Example 2:
Input: mat = [[0,0,0],[0,1,0],[1,1,1]]
Output: [[0,0,0],[0,1,0],[1,2,1]]
Constraints:
m == mat.length
n == mat[i].length
1 <= m, n <= 104
1 <= m * n <= 104
mat[i][j] is either 0 or 1.
There is at least one 0 in mat.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/01-matrix
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
class Solution:
    def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:
        """Return, for each cell, the distance to the nearest 0 via two DP sweeps."""
        rows, cols = len(mat), len(mat[0])
        BIG = 10 ** 9  # effectively infinite for this problem size
        # Seed the distance grid: zero cells cost 0, all others "infinite".
        dist = [[0 if mat[r][c] == 0 else BIG for c in range(cols)]
                for r in range(rows)]
        # Forward sweep: propagate distances from the top and the left.
        for r in range(rows):
            for c in range(cols):
                if r > 0:
                    dist[r][c] = min(dist[r][c], dist[r - 1][c] + 1)
                if c > 0:
                    dist[r][c] = min(dist[r][c], dist[r][c - 1] + 1)
        # Backward sweep: propagate distances from the bottom and the right.
        for r in range(rows - 1, -1, -1):
            for c in range(cols - 1, -1, -1):
                if r + 1 < rows:
                    dist[r][c] = min(dist[r][c], dist[r + 1][c] + 1)
                if c + 1 < cols:
                    dist[r][c] = min(dist[r][c], dist[r][c + 1] + 1)
        return dist
if __name__ == "__main__":
    # Smoke-test the solution against the two examples from the problem statement.
    s = Solution()
    print("Input: mat = [[0,0,0],[0,1,0],[0,0,0]]")
    print("Output:", s.updateMatrix([[0, 0, 0], [0, 1, 0], [0, 0, 0]]))
    print("Input: mat = [[0,0,0],[0,1,0],[1,1,1]]")
    print("Output:", s.updateMatrix([[0, 0, 0], [0, 1, 0], [1, 1, 1]]))
| 24.816901 | 85 | 0.473326 |
062676a6978006735abefb7f9b1b05ce3b2c49fa | 15,395 | py | Python | tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/text_to_speech_evaluator.py | kblaszczak-intel/open_model_zoo | e313674d35050d2a4721bbccd9bd4c404f1ba7f8 | [
"Apache-2.0"
] | 2,201 | 2018-10-15T14:37:19.000Z | 2020-07-16T02:05:51.000Z | tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/text_to_speech_evaluator.py | kblaszczak-intel/open_model_zoo | e313674d35050d2a4721bbccd9bd4c404f1ba7f8 | [
"Apache-2.0"
] | 759 | 2018-10-18T07:43:55.000Z | 2020-07-16T01:23:12.000Z | tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/text_to_speech_evaluator.py | kblaszczak-intel/open_model_zoo | e313674d35050d2a4721bbccd9bd4c404f1ba7f8 | [
"Apache-2.0"
] | 808 | 2018-10-16T14:03:49.000Z | 2020-07-15T11:41:45.000Z | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import partial
import numpy as np
from .base_custom_evaluator import BaseCustomEvaluator
from .base_models import BaseCascadeModel, BaseDLSDKModel, create_model, BaseOpenVINOModel
from ...adapters import create_adapter
from ...config import ConfigError
from ...utils import contains_all, extract_image_representations, generate_layer_name, postprocess_output_name
class TextToSpeechEvaluator(BaseCustomEvaluator):
    """Custom evaluator for the three-stage text-to-speech cascade.

    Drives a `SequentialModel` (duration prediction -> mel regression ->
    MelGAN vocoder) over the dataset batch by batch.
    """
    def __init__(self, dataset_config, launcher, model, orig_config):
        super().__init__(dataset_config, launcher, orig_config)
        self.model = model
        # Expose the adapter provider name when the cascade carries an adapter.
        if hasattr(self.model, 'adapter'):
            self.adapter_type = self.model.adapter.__provider__
    @classmethod
    def from_configs(cls, config, delayed_model_loading=False, orig_config=None):
        # Builds the evaluator and its model cascade from a config dict.
        # `adapter` and `pos_mask_window` are required keys; the rest have
        # sensible fallbacks.
        dataset_config, launcher, _ = cls.get_dataset_and_launcher_info(config)
        adapter_info = config['adapter']
        pos_mask_window = config['pos_mask_window']
        model = SequentialModel(
            config.get('network_info', {}), launcher, config.get('_models', []), adapter_info, pos_mask_window,
            config.get('_model_is_blob'), delayed_model_loading
        )
        return cls(dataset_config, launcher, model, orig_config)
    def _process(self, output_callback, calculate_metrics, progress_reporter, metric_config, csv_file):
        # Main evaluation loop: preprocess each batch, run the cascade,
        # postprocess, and accumulate metrics.
        for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
            batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)
            batch_data, batch_meta = extract_image_representations(batch_inputs)
            # Input layer names are derived from the data identifiers; when
            # networks were loaded with per-model prefixes, the duration-model
            # prefix is prepended.
            input_names = ['{}{}'.format(
                'forward_tacotron_duration_' if self.model.with_prefix else '',
                s.split('.')[-1]) for s in batch_inputs[0].identifier]
            temporal_output_callback = None
            if output_callback:
                temporal_output_callback = partial(output_callback, metrics_result=None,
                                                   element_identifiers=batch_identifiers,
                                                   dataset_indices=batch_input_ids)
            batch_raw_prediction, batch_prediction = self.model.predict(
                batch_identifiers, batch_data, batch_meta, input_names, callback=temporal_output_callback
            )
            batch_annotation, batch_prediction = self.postprocessor.process_batch(batch_annotation, batch_prediction)
            metrics_result = self._get_metrics_result(batch_input_ids, batch_annotation, batch_prediction,
                                                      calculate_metrics)
            if output_callback:
                output_callback(batch_raw_prediction, metrics_result=metrics_result,
                                element_identifiers=batch_identifiers, dataset_indices=batch_input_ids)
            self._update_progress(progress_reporter, metric_config, batch_id, len(batch_prediction), csv_file)
class SequentialModel(BaseCascadeModel):
    """Three-stage TTS cascade: duration model -> mel regression -> MelGAN."""
    def __init__(self, network_info, launcher, models_args, adapter_info, pos_mask_window, is_blob=None,
                 delayed_model_loading=False):
        super().__init__(network_info, launcher)
        parts = ['forward_tacotron_duration', 'forward_tacotron_regression', 'melgan']
        network_info = self.fill_part_with_model(network_info, parts, models_args, is_blob, delayed_model_loading)
        if not contains_all(network_info, parts) and not delayed_model_loading:
            raise ConfigError('network_info should contain forward_tacotron_duration,'
                              'forward_tacotron_regression and melgan fields')
        # Backend-specific implementations for each cascade stage.
        self._duration_mapping = {
            'dlsdk': TTSDLSDKModel,
            'openvino': TTSOVModel
        }
        self._regression_mapping = {
            'dlsdk': RegressionDLSDKModel,
            'openvino': RegressionOVModel
        }
        self._melgan_mapping = {
            'dlsdk': MelganDLSDKModel,
            'openvino': MelganOVModel
        }
        self.forward_tacotron_duration = create_model(
            network_info.get('forward_tacotron_duration', {}), launcher, self._duration_mapping,
            'duration_prediction_att', delayed_model_loading
        )
        self.forward_tacotron_regression = create_model(
            network_info.get('forward_tacotron_regression', {}), launcher, self._regression_mapping,
            'regression_att', delayed_model_loading
        )
        self.melgan = create_model(
            network_info.get('melgan', {}), launcher, self._melgan_mapping, "melganupsample", delayed_model_loading
        )
        # Input names can only be resolved once the networks are loaded.
        if not delayed_model_loading:
            self.forward_tacotron_duration_input = next(iter(self.forward_tacotron_duration.inputs))
            self.melgan_input = next(iter(self.melgan.inputs))
        else:
            self.forward_tacotron_duration_input = None
            self.melgan_input = None
        # Optional speaker-embedding input (multi-speaker models only).
        self.duration_speaker_embeddings = (
            'speaker_embedding' if 'speaker_embedding' in self.forward_tacotron_regression_input else None
        )
        # Logical output names; remapped to real layer names by
        # update_inputs_outputs_info().
        self.duration_output = 'duration'
        self.embeddings_output = 'embeddings'
        self.mel_output = 'mel'
        self.audio_output = 'audio'
        self.pos_mask_window = int(pos_mask_window)
        self.adapter = create_adapter(adapter_info)
        self.adapter.output_blob = self.audio_output
        if not delayed_model_loading:
            self.update_inputs_outputs_info()
        self.init_pos_mask(window_size=self.pos_mask_window)
        self.with_prefix = False
        self._part_by_name = {
            'forward_tacotron_duration': self.forward_tacotron_duration,
            'forward_tacotron_regression': self.forward_tacotron_regression,
            'melgan': self.melgan
        }
    @property
    def forward_tacotron_regression_input(self):
        # Input description comes from the regression model's config.
        return self.forward_tacotron_regression.regression_input
    @property
    def max_mel_len(self):
        return self.melgan.max_len
    @property
    def max_regression_len(self):
        return self.forward_tacotron_regression.max_len
    def init_pos_mask(self, mask_sz=6000, window_size=4):
        """Precomputes a banded attention mask of width 2*window_size+1."""
        mask_arr = np.zeros((1, 1, mask_sz, mask_sz), dtype=np.float32)
        width = 2 * window_size + 1
        for i in range(mask_sz - width):
            mask_arr[0][0][i][i:i + width] = 1.0
        self.pos_mask = mask_arr
    @staticmethod
    def sequence_mask(length, max_length=None):
        """Builds a boolean mask of shape (len(length), max_length)."""
        if max_length is None:
            max_length = np.max(length)
        x = np.arange(max_length, dtype=length.dtype)
        x = np.expand_dims(x, axis=(0))
        length = np.expand_dims(length, axis=(1))
        return x < length
    def predict(self, identifiers, input_data, input_meta=None, input_names=None, callback=None):
        """Runs the full cascade for a single sample (batch size must be 1)."""
        assert len(identifiers) == 1
        duration_input = dict(zip(input_names, input_data[0]))
        duration_output = self.forward_tacotron_duration.predict(identifiers, duration_input)
        # Models may return (processed, raw) or just processed results.
        if isinstance(duration_output, tuple):
            duration_output, raw_duration_output = duration_output
        else:
            raw_duration_output = duration_output
        if callback:
            callback(raw_duration_output)
        # Round predicted durations to whole frames.
        duration = duration_output[self.duration_output]
        duration = (duration + 0.5).astype('int').flatten()
        duration = np.expand_dims(duration, axis=0)
        preprocessed_emb = duration_output[self.embeddings_output]
        # Repeat each phoneme embedding according to its predicted duration.
        indexes = self.build_index(duration, preprocessed_emb)
        processed_emb = self.gather(preprocessed_emb, 1, indexes)
        processed_emb = processed_emb[:, :self.max_regression_len, :]
        if len(input_names) > 1:  # in the case of network with attention
            input_mask = self.sequence_mask(np.array([[processed_emb.shape[1]]]), processed_emb.shape[1])
            pos_mask = self.pos_mask[:, :, :processed_emb.shape[1], :processed_emb.shape[1]]
            input_to_regression = {
                self.forward_tacotron_regression_input['data']: processed_emb,
                self.forward_tacotron_regression_input['data_mask']: input_mask,
                self.forward_tacotron_regression_input['pos_mask']: pos_mask}
            if self.duration_speaker_embeddings:
                sp_emb_input = self.forward_tacotron_regression_input['speaker_embedding']
                input_to_regression[sp_emb_input] = duration_input[self.duration_speaker_embeddings]
            mels = self.forward_tacotron_regression.predict(identifiers, input_to_regression)
        else:
            mels = self.forward_tacotron_regression.predict(identifiers,
                                                            {self.forward_tacotron_regression_input: processed_emb})
        if isinstance(mels, tuple):
            mels, raw_mels = mels
        else:
            raw_mels = mels
        if callback:
            callback(raw_mels)
        # Ensure a 3D mel input and cap its length before vocoding.
        melgan_input = mels[self.mel_output]
        if np.ndim(melgan_input) != 3:
            melgan_input = np.expand_dims(melgan_input, 0)
        melgan_input = melgan_input[:, :, :self.max_mel_len]
        audio = self.melgan.predict(identifiers, {self.melgan_input: melgan_input})
        if isinstance(audio, tuple):
            audio, raw_audio = audio
        else:
            raw_audio = audio
        return raw_audio, self.adapter.process(audio, identifiers, input_meta)
    def load_model(self, network_list, launcher):
        super().load_model(network_list, launcher)
        # Re-resolve layer names after (re)loading networks.
        self.update_inputs_outputs_info()
    def load_network(self, network_list, launcher):
        super().load_network(network_list, launcher)
        self.update_inputs_outputs_info()
    @staticmethod
    def build_index(duration, x):
        """Builds gather indices that repeat position j `duration[:, j]` times."""
        duration[np.where(duration < 0)] = 0
        tot_duration = np.cumsum(duration, 1)
        max_duration = int(tot_duration.max().item())
        index = np.zeros([x.shape[0], max_duration, x.shape[2]], dtype='long')
        for i in range(tot_duration.shape[0]):
            pos = 0
            for j in range(tot_duration.shape[1]):
                pos1 = tot_duration[i, j]
                index[i, pos:pos1, :] = j
                pos = pos1
            # Pad any remaining positions with the last phoneme index.
            index[i, pos:, :] = tot_duration.shape[1] - 1
        return index
    @staticmethod
    def gather(a, dim, index):
        """NumPy equivalent of torch.gather along dimension `dim`."""
        expanded_index = [
            index if dim == i else
            np.arange(a.shape[i]).reshape([-1 if i == j else 1 for j in range(a.ndim)]) for i in range(a.ndim)
        ]
        return a[tuple(expanded_index)]
    def update_inputs_outputs_info(self):
        """Remaps logical input/output names to actual layer names.

        Handles both the plain and the per-model-prefixed naming schemes that
        appear when several networks are combined into one.
        """
        if hasattr(self.forward_tacotron_duration, 'outputs'):
            self.duration_output = postprocess_output_name(
                self.duration_output,
                self.forward_tacotron_duration.outputs,
                additional_mapping=self.forward_tacotron_duration.additional_output_mapping, raise_error=False)
            self.embeddings_output = postprocess_output_name(
                self.embeddings_output, self.forward_tacotron_duration.outputs,
                additional_mapping=self.forward_tacotron_duration.additional_output_mapping, raise_error=False)
            self.mel_output = postprocess_output_name(
                self.mel_output, self.forward_tacotron_regression.outputs,
                additional_mapping=self.forward_tacotron_regression.additional_output_mapping, raise_error=False)
            self.audio_output = postprocess_output_name(
                self.audio_output, self.melgan.outputs,
                additional_mapping=self.melgan.additional_output_mapping, raise_error=False)
            self.adapter.output_blob = self.audio_output
        current_name = next(iter(self.forward_tacotron_duration.inputs))
        with_prefix = current_name.startswith('forward_tacotron_duration_')
        # Only rewrite names when the prefixing state actually changed.
        if not hasattr(self, 'with_prefix') or with_prefix != self.with_prefix:
            self.forward_tacotron_duration_input = next(iter(self.forward_tacotron_duration.inputs))
            self.melgan_input = next(iter(self.melgan.inputs))
            if self.duration_speaker_embeddings:
                self.duration_speaker_embeddings = generate_layer_name(
                    self.duration_speaker_embeddings, 'forward_tacotron_duration_', with_prefix
                )
            for key, value in self.forward_tacotron_regression_input.items():
                self.forward_tacotron_regression_input[key] = generate_layer_name(
                    value, 'forward_tacotron_regression_', with_prefix
                )
            self.with_prefix = with_prefix
class TTSDLSDKModel(BaseDLSDKModel):
    """DLSDK-backed stage of the TTS pipeline with shape-aware inference."""
    def predict(self, identifiers, input_data):
        """Runs inference, reshaping the network first for dynamic inputs."""
        if self.dynamic_inputs and not self.is_dynamic:
            input_shapes = {name: blob.shape for name, blob in input_data.items()}
            self._reshape_input(input_shapes)
        return self.exec_network.infer(input_data)
    @property
    def inputs(self):
        """Input descriptors of the loaded (or executable) network."""
        target = self.network if self.network else self.exec_network
        if hasattr(target, 'input_info'):
            return target.input_info
        return target.inputs
class TTSOVModel(BaseOpenVINOModel):
    """OpenVINO-API stage of the TTS pipeline with shape-aware inference."""
    def predict(self, identifiers, input_data):
        """Runs inference, reshaping the model first for dynamic inputs."""
        if self.dynamic_inputs and not self.is_dynamic:
            input_shapes = {name: blob.shape for name, blob in input_data.items()}
            self._reshape_input(input_shapes)
        return self.infer(input_data)
    def infer(self, input_data, raw_results=True):
        # Raw results are requested by default so callbacks can observe them.
        return super().infer(input_data, raw_results)
    def set_input_and_output(self):
        # Input/output resolution is handled by the cascade, not per model.
        pass
class RegressionDLSDKModel(TTSDLSDKModel):
    """Mel-regression stage (DLSDK backend) with a capped output length."""
    def __init__(self, network_info, launcher, suffix, delayed_model_loading=False):
        # Cache config-driven limits before the base class loads the network.
        max_regression_len = network_info['max_regression_len']
        self.max_len = int(max_regression_len)
        self.regression_input = network_info['inputs']
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
class MelganDLSDKModel(TTSDLSDKModel):
    """MelGAN vocoder stage (DLSDK backend) with a capped mel length."""
    def __init__(self, network_info, launcher, suffix, delayed_model_loading=False):
        # The cascade truncates mel input to this many frames before vocoding.
        max_mel_len = network_info['max_mel_len']
        self.max_len = int(max_mel_len)
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
class RegressionOVModel(TTSOVModel):
    """Mel-regression stage (OpenVINO backend) with a capped output length."""
    def __init__(self, network_info, launcher, suffix, delayed_model_loading=False):
        # Cache config-driven limits before the base class loads the model.
        max_regression_len = network_info['max_regression_len']
        self.max_len = int(max_regression_len)
        self.regression_input = network_info['inputs']
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
class MelganOVModel(TTSOVModel):
    """MelGAN vocoder stage (OpenVINO backend) with a capped mel length."""
    def __init__(self, network_info, launcher, suffix, delayed_model_loading=False):
        # The cascade truncates mel input to this many frames before vocoding.
        max_mel_len = network_info['max_mel_len']
        self.max_len = int(max_mel_len)
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
| 47.223926 | 118 | 0.678142 |
aff22265dd36901db91828beb9b6d5b4e2b96a85 | 179,623 | py | Python | tensorflow/python/keras/backend.py | Faagerholm/tensorflow | 98e30b8748eb018f33836ac9269db67ab60483ab | [
"Apache-2.0"
] | 1 | 2019-06-10T12:00:23.000Z | 2019-06-10T12:00:23.000Z | tensorflow/python/keras/backend.py | Faagerholm/tensorflow | 98e30b8748eb018f33836ac9269db67ab60483ab | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/backend.py | Faagerholm/tensorflow | 98e30b8748eb018f33836ac9269db67ab60483ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import sys
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tfdev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
# Aliases kept so the original builtins stay reachable inside this module,
# which redefines some builtin names (see the redefined-builtin pylint
# disable at the top of the file).
py_all = all
py_sum = sum
py_any = any
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping {graph: set_of_freezable_variables}.
# Each set tracks objects created via `freezable_variable` in the graph.
_FREEZABLE_VARS = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
_DUMMY_EAGER_GRAPH = threading.local()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
  """Returns the name of the current Keras backend.

  Only exists for API compatibility with multi-backend Keras.

  Returns:
      The string "tensorflow".
  """
  return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
  """Cast a Numpy array to the default Keras float type.

  For example, with the default `floatx()` of `'float32'`, a `float64`
  array is returned as a `float32` array with the same values.

  Arguments:
      x: Numpy array (or anything `np.asarray` accepts).

  Returns:
      The same data as a Numpy array of the default Keras float type.
  """
  target_dtype = floatx()
  return np.asarray(x, dtype=target_dtype)
# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
# Weak keys mean an entry disappears once its graph is garbage-collected.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
  """Associates a string prefix with an integer counter in a TensorFlow graph.

  Each call increments the current graph's counter for `prefix` and returns
  it, so successive calls with the same prefix yield 1, 2, 3, ...

  Arguments:
      prefix: String prefix to index.

  Returns:
      Unique integer ID.
  """
  uid_map = PER_GRAPH_OBJECT_NAME_UIDS.setdefault(
      get_graph(), collections.defaultdict(int))
  uid_map[prefix] += 1
  return uid_map[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
  """Forgets all per-graph name counters used for autogenerated names."""
  PER_GRAPH_OBJECT_NAME_UIDS.clear()
@keras_export('keras.backend.clear_session')
def clear_session():
  """Destroys the current TF graph and creates a new one.

  Useful to avoid clutter from old models / layers. Resets the cached Keras
  graph, the default TF graph, the per-graph name counters and the cached
  session, then recreates the learning-phase placeholder in the new graph.
  """
  global _SESSION
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  global _GRAPH_VARIABLES  # pylint: disable=global-variable-not-assigned
  global _GRAPH_TF_OPTIMIZERS  # pylint: disable=global-variable-not-assigned
  global _GRAPH
  global _FREEZABLE_VARS
  _GRAPH = None
  ops.reset_default_graph()
  reset_uids()
  _SESSION.session = None
  graph = get_graph()
  with graph.as_default():
    # Recreate the default (inference-mode) learning-phase placeholder.
    with name_scope(''):
      phase = array_ops.placeholder_with_default(
          False, shape=(), name='keras_learning_phase')
    _GRAPH_LEARNING_PHASES = {}
    _GRAPH_LEARNING_PHASES[graph] = phase
    # Drop per-graph bookkeeping entries keyed by this graph, if any.
    _GRAPH_VARIABLES.pop(graph, None)
    _GRAPH_TF_OPTIMIZERS.pop(graph, None)
    _FREEZABLE_VARS.pop(graph, None)
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
  """Sets the manual variable initialization flag.

  When the flag is False (the default), variables are initialized as they
  are instantiated; when True, initialization is left to the user (e.g. via
  `tf.compat.v1.initialize_all_variables()`).

  Arguments:
      value: Python boolean.
  """
  global _MANUAL_VAR_INIT
  _MANUAL_VAR_INIT = value
@keras_export('keras.backend.learning_phase')
def learning_phase():
  """Returns the learning phase flag.

  The learning phase flag is a bool tensor (0 = test, 1 = train)
  to be passed as input to any Keras function
  that uses a different behavior at train time and test time.

  Returns:
      Learning phase (scalar integer tensor or Python integer).
  """
  if ops.get_default_graph() is _GRAPH:
    # Don't enter an init_scope for the learning phase if eager execution
    # is enabled but we're inside the Keras workspace graph.
    return symbolic_learning_phase()
  with ops.init_scope():
    # We always check & set the learning phase inside the init_scope,
    # otherwise the wrong default_graph will be used to look up the learning
    # phase inside of functions & defuns.
    #
    # This is because functions & defuns (both in graph & in eager mode)
    # will always execute non-eagerly using a function-specific default
    # subgraph.
    if context.executing_eagerly():
      if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
        # Fallback to inference mode as default.
        return 0
      return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
    return symbolic_learning_phase()
def global_learning_phase_is_set():
  """Whether a learning phase has been set globally for eager execution."""
  return _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES
def symbolic_learning_phase():
  """Returns the symbolic learning-phase tensor for the Keras graph.

  On first access, lazily creates a `keras_learning_phase` placeholder
  defaulting to False (inference mode) and caches it per graph.
  """
  graph = get_graph()
  with graph.as_default():
    if graph not in _GRAPH_LEARNING_PHASES:
      with name_scope(''):
        phase = array_ops.placeholder_with_default(
            False, shape=(), name='keras_learning_phase')
      _GRAPH_LEARNING_PHASES[graph] = phase
    return _GRAPH_LEARNING_PHASES[graph]
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
  """Sets the learning phase to a fixed value.

  Arguments:
      value: Learning phase value, either 0 or 1 (integers).

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  if value not in (0, 1):
    raise ValueError('Expected learning phase to be 0 or 1.')
  with ops.init_scope():
    if context.executing_eagerly():
      # The eager context keeps its own learning-phase entry in addition to
      # the one stored for the internal Keras graph below.
      _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
    _GRAPH_LEARNING_PHASES[get_graph()] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
  """Provides a scope within which the learning phase is equal to `value`.

  The learning phase gets restored to its original value upon exiting the scope.

  Arguments:
      value: Learning phase value, either 0 or 1 (integers).

  Yields:
    None.

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  if value not in {0, 1}:
    raise ValueError('Expected learning phase to be 0 or 1.')
  with ops.init_scope():
    if context.executing_eagerly():
      # Remember the eager-mode phase (if any) so it can be restored on exit.
      previous_eager_value = _GRAPH_LEARNING_PHASES.get(
          _DUMMY_EAGER_GRAPH, None)
    previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
  try:
    set_learning_phase(value)
    yield
  finally:
    # Restore learning phase to initial value.
    with ops.init_scope():
      if context.executing_eagerly():
        if previous_eager_value is not None:
          _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value
        elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES:
          # No previous value: remove the entry this scope created.
          del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
      graph = get_graph()
      if previous_graph_value is not None:
        _GRAPH_LEARNING_PHASES[graph] = previous_graph_value
      elif graph in _GRAPH_LEARNING_PHASES:
        del _GRAPH_LEARNING_PHASES[graph]
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
  """Internal scope that sets the learning phase in eager / tf.function only.

  Arguments:
      value: Learning phase value, either 0 or 1 (integers).

  Yields:
    None.

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  # Internal helper: inputs are trusted, hence asserts rather than raises.
  assert value in {0, 1}
  assert ops.executing_eagerly_outside_functions()
  global_learning_phase_was_set = global_learning_phase_is_set()
  if global_learning_phase_was_set:
    previous_value = learning_phase()
  try:
    _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
    yield
  finally:
    # Restore learning phase to initial value or unset.
    if global_learning_phase_was_set:
      _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
    else:
      del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
def _current_graph(op_input_list):
  """Returns the graph the given ops/tensors belong to, or the default graph."""
  return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
  """Returns the session object for the current thread.

  Prefers an active default TF session; otherwise returns the thread-local
  cached Keras session, creating one whose graph matches `op_input_list`
  when needed.

  Arguments:
      op_input_list: Optional sequence of ops/tensors used to pick the graph
        the session must match.

  Raises:
      RuntimeError: if called from inside a TF graph function.
  """
  global _SESSION
  default_session = ops.get_default_session()
  if default_session is not None:
    session = default_session
  else:
    if ops.inside_function():
      raise RuntimeError('Cannot get session inside Tensorflow graph function.')
    # If we don't have a session, or that session does not match the current
    # graph, create and cache a new session.
    if (getattr(_SESSION, 'session', None) is None or
        _SESSION.session.graph is not _current_graph(op_input_list)):
      # If we are creating the Session inside a tf.distribute.Strategy scope,
      # we ask the strategy for the right session options to use.
      if distribution_strategy_context.has_strategy():
        configure_and_create_distributed_session(
            distribution_strategy_context.get_strategy())
      else:
        _SESSION.session = session_module.Session(
            config=get_default_session_config())
    session = _SESSION.session
  return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
  """Returns the TF session to be used by the backend.

  Lookup order: the active default TensorFlow session if one exists,
  otherwise the cached global Keras session (created on demand for the
  current graph). A session can also be installed manually via
  `K.set_session(sess)`.

  Arguments:
      op_input_list: An option sequence of tensors or ops, which will be used
        to determine the current graph. Otherwise the default graph will be
        used.

  Returns:
      A TensorFlow session.
  """
  session = _get_session(op_input_list)
  if _MANUAL_VAR_INIT:
    return session
  # Unless the user opted into manual initialization, make sure all
  # variables of the session's graph are initialized before returning it.
  with session.graph.as_default():
    _initialize_variables(session)
  return session
def get_graph():
  """Returns the Keras workspace graph (eager mode) or the default graph."""
  if not context.executing_eagerly():
    return ops.get_default_graph()
  global _GRAPH
  if _GRAPH is None:
    # Lazily create the single graph Keras uses for symbolic work in eager.
    _GRAPH = func_graph.FuncGraph('keras_graph')
  return _GRAPH
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
  """Retrieve a shared and temporary func graph.

  The eager execution path lifts a subgraph from the keras global graph into
  a scratch graph in order to create a function. DistributionStrategies, in
  turn, constructs multiple functions as well as a final combined function. In
  order for that logic to work correctly, all of the functions need to be
  created on the same scratch FuncGraph.

  Args:
    graph: A graph to be used as the current scratch graph. If not set then
      a scratch graph will either be retrieved or created:

  Yields:
    The current scratch graph.

  Raises:
    ValueError: if a different scratch graph is already active than the one
      passed in.
  """
  global _CURRENT_SCRATCH_GRAPH
  # Nested use must agree on the graph: a caller may not install a second,
  # different scratch graph while one is active.
  if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
      _CURRENT_SCRATCH_GRAPH is not graph):
    raise ValueError('Multiple scratch graphs specified.')
  if _CURRENT_SCRATCH_GRAPH:
    # Re-entrant call: reuse the active scratch graph without clearing it.
    yield _CURRENT_SCRATCH_GRAPH
    return
  graph = graph or func_graph.FuncGraph('keras_scratch_graph')
  try:
    _CURRENT_SCRATCH_GRAPH = graph
    yield graph
  finally:
    _CURRENT_SCRATCH_GRAPH = None
@keras_export(v1=['keras.backend.set_session'])
def set_session(session):
  """Registers `session` as the global TensorFlow session for this thread.

  Arguments:
      session: A TF Session.
  """
  global _SESSION
  _SESSION.session = session
def get_default_session_config():
  """Builds the session config Keras uses when it creates its own session."""
  if os.environ.get('OMP_NUM_THREADS'):
    # The env var is ignored now; point users at the supported API instead.
    logging.warning(
        'OMP_NUM_THREADS is no longer used by the default Keras config. '
        'To configure the number of threads, use tf.config.threading APIs.')
  session_config = context.context().config
  session_config.allow_soft_placement = True
  return session_config
def get_default_graph_uid_map():
  """Returns (creating if absent) the name-uid counter map of the default graph."""
  graph = ops.get_default_graph()
  try:
    return PER_GRAPH_OBJECT_NAME_UIDS[graph]
  except KeyError:
    uid_map = collections.defaultdict(int)
    PER_GRAPH_OBJECT_NAME_UIDS[graph] = uid_map
    return uid_map
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
  """Dummy op-like object that records the device TF would assign to it."""

  def __init__(self):
    # Populated when the graph's device functions are applied to this object.
    self.device = None

  def _set_device(self, device):
    """Records an explicit device scope; accepts a DeviceSpec or a string."""
    if tfdev.is_device_spec(device):
      self.device = device.to_string()
    else:
      self.device = device

  def _set_device_from_string(self, device_str):
    self.device = device_str
def _get_current_tf_device():
  """Return the device of the current context as a `DeviceSpec`.

  Applies the current graph's device functions to a capture object and
  parses whatever device string they produce.

  Returns:
      A `tfdev.DeviceSpec` parsed from the captured device string.
      NOTE(review): when no device scope is active, `op.device` stays `None`
      before `from_string` is called -- presumably the device functions
      always set a (possibly empty) string; TODO confirm.
  """
  graph = get_graph()
  op = _TfDeviceCaptureOp()
  graph._apply_device_functions(op)
  return tfdev.DeviceSpec.from_string(op.device)
def _is_current_explicit_device(device_type):
  """Check if the current device is explicitly set on the device type specified.

  Arguments:
      device_type: A string containing `GPU` or `CPU` (case-insensitive).

  Returns:
      A boolean indicating if the current device scope is explicitly set on the
      device type.

  Raises:
      ValueError: If the `device_type` string indicates an unsupported device.
  """
  device_type = device_type.upper()
  if device_type not in ['CPU', 'GPU']:
    raise ValueError('`device_type` should be either "CPU" or "GPU".')
  device = _get_current_tf_device()
  # `device_type` was already normalized to upper case above, so the second
  # `.upper()` call the original comparison made was redundant.
  return device is not None and device.device_type == device_type
def _get_available_gpus():
  """Get a list of available gpu devices (formatted as strings).

  Returns:
      A list of available GPU devices.
  """
  if ops.executing_eagerly_outside_functions():
    # Returns names of devices directly.
    return [name for name in context.list_devices() if 'GPU' in name]
  global _LOCAL_DEVICES
  if _LOCAL_DEVICES is None:
    # Device list is queried once and cached for the process lifetime.
    _LOCAL_DEVICES = get_session().list_devices()
  return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
  """Check whether the current scope supports NCHW ops.

  TensorFlow does not support NCHW on CPU, so NCHW is only usable when the
  scope is not explicitly pinned to CPU and at least one GPU is available
  (ops will then be soft-placed on the GPU device).

  Returns:
      bool: if the current scope device placement would support nchw
  """
  if _is_current_explicit_device('CPU'):
    return False
  return bool(_get_available_gpus())
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
  """Create a constant tensor of type `dtype` from `x`.

  A faster, narrower alternative to `_to_tensor`: handles only inputs that
  `tf.constant` accepts directly (numpy arrays, Python scalars and lists of
  them).

  Arguments:
      x: An object to be converted (numpy arrays, floats, ints and lists of
        them).
      dtype: The destination type.

  Returns:
      A constant tensor.
  """
  return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
  """Convert `x` into a tensor of type `dtype`.

  Arguments:
    x: An object to be converted (numpy array, list, tensors).
    dtype: The destination type.

  Returns:
    A tensor.
  """
  converted = ops.convert_to_tensor(x, dtype=dtype)
  return converted
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
  """Returns whether a tensor is a sparse tensor.

  Arguments:
    tensor: A tensor instance.

  Returns:
    A boolean.

  Example:
  ```python
  >>> from keras import backend as K
  >>> a = K.placeholder((2, 2), sparse=False)
  >>> print(K.is_sparse(a))
  False
  >>> b = K.placeholder((2, 2), sparse=True)
  >>> print(K.is_sparse(b))
  True
  ```
  """
  sparse = isinstance(tensor, sparse_tensor.SparseTensor)
  return sparse
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
  """Converts a sparse tensor into a dense tensor and returns it.

  Arguments:
    tensor: A tensor instance (potentially sparse).

  Returns:
    A dense tensor.

  Examples:
  ```python
  >>> from keras import backend as K
  >>> b = K.placeholder((2, 2), sparse=True)
  >>> print(K.is_sparse(b))
  True
  >>> c = K.to_dense(b)
  >>> print(K.is_sparse(c))
  False
  ```
  """
  # Dense tensors pass through unchanged.
  if not is_sparse(tensor):
    return tensor
  return sparse_ops.sparse_tensor_to_dense(tensor)
@keras_export('keras.backend.name_scope', v1=[])
def name_scope(name):
  """A context manager for use when defining a Python op.

  Entering the returned context manager pushes a name scope, so every
  operation created inside it gets `name` as a prefix.

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a):
    with tf.name_scope("MyOp") as scope:
      a = tf.convert_to_tensor(a, name="a")
      # Define some computation that uses `a`.
      return foo_op(..., name=scope)
  ```

  When executed, the Tensor `a` will have the name `MyOp/a`.

  Args:
    name: The prefix to use on all names created within the name scope.

  Returns:
    Name scope context manager.
  """
  scope = ops.name_scope_v2(name)
  return scope
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a variable and returns it.

  Arguments:
    value: Numpy array, initial value of the tensor.
    dtype: Tensor type.
    name: Optional name string for the tensor.
    constraint: Optional projection function to be
      applied to the variable after an optimizer update.

  Returns:
    A variable instance (with Keras metadata included).

  Examples:
  ```python
  >>> import numpy as np
  >>> from keras import backend as K
  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = K.variable(value=val, dtype='float64', name='example_var')
  >>> K.dtype(kvar)
  'float64'
  >>> print(kvar)
  example_var
  >>> kvar.eval()
  array([[ 1.,  2.],
         [ 3.,  4.]])
  ```
  """
  if dtype is None:
    dtype = floatx()
  if hasattr(value, 'tocoo'):
    # A `tocoo` attribute indicates a scipy.sparse matrix: represent it as a
    # SparseTensor (row/col pairs stacked into an (nnz, 2) indices array)
    # rather than a dense variable.
    sparse_coo = value.tocoo()
    indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
        sparse_coo.col, 1)), 1)
    v = sparse_tensor.SparseTensor(
        indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
    # `_keras_shape` carries the static shape for Keras shape inference.
    v._keras_shape = sparse_coo.shape
    return v
  v = resource_variable_ops.ResourceVariable(
      value,
      dtype=dtypes_module.as_dtype(dtype),
      name=name,
      constraint=constraint)
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, 'shape'):
    v._keras_shape = int_shape(value)
  # Register the variable so graph-mode sessions can lazily initialize it.
  track_variable(v)
  return v
def track_tf_optimizer(tf_optimizer):
  """Tracks the given TF optimizer for initialization of its variables."""
  if context.executing_eagerly():
    # Eager variables are initialized on creation; nothing to track.
    return
  _GRAPH_TF_OPTIMIZERS.setdefault(get_graph(), weakref.WeakSet()).add(
      tf_optimizer)
def track_variable(v):
  """Tracks the given variable for initialization.

  Arguments:
    v: A variable; registered into the per-graph `_GRAPH_VARIABLES` weak set
      so graph-mode sessions can initialize it lazily. No-op under eager
      execution, where variables are initialized on creation.
  """
  if context.executing_eagerly():
    return
  graph = v.graph if hasattr(v, 'graph') else get_graph()
  # Use `setdefault`, matching `track_tf_optimizer`, instead of the manual
  # membership-test-then-insert dance.
  _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet()).add(v)
def unique_object_name(name,
                       name_uid_map=None,
                       avoid_names=None,
                       namespace='',
                       zero_based=False):
  """Makes a object name (or arbitrary string) unique within a TensorFlow graph.

  Arguments:
    name: String name to make unique.
    name_uid_map: An optional defaultdict(int) to use when creating unique
      names. If None (default), uses a per-Graph dictionary.
    avoid_names: An optional set or dict with names which should not be used.
      If None (default) does not avoid any names.
    namespace: Gets a name which is unique within the (graph, namespace).
      Layers which are not Networks use a blank namespace and so get
      graph-global names.
    zero_based: If True, name sequences start with no suffix (e.g. "dense",
      "dense_1"). If False, naming is one-based ("dense_1", "dense_2").

  Returns:
    Unique string name.

  Example:
  ```python
  _unique_layer_name('dense')  # dense_1
  _unique_layer_name('dense')  # dense_2
  ```
  """
  if name_uid_map is None:
    name_uid_map = get_default_graph_uid_map()
  if avoid_names is None:
    avoid_names = set()
  # Counters are keyed on (namespace, name); the key never changes inside
  # the loop, so compute it once.
  name_key = (namespace, name)
  proposed_name = None
  while proposed_name is None or proposed_name in avoid_names:
    if zero_based:
      number = name_uid_map[name_key]
      # First occurrence keeps the bare name; later ones get "_<n>".
      proposed_name = name + '_' + str(number) if number else name
      name_uid_map[name_key] += 1
    else:
      name_uid_map[name_key] += 1
      proposed_name = name + '_' + str(name_uid_map[name_key])
  return proposed_name
def _get_variables(graph=None):
  """Returns variables corresponding to the given graph for initialization."""
  assert not context.executing_eagerly()
  tracked = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
  # Optimizer slot variables are tracked separately; fold them in as well.
  for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
    tracked.update(opt.optimizer.variables())
  return tracked
def _initialize_variables(session):
  """Utility to initialize uninitialized variables on the fly."""
  variables = _get_variables(get_graph())
  candidate_vars = [
      v for v in variables if not getattr(v, '_keras_initialized', False)
  ]
  if not candidate_vars:
    return
  # This step is expensive, so we only run it on variables not already
  # marked as initialized.
  is_initialized = session.run(
      [variables_module.is_variable_initialized(v) for v in candidate_vars])
  uninitialized_vars = []
  for flag, v in zip(is_initialized, candidate_vars):
    if not flag:
      uninitialized_vars.append(v)
    # Mark every checked variable so future calls skip it.
    v._keras_initialized = True
  if uninitialized_vars:
    session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
  """Creates a constant tensor.

  Arguments:
    value: A constant value (or list)
    dtype: The type of the elements of the resulting tensor.
    shape: Optional dimensions of resulting tensor.
    name: Optional name for the tensor.

  Returns:
    A Constant Tensor.
  """
  if dtype is None:
    # Fall back to the Keras-wide default float type.
    dtype = floatx()
  return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
  """Returns whether `x` is a Keras tensor.

  A "Keras tensor" is a tensor that was returned by a Keras layer,
  (`Layer` class) or by `Input`.

  Arguments:
    x: A candidate tensor.

  Returns:
    A boolean: Whether the argument is a Keras tensor.

  Raises:
    ValueError: In case `x` is not a symbolic tensor.

  Examples:
  ```python
  >>> np_var = numpy.array([1, 2])
  >>> K.is_keras_tensor(np_var)  # A numpy array is not a symbolic tensor.
  ValueError
  >>> keras_var = K.variable(np_var)
  >>> K.is_keras_tensor(keras_var)  # A backend variable is not a Keras tensor.
  False
  >>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
  >>> K.is_keras_tensor(keras_placeholder)  # A placeholder is not a Keras
  tensor.
  False
  >>> keras_input = Input([10])
  >>> K.is_keras_tensor(keras_input)  # An Input is a Keras tensor.
  True
  >>> keras_layer_output = Dense(10)(keras_input)
  >>> K.is_keras_tensor(keras_layer_output)  # Any Keras layer output is a
  Keras tensor.
  True
  ```
  """
  symbolic_types = (ops.Tensor, variables_module.Variable,
                    sparse_tensor.SparseTensor)
  if not isinstance(x, symbolic_types):
    raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
                     '`. Expected a symbolic tensor instance.')
  # Keras layers stamp their outputs with `_keras_history`.
  return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
  """Instantiates a placeholder tensor and returns it.

  Arguments:
    shape: Shape of the placeholder
      (integer tuple, may include `None` entries).
    ndim: Number of axes of the tensor.
      At least one of {`shape`, `ndim`} must be specified.
      If both are specified, `shape` is used.
    dtype: Placeholder type.
    sparse: Boolean, whether the placeholder should have a sparse type.
    name: Optional name string for the placeholder.

  Raises:
    ValueError: If called with eager execution.

  Returns:
    Tensor instance (with Keras metadata included).

  Examples:
  ```python
  >>> from keras import backend as K
  >>> input_ph = K.placeholder(shape=(2, 4, 5))
  >>> input_ph
  <tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
  ```
  """
  if dtype is None:
    dtype = floatx()
  if not shape and ndim:
    # Only `ndim` given: build a fully-unknown shape of that rank.
    shape = tuple(None for _ in range(ndim))
  with get_graph().as_default():
    placeholder_fn = (
        array_ops.sparse_placeholder if sparse else array_ops.placeholder)
    x = placeholder_fn(dtype, shape=shape, name=name)
  return x
def is_placeholder(x):
  """Returns whether `x` is a placeholder.

  Arguments:
    x: A candidate placeholder.

  Returns:
    Boolean.
  """
  try:
    # Graph tensors expose the op that produced them; placeholders come from
    # a 'Placeholder' op.
    result = x.op.type == 'Placeholder'
  except AttributeError:
    # Objects without an `.op` (eager tensors, arbitrary values) cannot be
    # graph placeholders.
    result = False
  return result
def freezable_variable(value, shape=None, name=None):
  """A tensor-like object whose value can be updated only up until execution.

  After creating the freezable variable, you can update its value by calling
  `var.update_value(new_value)` (similar to a regular variable).

  Unlike an actual variable, the value used during execution is the current
  value at the time the execution function (`backend.function()`) was created.

  This is an internal API, expected to be temporary. It is used to implement a
  mutable `trainable` property for `BatchNormalization` layers, with a frozen
  value after model compilation.

  We don't use a plain variable in this case because we need the value used
  in a specific model to be frozen after `compile` has been called
  (e.g. GAN use case).

  Arguments:
    value: The initial value for the tensor-like object.
    shape: The shape for the tensor-like object (cannot be changed).
    name: The name for the tensor-like object.

  Returns:
    A tensor-like object with a static value that can be updated via
    `x.update_value(new_value)`, up until creating an execution function
    (afterwards the value is fixed).
  """
  graph = get_graph()
  with graph.as_default():
    # A placeholder-with-default evaluates to `value` unless it is fed.
    # NOTE(review): presumably `backend.function()` feeds `_current_value`
    # when it is built, which is what "freezes" the value — confirm against
    # the execution-function code.
    x = array_ops.placeholder_with_default(
        value, shape=shape, name=name)
    x._initial_value = value
    x._current_value = value

    # These closures capture `x` and mutate/read its `_current_value`
    # attribute, so every holder of a reference to `x` sees the latest value.
    def update_value(new_value):
      x._current_value = new_value

    def get_value():
      return x._current_value

    x.update_value = update_value
    x.get_value = get_value

    # Register in the per-graph weak registry of freezable variables.
    global _FREEZABLE_VARS
    if graph not in _FREEZABLE_VARS:
      _FREEZABLE_VARS[graph] = weakref.WeakSet()
    _FREEZABLE_VARS[graph].add(x)
  return x
@keras_export('keras.backend.shape')
def shape(x):
  """Returns the symbolic shape of a tensor or variable.

  Arguments:
    x: A tensor or variable.

  Returns:
    A symbolic shape (which is itself a tensor).

  Examples:
  ```python
  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = K.variable(value=val)
  >>> K.shape(kvar)
  <tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
  >>> K.shape(kvar).eval(session=tf_session)
  array([2, 2], dtype=int32)
  # For integer shapes, you can also use K.int_shape(x).
  ```
  """
  symbolic_shape = array_ops.shape(x)
  return symbolic_shape
@keras_export('keras.backend.int_shape')
def int_shape(x):
  """Returns the shape of tensor or variable as a tuple of int or None entries.

  Arguments:
    x: Tensor or variable.

  Returns:
    A tuple of integers (or None entries).

  Examples:
  ```python
  >>> from keras import backend as K
  >>> input = K.placeholder(shape=(2, 4, 5))
  >>> K.int_shape(input)
  (2, 4, 5)
  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = K.variable(value=val)
  >>> K.int_shape(kvar)
  (2, 2)
  ```
  """
  try:
    raw_shape = x.shape
    if isinstance(raw_shape, tuple):
      return raw_shape
    # TensorShape-like objects convert to a list of int/None entries.
    return tuple(raw_shape.as_list())
  except ValueError:
    # Shapes of unknown rank raise ValueError on conversion.
    return None
@keras_export('keras.backend.ndim')
def ndim(x):
  """Returns the number of axes in a tensor, as an integer.

  Arguments:
    x: Tensor or variable.

  Returns:
    Integer (scalar), number of axes.

  Examples:
  ```python
  >>> from keras import backend as K
  >>> input = K.placeholder(shape=(2, 4, 5))
  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = K.variable(value=val)
  >>> K.ndim(input)
  3
  >>> K.ndim(kvar)
  2
  ```
  """
  dims = x.shape._dims
  # `_dims` is None when the rank itself is unknown.
  return None if dims is None else len(dims)
@keras_export('keras.backend.dtype')
def dtype(x):
  """Returns the dtype of a Keras tensor or variable, as a string.

  Arguments:
    x: Tensor or variable.

  Returns:
    String, dtype of `x`.

  Examples:
  ```python
  >>> from keras import backend as K
  >>> K.dtype(K.placeholder(shape=(2,4,5)))
  'float32'
  >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
  'float64'
  >>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
  >>> K.dtype(kvar)
  'float32'
  ```
  """
  # Strip any reference-ness by going through the base dtype.
  base = x.dtype.base_dtype
  return base.name
@keras_export('keras.backend.eval')
def eval(x):
  """Evaluates the value of a variable.

  Arguments:
    x: A variable.

  Returns:
    A Numpy array.

  Examples:
  ```python
  >>> from keras import backend as K
  >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
  >>> K.eval(kvar)
  array([[ 1.,  2.],
         [ 3.,  4.]], dtype=float32)
  ```
  """
  # Densify first so sparse inputs evaluate to ordinary numpy arrays.
  dense = to_dense(x)
  return get_value(dense)
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
  """Instantiates an all-zeros variable and returns it.

  Arguments:
    shape: Tuple of integers, shape of returned Keras variable
    dtype: String, data type of returned Keras variable
    name: String, name of returned Keras variable

  Returns:
    A variable (including Keras metadata), filled with `0.0`.
    Note that if `shape` was symbolic, we cannot return a variable,
    and will return a dynamically-shaped tensor instead.

  Example:
  ```python
  >>> from keras import backend as K
  >>> kvar = K.zeros((3,4))
  >>> K.eval(kvar)
  array([[ 0.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  0.]], dtype=float32)
  ```
  """
  with ops.init_scope():
    if dtype is None:
      dtype = floatx()
    tf_dtype = dtypes_module.as_dtype(dtype)
    v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
    # Any None/symbolic dimension forces a tensor result instead of a
    # variable, since variables need a fully-defined static shape.
    if not py_all(v.shape.as_list()):
      track_variable(v)
      return v
    return variable(v, dtype=dtype, name=name)
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
  """Instantiates an all-ones variable and returns it.

  Arguments:
    shape: Tuple of integers, shape of returned Keras variable.
    dtype: String, data type of returned Keras variable.
    name: String, name of returned Keras variable.

  Returns:
    A Keras variable, filled with `1.0`.
    Note that if `shape` was symbolic, we cannot return a variable,
    and will return a dynamically-shaped tensor instead.

  Example:
  ```python
  >>> from keras import backend as K
  >>> kvar = K.ones((3,4))
  >>> K.eval(kvar)
  array([[ 1.,  1.,  1.,  1.],
         [ 1.,  1.,  1.,  1.],
         [ 1.,  1.,  1.,  1.]], dtype=float32)
  ```
  """
  with ops.init_scope():
    if dtype is None:
      dtype = floatx()
    tf_dtype = dtypes_module.as_dtype(dtype)
    v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
    # Any None/symbolic dimension forces a tensor result instead of a
    # variable, since variables need a fully-defined static shape.
    if not py_all(v.shape.as_list()):
      track_variable(v)
      return v
    return variable(v, dtype=dtype, name=name)
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
  """Instantiate an identity matrix and returns it.

  Arguments:
    size: Integer, number of rows/columns.
    dtype: String, data type of returned Keras variable.
    name: String, name of returned Keras variable.

  Returns:
    A Keras variable, an identity matrix.

  Example:
  ```python
  >>> from keras import backend as K
  >>> kvar = K.eye(3)
  >>> K.eval(kvar)
  array([[ 1.,  0.,  0.],
         [ 0.,  1.,  0.],
         [ 0.,  0.,  1.]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  identity_matrix = linalg_ops.eye(size, dtype=dtypes_module.as_dtype(dtype))
  return variable(identity_matrix, dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
  """Instantiates an all-zeros variable of the same shape as another tensor.

  Arguments:
    x: Keras variable or Keras tensor.
    dtype: String, dtype of returned Keras variable.
      None uses the dtype of x.
    name: String, name for the variable to create.

  Returns:
    A Keras variable with the shape of x filled with zeros.

  Example:
  ```python
  >>> from keras import backend as K
  >>> kvar = K.variable(np.random.random((2,3)))
  >>> K.eval(K.zeros_like(kvar))
  array([[ 0.,  0.,  0.],
         [ 0.,  0.,  0.]], dtype=float32)
  ```
  """
  result = array_ops.zeros_like(x, dtype=dtype, name=name)
  return result
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
  """Instantiates an all-ones variable of the same shape as another tensor.

  Arguments:
    x: Keras variable or tensor.
    dtype: String, dtype of returned Keras variable.
      None uses the dtype of x.
    name: String, name for the variable to create.

  Returns:
    A Keras variable with the shape of x filled with ones.

  Example:
  ```python
  >>> from keras import backend as K
  >>> kvar = K.variable(np.random.random((2,3)))
  >>> K.eval(K.ones_like(kvar))
  array([[ 1.,  1.,  1.],
         [ 1.,  1.,  1.]], dtype=float32)
  ```
  """
  result = array_ops.ones_like(x, dtype=dtype, name=name)
  return result
def identity(x, name=None):
  """Returns a tensor with the same content as the input tensor.

  Arguments:
    x: The input tensor.
    name: String, name for the variable to create.

  Returns:
    A tensor of the same shape, type and content.
  """
  copied = array_ops.identity(x, name=name)
  return copied
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiates a variable with values drawn from a uniform distribution.

  Arguments:
    shape: Tuple of integers, shape of returned Keras variable.
    low: Float, lower boundary of the output interval.
    high: Float, upper boundary of the output interval.
    dtype: String, dtype of returned Keras variable.
    name: String, name of returned Keras variable.
    seed: Integer, random seed.

  Returns:
    A Keras variable, filled with drawn samples.

  Example:
  ```python
  >>> kvar = K.random_uniform_variable((2,3), 0, 1)
  >>> K.eval(kvar)
  array([[ 0.10940075,  0.10047495,  0.476143  ],
         [ 0.66137183,  0.00869417,  0.89220798]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # Ensure that randomness is conditioned by the Numpy RNG. Use an integer
    # bound (the original `10e8` is a float, which `np.random.randint` only
    # accepts by implicit truncation).
    seed = np.random.randint(10**9)
  value = init_ops.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Arguments:
    shape: Tuple of integers, shape of returned Keras variable.
    mean: Float, mean of the normal distribution.
    scale: Float, standard deviation of the normal distribution.
    dtype: String, dtype of returned Keras variable.
    name: String, name of returned Keras variable.
    seed: Integer, random seed.

  Returns:
    A Keras variable, filled with drawn samples.

  Example:
  ```python
  >>> kvar = K.random_normal_variable((2,3), 0, 1)
  >>> K.eval(kvar)
  array([[ 1.19591331,  0.68685907, -0.63814116],
         [ 0.92629528,  0.28055015,  1.70484698]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # Ensure that randomness is conditioned by the Numpy RNG. Use an integer
    # bound (the original `10e8` is a float, which `np.random.randint` only
    # accepts by implicit truncation).
    seed = np.random.randint(10**9)
  value = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
  """Returns the static number of elements in a variable or tensor.

  Arguments:
    x: Variable or tensor.

  Returns:
    Integer, the number of scalars in `x`.

  Example:
  ```python
  >>> kvar = K.zeros((2,3))
  >>> K.count_params(kvar)
  6
  ```
  """
  static_shape = x.shape.as_list()
  return np.prod(static_shape)
@keras_export('keras.backend.cast')
def cast(x, dtype):
  """Casts a tensor to a different dtype and returns it.

  You can cast a Keras variable but it still returns a Keras tensor.

  Arguments:
    x: Keras tensor (or variable).
    dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).

  Returns:
    Keras tensor with dtype `dtype`.

  Example:
  ```python
  >>> from tensorflow.keras import backend as K
  >>> input = K.ones(shape=(1,3))
  >>> cast_input = K.cast(input, dtype='float64')
  >>> print(cast_input)
  tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
  ```
  """
  converted = math_ops.cast(x, dtype)
  return converted
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
  """Update the value of `x` to `new_x`.

  Arguments:
    x: A Variable.
    new_x: A tensor of same shape as `x`.

  Returns:
    The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
  """Update the value of `x` by adding `increment`.

  Arguments:
    x: A Variable.
    increment: A tensor of same shape as `x`.

  Returns:
    The variable `x` updated.
  """
  update_op = state_ops.assign_add(x, increment)
  return update_op
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
  """Update the value of `x` by subtracting `decrement`.

  Arguments:
    x: A Variable.
    decrement: A tensor of same shape as `x`.

  Returns:
    The variable `x` updated.
  """
  update_op = state_ops.assign_sub(x, decrement)
  return update_op
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
  """Compute the moving average of a variable.

  Arguments:
    x: A Variable, the moving-average accumulator to update in place.
    value: A tensor with the same shape as `x`, the new observation.
    momentum: The moving average momentum.

  Returns:
    An Operation to update the variable.
  """
  # `training` is higher-up than the Keras backend in the abstraction hierarchy.
  # In particular, `training` depends on layers, and thus on Keras.
  # moving_averages, being low-level ops, should not be part of the training
  # module.
  from tensorflow.python.training import moving_averages  # pylint: disable=g-import-not-at-top
  return moving_averages.assign_moving_average(
      x, value, momentum, zero_debias=True)
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
  """Multiplies 2 tensors (and/or variables) and returns a *tensor*.

  When attempting to multiply a nD tensor
  with a nD tensor, it reproduces the Theano behavior.
  (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)

  Arguments:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A tensor, dot product of `x` and `y`.

  Examples:
  ```python
  # dot product between tensors
  >>> x = K.placeholder(shape=(2, 3))
  >>> y = K.placeholder(shape=(3, 4))
  >>> xy = K.dot(x, y)
  >>> xy
  <tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
  ```

  ```python
  # dot product between tensors
  >>> x = K.placeholder(shape=(32, 28, 3))
  >>> y = K.placeholder(shape=(3, 4))
  >>> xy = K.dot(x, y)
  >>> xy
  <tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
  ```

  ```python
  # Theano-like behavior example
  >>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
  >>> y = K.ones((4, 3, 5))
  >>> xy = K.dot(x, y)
  >>> K.int_shape(xy)
  (2, 4, 5)
  ```
  """
  if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
    # Theano-style path for rank > 2: flatten both operands to matrices,
    # run one matmul, then restore the combined output shape.
    # Build per-axis shape lists keeping static ints where known and falling
    # back to dynamic shape tensors, so the final reshape preserves as much
    # static shape information as possible.
    x_shape = []
    for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
        y_shape.append(s)
    y_shape = tuple(y_shape)
    # Move `y`'s contraction axis (second-to-last) to the front so the
    # flattened `y` has that axis as its rows.
    y_permute_dim = list(range(ndim(y)))
    y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
    xt = array_ops.reshape(x, [-1, x_shape[-1]])
    yt = array_ops.reshape(
        array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
    # Output shape: x's leading dims + y's dims minus its contraction axis.
    return array_ops.reshape(
        math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
  if is_sparse(x):
    out = sparse_ops.sparse_tensor_dense_matmul(x, y)
  else:
    out = math_ops.matmul(x, y)
  return out
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
  """Batchwise dot product.

  `batch_dot` is used to compute dot product of `x` and `y` when
  `x` and `y` are data in batch, i.e. in a shape of
  `(batch_size, :)`.
  `batch_dot` results in a tensor or variable with less dimensions
  than the input. If the number of dimensions is reduced to 1,
  we use `expand_dims` to make sure that ndim is at least 2.

  Arguments:
    x: Keras tensor or variable with `ndim >= 2`.
    y: Keras tensor or variable with `ndim >= 2`.
    axes: list of (or single) int with target dimensions.
      The lengths of `axes[0]` and `axes[1]` should be the same.

  Returns:
    A tensor with shape equal to the concatenation of `x`'s shape
    (less the dimension that was summed over) and `y`'s shape
    (less the batch dimension and the dimension that was summed over).
    If the final rank is 1, we reshape it to `(batch_size, 1)`.

  Examples:
    Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
    `batch_dot(x, y, axes=1) = [[17, 53]]` which is the main diagonal
    of `x.dot(y.T)`, although we never have to calculate the off-diagonal
    elements.

    Shape inference:
    Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
    If `axes` is (1, 2), to find the output shape of resultant tensor,
    loop through each dimension in `x`'s shape and `y`'s shape:
    * `x.shape[0]` : 100 : append to output shape
    * `x.shape[1]` : 20 : do not append to output shape,
      dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
    * `y.shape[0]` : 100 : do not append to output shape,
      always ignore first dimension of `y`
    * `y.shape[1]` : 30 : append to output shape
    * `y.shape[2]` : 20 : do not append to output shape,
      dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
    `output_shape` = `(100, 30)`

  ```python
  >>> x_batch = K.ones(shape=(32, 20, 1))
  >>> y_batch = K.ones(shape=(32, 30, 20))
  >>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
  >>> K.int_shape(xy_batch_dot)
  (32, 1, 30)
  ```
  """
  if isinstance(axes, int):
    # A single int means "contract this axis on both operands".
    axes = (axes, axes)
  x_ndim = ndim(x)
  y_ndim = ndim(y)
  if axes is None:
    # behaves like tf.batch_matmul as default
    axes = [x_ndim - 1, y_ndim - 2]
  if x_ndim > y_ndim:
    # Pad the lower-rank operand with trailing singleton dimensions so both
    # operands have equal rank; the extra axes are squeezed back out below.
    diff = x_ndim - y_ndim
    y = array_ops.reshape(y,
                          array_ops.concat(
                              [array_ops.shape(y), [1] * (diff)], axis=0))
  elif y_ndim > x_ndim:
    diff = y_ndim - x_ndim
    x = array_ops.reshape(x,
                          array_ops.concat(
                              [array_ops.shape(x), [1] * (diff)], axis=0))
  else:
    diff = 0
  if ndim(x) == 2 and ndim(y) == 2:
    # Rank-2 case: per-sample dot product via elementwise multiply + sum,
    # which avoids materializing the full `x.dot(y.T)` matrix.
    if axes[0] == axes[1]:
      out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
    else:
      out = math_ops.reduce_sum(
          math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
  else:
    # Use matmul's adjoint flags to contract the requested axes without
    # emitting explicit transpose ops.
    adj_x = None if axes[0] == ndim(x) - 1 else True
    adj_y = True if axes[1] == ndim(y) - 1 else None
    out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
  if diff:
    # Remove the singleton axes added above to equalize ranks.
    if x_ndim > y_ndim:
      idx = x_ndim + y_ndim - 3
    else:
      idx = x_ndim - 1
    out = array_ops.squeeze(out, list(range(idx, idx + diff)))
  if ndim(out) == 1:
    # Guarantee at least rank 2: (batch_size, 1).
    out = expand_dims(out, 1)
  return out
@keras_export('keras.backend.transpose')
def transpose(x):
  """Transposes a tensor and returns it.

  Arguments:
    x: Tensor or variable.

  Returns:
    A tensor.

  Examples:
  ```python
  >>> var = K.variable([[1, 2, 3], [4, 5, 6]])
  >>> K.eval(K.transpose(var))
  array([[ 1.,  4.],
         [ 2.,  5.],
         [ 3.,  6.]], dtype=float32)
  >>> input = K.placeholder((2, 3))
  >>> K.transpose(input)
  <tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
  ```
  """
  transposed = array_ops.transpose(x)
  return transposed
@keras_export('keras.backend.gather')
def gather(reference, indices):
  """Retrieves the elements of indices `indices` in the tensor `reference`.

  Arguments:
    reference: A tensor.
    indices: An integer tensor of indices.

  Returns:
    A tensor of same type as `reference`.
  """
  gathered = array_ops.gather(reference, indices)
  return gathered
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to find maximum values.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with maximum values of `x`.
  """
  return math_ops.reduce_max(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
  """Minimum value in a tensor.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to find minimum values.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with minimum values of `x`.
  """
  return math_ops.reduce_min(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
  """Sum of the values in a tensor, alongside the specified axis.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to sum over.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with sum of `x`.
  """
  return math_ops.reduce_sum(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
  """Multiplies the values in a tensor, alongside the specified axis.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to compute the product.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with the product of elements of `x`.
  """
  return math_ops.reduce_prod(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
  """Cumulative sum of the values in a tensor, alongside the specified axis.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to compute the sum.

  Returns:
    A tensor of the cumulative sum of values of `x` along `axis`.
  """
  result = math_ops.cumsum(x, axis=axis)
  return result
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
  """Cumulative product of the values in a tensor, alongside the specified axis.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to compute the product.

  Returns:
    A tensor of the cumulative product of values of `x` along `axis`.
  """
  result = math_ops.cumprod(x, axis=axis)
  return result
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to compute the variance.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with the variance of elements of `x`.
  """
  tensor = x
  if tensor.dtype.base_dtype == dtypes_module.bool:
    # Booleans have no arithmetic variance; compute on floats instead.
    tensor = math_ops.cast(tensor, floatx())
  return math_ops.reduce_variance(tensor, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
  """Standard deviation of a tensor, alongside the specified axis.

  Arguments:
    x: A tensor or variable.
    axis: An integer, the axis to compute the standard deviation.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with the standard deviation of elements of `x`.
  """
  tensor = x
  if tensor.dtype.base_dtype == dtypes_module.bool:
    # Booleans have no arithmetic deviation; compute on floats instead.
    tensor = math_ops.cast(tensor, floatx())
  return math_ops.reduce_std(tensor, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
  """Computes the mean of `x` along the given axes.

  Arguments:
      x: A tensor or variable.
      axis: A list of integer. Axes to average over.
      keepdims: A boolean. When `False` the rank drops by 1 for every
          entry in `axis`; when `True` the reduced axes keep length 1.

  Returns:
      A tensor holding the mean of the elements of `x`.
  """
  # The mean of a boolean tensor is its fraction of True entries, so
  # promote booleans to the float dtype first.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
  """Logical OR reduction.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
  """Logical AND reduction.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
  """Finds the index of the largest value along an axis.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor of indices.
  """
  return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
  """Finds the index of the smallest value along an axis.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor of indices.
  """
  return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
  """Squares every element of `x`.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of element-wise squares.
  """
  return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
  """Takes the absolute value of every element of `x`.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of element-wise absolute values.
  """
  return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
  """Takes the element-wise square root of `x`.

  Negative entries are clipped to zero before the root is taken, so the
  result never contains NaNs from negative inputs.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of element-wise square roots.
  """
  zero = _constant_to_tensor(0., x.dtype.base_dtype)
  inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
  clipped = clip_ops.clip_by_value(x, zero, inf)
  return math_ops.sqrt(clipped)
@keras_export('keras.backend.exp')
def exp(x):
  """Exponentiates every element of `x`.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of element-wise exponentials.
  """
  return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
  """Takes the natural logarithm of every element of `x`.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of element-wise logarithms.
  """
  return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Numerically more stable than the naive `log(sum(exp(x)))`: it avoids
  overflow from exponentiating large values and underflow from taking the
  log of tiny sums.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to reduce over.
      keepdims: A boolean. When `False` the reduced axis is dropped from
          the rank; when `True` it is retained with length 1.

  Returns:
      The reduced tensor.
  """
  return math_ops.reduce_logsumexp(x, axis, keepdims)
@keras_export('keras.backend.round')
def round(x):
  """Rounds every element of `x` to the nearest integer.

  Ties are resolved with banker's rounding ("half to even").

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of rounded values.
  """
  return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
  """Takes the element-wise sign of `x`.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of signs.
  """
  return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
  """Raises every element of `x` to the power `a`.

  Arguments:
      x: Tensor or variable.
      a: Python integer.

  Returns:
      A tensor of element-wise powers.
  """
  return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
  """Element-wise value clipping.

  Arguments:
      x: Tensor or variable.
      min_value: Python float, integer, or None. `None` means no lower
          bound (equivalent to `-inf`).
      max_value: Python float, integer, or None. `None` means no upper
          bound (equivalent to `inf`).

  Returns:
      A tensor with the values of `x` clipped to `[min_value, max_value]`.
  """
  # Only compare the bounds when both are concrete numbers: comparing
  # against None raises a TypeError on Python 3. An inverted range is
  # collapsed to the lower bound, matching the original behavior.
  if (max_value is not None and min_value is not None and
      max_value < min_value):
    max_value = min_value
  # Treat a missing bound as +/- infinity so one-sided clipping works
  # (the original code crashed in `_constant_to_tensor` for a None
  # min_value).
  if min_value is None:
    min_value = -np.inf
  if max_value is None:
    max_value = np.inf
  min_value = _constant_to_tensor(min_value, x.dtype.base_dtype)
  max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
  return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
def equal(x, y):
  """Compares two tensors element-wise for equality.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
  """Compares two tensors element-wise for inequality.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
  """Computes the element-wise truth value of `x > y`.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
  """Computes the element-wise truth value of `x >= y`.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
  """Computes the element-wise truth value of `x < y`.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
  """Computes the element-wise truth value of `x <= y`.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
  """Takes the element-wise maximum of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
  """Takes the element-wise minimum of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
  """Takes the element-wise sine of `x`.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
  """Takes the element-wise cosine of `x`.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
                                         gamma,
                                         beta,
                                         reduction_axes,
                                         epsilon=1e-3):
  """Non-fused version of `normalize_batch_in_training`.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  # Compute the batch statistics over the reduction axes, then apply the
  # plain (non-fused) batch-norm kernel with them.
  mean, var = nn.moments(x, reduction_axes, None, None, False)
  normalized = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  return normalized, mean, var
def _broadcast_normalize_batch_in_training(x,
                                           gamma,
                                           beta,
                                           reduction_axes,
                                           epsilon=1e-3):
  """Non-fused, broadcast version of `normalize_batch_in_training`.

  Used when the reduction axes are not simply all-but-the-last, so the
  statistics must be reshaped to broadcast against `x`.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers,
          axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  mean, var = nn.moments(x, reduction_axes, None, None, False)
  # Build a shape with 1 on every reduced axis so mean/var/gamma/beta
  # broadcast against `x`.
  target_shape = []
  for axis in range(ndim(x)):
    if axis in reduction_axes:
      target_shape.append(1)
    else:
      target_shape.append(array_ops.shape(x)[axis])
  target_shape = array_ops.stack(target_shape)

  broadcast_mean = array_ops.reshape(mean, target_shape)
  broadcast_var = array_ops.reshape(var, target_shape)
  # gamma/beta are optional; `nn.batch_normalization` accepts None to mean
  # "no scale" / "no offset".
  if gamma is None:
    broadcast_gamma = None
  else:
    broadcast_gamma = array_ops.reshape(gamma, target_shape)
  if beta is None:
    broadcast_beta = None
  else:
    broadcast_beta = array_ops.reshape(beta, target_shape)

  normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                  broadcast_beta, broadcast_gamma, epsilon)
  # Note: the *unreshaped* mean/var are returned to the caller.
  return normed, mean, var
def _fused_normalize_batch_in_training(x,
                                       gamma,
                                       beta,
                                       reduction_axes,
                                       epsilon=1e-3):
  """Fused version of `normalize_batch_in_training`.

  Only called for 4D inputs whose reduction axes are [0, 1, 2] (NHWC) or
  [0, 2, 3] (NCHW); see `normalize_batch_in_training` for the dispatch.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers,
          axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  # Infer the data format from which axis is NOT reduced (the channel axis).
  if list(reduction_axes) == [0, 1, 2]:
    normalization_axis = 3
    tf_data_format = 'NHWC'
  else:
    normalization_axis = 1
    tf_data_format = 'NCHW'
  # The fused kernel requires concrete scale/offset tensors, so substitute
  # identity scale (1) and zero offset when the caller passed None.
  if gamma is None:
    gamma = constant_op.constant(
        1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
  if beta is None:
    beta = constant_op.constant(
        0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
  # fused_batch_norm already returns (normalized, mean, variance).
  return nn.fused_batch_norm(
      x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then apply batch_normalization on batch.

  Dispatches to the fused kernel for 4D inputs in NHWC/NCHW layout, and
  otherwise to the regular or broadcast non-fused implementation.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers,
          axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
    # [0, 2, 3] means NCHW; the fused kernel only supports that layout on
    # devices with NCHW support, so fall back to the broadcast path
    # otherwise.
    if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
      return _broadcast_normalize_batch_in_training(
          x, gamma, beta, reduction_axes, epsilon=epsilon)
    return _fused_normalize_batch_in_training(
        x, gamma, beta, reduction_axes, epsilon=epsilon)
  else:
    # The regular path requires the reduction axes to be exactly all axes
    # except the last one; anything else needs explicit broadcasting.
    if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
      return _regular_normalize_batch_in_training(
          x, gamma, beta, reduction_axes, epsilon=epsilon)
    else:
      return _broadcast_normalize_batch_in_training(
          x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
  """Applies batch normalization on x given mean, var, beta and gamma.

  I.e. returns:
  `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`

  Arguments:
      x: Input tensor or variable.
      mean: Mean of batch.
      var: Variance of batch.
      beta: Tensor with which to center the input.
      gamma: Tensor by which to scale the input.
      axis: Integer, the axis that should be normalized.
          (typically the features axis).
      epsilon: Fuzz factor.

  Returns:
      A tensor.
  """
  if ndim(x) == 4:
    # The CPU implementation of `fused_batch_norm` only supports NHWC
    if axis == 1 or axis == -3:
      tf_data_format = 'NCHW'
    elif axis == 3 or axis == -1:
      tf_data_format = 'NHWC'
    else:
      tf_data_format = None
    # Precedence note: `and` binds tighter than `or`, so this reads as
    # NHWC, or (NCHW and the device supports NCHW).
    if (tf_data_format == 'NHWC' or
        tf_data_format == 'NCHW' and _has_nchw_support()):
      # The mean / var / beta / gamma tensors may be broadcasted
      # so they may have extra axes of size 1, which should be squeezed.
      if ndim(mean) > 1:
        mean = array_ops.reshape(mean, [-1])
      if ndim(var) > 1:
        var = array_ops.reshape(var, [-1])
      # The fused kernel needs concrete offset/scale vectors; substitute
      # zeros/ones when the caller passed None.
      if beta is None:
        beta = zeros_like(mean)
      elif ndim(beta) > 1:
        beta = array_ops.reshape(beta, [-1])
      if gamma is None:
        gamma = ones_like(mean)
      elif ndim(gamma) > 1:
        gamma = array_ops.reshape(gamma, [-1])
      # is_training=False: use the supplied statistics instead of
      # recomputing them from the batch.
      y, _, _ = nn.fused_batch_norm(
          x,
          gamma,
          beta,
          epsilon=epsilon,
          mean=mean,
          variance=var,
          data_format=tf_data_format,
          is_training=False
      )
      return y
  # Generic fallback for non-4D inputs or unsupported layouts.
  return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
  """Joins a list of tensors along the given axis.

  Arguments:
      tensors: list of tensors to concatenate.
      axis: concatenation axis (negative values count from the end).

  Returns:
      A tensor.
  """
  # Resolve a negative axis against the rank of the first tensor.
  if axis < 0:
    rank = ndim(tensors[0])
    axis = axis % rank if rank else 0
  # Keep the result sparse only when every input is sparse; a mixed list
  # is densified before concatenation.
  if py_all(is_sparse(x) for x in tensors):
    return sparse_ops.sparse_concat(axis, tensors)
  return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
  """Gives `x` a new shape without changing its data.

  Arguments:
      x: Tensor or variable.
      shape: Target shape tuple.

  Returns:
      A tensor.
  """
  return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
  """Reorders the axes of a tensor.

  Arguments:
      x: Tensor or variable.
      pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`.

  Returns:
      A tensor.
  """
  return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
                  interpolation='nearest'):
  """Resizes the images contained in a 4D tensor.

  Arguments:
      x: Tensor or variable to resize.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.
      interpolation: A string, one of `nearest` or `bilinear`.

  Returns:
      A tensor.

  Raises:
      ValueError: in case of incorrect value for
        `data_format` or `interpolation`.
  """
  # Locate the spatial (row/column) axes for the given layout.
  if data_format == 'channels_first':
    rows, cols = 2, 3
  elif data_format == 'channels_last':
    rows, cols = 1, 2
  else:
    raise ValueError('Invalid `data_format` argument: %s' % (data_format,))

  original_shape = int_shape(x)
  # Dynamic spatial size scaled by the integer factors.
  new_shape = array_ops.shape(x)[rows:cols + 1]
  new_shape *= constant_op.constant(
      np.array([height_factor, width_factor], dtype='int32'))

  # The image resize ops only operate on NHWC, so transpose NCHW inputs
  # around the resize and back afterwards.
  if data_format == 'channels_first':
    x = permute_dimensions(x, [0, 2, 3, 1])
  if interpolation == 'nearest':
    x = image_ops.resize_nearest_neighbor(x, new_shape)
  elif interpolation == 'bilinear':
    x = image_ops.resize_bilinear(x, new_shape)
  else:
    raise ValueError('interpolation should be one '
                     'of "nearest" or "bilinear".')
  if data_format == 'channels_first':
    x = permute_dimensions(x, [0, 3, 1, 2])

  # Restore as much static shape information as the input had: dimensions
  # that were statically known scale by the factor, unknown ones stay None.
  if original_shape[rows] is None:
    new_height = None
  else:
    new_height = original_shape[rows] * height_factor
  if original_shape[cols] is None:
    new_width = None
  else:
    new_width = original_shape[cols] * width_factor

  if data_format == 'channels_first':
    output_shape = (None, None, new_height, new_width)
  else:
    output_shape = (None, new_height, new_width, None)
  x.set_shape(output_shape)
  return x
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
  """Resizes the volume contained in a 5D tensor.

  Arguments:
      x: Tensor or variable to resize.
      depth_factor: Positive integer.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.

  Returns:
      A tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  # The three spatial axes start at 2 for channels_first (N, C, D, H, W)
  # and at 1 for channels_last (N, D, H, W, C).
  if data_format == 'channels_first':
    first_spatial_axis = 2
  elif data_format == 'channels_last':
    first_spatial_axis = 1
  else:
    raise ValueError('Invalid data_format: ' + str(data_format))
  output = repeat_elements(x, depth_factor, axis=first_spatial_axis)
  output = repeat_elements(output, height_factor, axis=first_spatial_axis + 1)
  return repeat_elements(output, width_factor, axis=first_spatial_axis + 2)
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
  """Repeats the elements of a tensor along an axis, like `np.repeat`.

  If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
  will have shape `(s1, s2 * rep, s3)`.

  Arguments:
      x: Tensor or variable.
      rep: Python integer, number of times to repeat.
      axis: Axis along which to repeat.

  Returns:
      A tensor.
  """
  x_shape = x.shape.as_list()
  # For static axis
  if x_shape[axis] is not None:
    # slices along the repeat axis
    splits = array_ops.split(value=x,
                             num_or_size_splits=x_shape[axis],
                             axis=axis)
    # repeat each slice the given number of reps
    x_rep = [s for s in splits for _ in range(rep)]
    return concatenate(x_rep, axis)

  # Here we use tf.tile to mimic behavior of np.repeat so that
  # we can handle dynamic shapes (that include None).
  # To do that, we need an auxiliary axis to repeat elements along
  # it and then merge them along the desired axis.

  # Repeating: insert a length-1 axis right after `axis` and tile `rep`
  # times along it, so each element gains `rep` copies adjacent to it.
  auxiliary_axis = axis + 1
  x_shape = array_ops.shape(x)
  x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
  reps = np.ones(len(x.shape) + 1)
  reps[auxiliary_axis] = rep
  x_rep = array_ops.tile(x_rep, reps)

  # Merging: collapse the auxiliary axis back into `axis` by reshaping to
  # the original dynamic shape with `axis` scaled by `rep`.
  reps = np.delete(reps, auxiliary_axis)
  reps[axis] = rep
  reps = array_ops.constant(reps, dtype='int32')
  x_shape *= reps
  x_rep = array_ops.reshape(x_rep, x_shape)

  # Fix shape representation: the reshape above loses static shape info,
  # so re-attach whatever was statically known about the input.
  x_shape = x.shape.as_list()
  x_rep.set_shape(x_shape)
  x_rep._keras_shape = tuple(x_shape)
  return x_rep
@keras_export('keras.backend.repeat')
def repeat(x, n):
  """Repeats a 2D tensor `n` times along a new middle axis.

  If `x` has shape `(samples, dim)` and `n` is `2`, the output has shape
  `(samples, 2, dim)`.

  Arguments:
      x: Tensor or variable.
      n: Python integer, number of times to repeat.

  Returns:
      A tensor.
  """
  assert ndim(x) == 2
  expanded = array_ops.expand_dims(x, 1)
  multiples = array_ops.stack([1, n, 1])
  return array_ops.tile(expanded, multiples)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
  """Builds a 1D tensor containing a sequence of integers.

  Follows Theano's `arange` convention: when only one argument is given
  it is interpreted as `stop`, and the sequence starts at 0. The result
  defaults to `'int32'`, matching TensorFlow's default.

  Arguments:
      start: Start value.
      stop: Stop value.
      step: Difference between two successive values.
      dtype: Integer dtype to use.

  Returns:
      An integer tensor.
  """
  # Mirror numpy/Theano: a single negative argument yields an empty
  # sequence instead of counting from a negative start.
  if stop is None and start < 0:
    start = 0
  result = math_ops.range(start, limit=stop, delta=step, name='arange')
  if dtype != 'int32':
    result = cast(result, dtype)
  return result
@keras_export('keras.backend.tile')
def tile(x, n):
  """Builds a tensor by tiling `x` by `n`.

  Arguments:
      x: A tensor or variable
      n: A list of integer. The length must be the same as the number of
          dimensions in `x`. A bare integer is wrapped in a list.

  Returns:
      A tiled tensor.
  """
  multiples = [n] if isinstance(n, int) else n
  return array_ops.tile(x, multiples)
@keras_export('keras.backend.flatten')
def flatten(x):
  """Collapses a tensor to one dimension.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor, reshaped into 1-D
  """
  return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
  """Reshapes an nD tensor to 2D, keeping the 0th (batch) dimension.

  Each sample in the batch is flattened into a single vector.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.

  Examples:
    Flattening a 3D tensor to 2D by collapsing the last dimension.

  ```python
  >>> from tensorflow.keras import backend as K
  >>> x_batch = K.ones(shape=(2, 3, 4, 5))
  >>> x_batch_flatten = K.batch_flatten(x_batch)
  >>> K.int_shape(x_batch_flatten)
  (2, 60)
  ```
  """
  return array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
  """Inserts a length-1 dimension at index "axis".

  Arguments:
      x: A tensor or variable.
      axis: Position where to add a new axis.

  Returns:
      A tensor with expanded dimensions.
  """
  return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
  """Drops the length-1 dimension at index "axis".

  Arguments:
      x: A tensor or variable.
      axis: Axis to drop.

  Returns:
      A tensor with the same data as `x` but reduced dimensions.
  """
  return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
  """Zero-pads the middle (time) dimension of a 3D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 integers, how many zeros to
          add at the start and end of dim 1.

  Returns:
      A padded 3D tensor.
  """
  assert len(padding) == 2
  # Only the time axis (dim 1) is padded; batch and feature axes are not.
  return array_ops.pad(x, [[0, 0], list(padding), [0, 0]])
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Zero-pads the two spatial dimensions of a 4D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 4D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  spatial = [list(padding[0]), list(padding[1])]
  if data_format == 'channels_first':
    # Layout (N, C, H, W): spatial axes are the last two.
    pattern = [[0, 0], [0, 0]] + spatial
  else:
    # Layout (N, H, W, C): spatial axes sit between batch and channels.
    pattern = [[0, 0]] + spatial + [[0, 0]]
  return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads 5D tensor with zeros along the depth, height, width dimensions.

  Pads these dimensions with respectively
  "padding[0]", "padding[1]" and "padding[2]" zeros left and right.
  For 'channels_last' data_format,
  the 2nd, 3rd and 4th dimension will be padded.
  For 'channels_first' data_format,
  the 3rd, 4th and 5th dimension will be padded.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 3 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 5D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  spatial = [list(pair) for pair in padding]
  if data_format == 'channels_first':
    # Layout (N, C, D, H, W): spatial axes are the last three.
    pattern = [[0, 0], [0, 0]] + spatial
  else:
    # Layout (N, D, H, W, C): spatial axes sit between batch and channels.
    pattern = [[0, 0]] + spatial + [[0, 0]]
  return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
  """Stacks a list of rank `R` tensors into one rank `R+1` tensor.

  Arguments:
      x: List of tensors.
      axis: Axis along which to perform stacking.

  Returns:
      A tensor.
  """
  return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
  """Computes the one-hot representation of an integer tensor.

  Arguments:
      indices: nD integer tensor of shape
          `(batch_size, dim1, dim2, ... dim(n-1))`
      num_classes: Integer, number of classes to consider.

  Returns:
      (n + 1)D one hot representation of the input
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`
  """
  return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@keras_export('keras.backend.reverse')
def reverse(x, axes):
  """Reverses a tensor along the specified axes.

  Arguments:
      x: Tensor to reverse.
      axes: Integer or iterable of integers.
          Axes to reverse.

  Returns:
      A tensor.
  """
  axes = [axes] if isinstance(axes, int) else axes
  return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@keras_export('keras.backend.get_value')
def get_value(x):
  """Returns the value of a variable.

  Arguments:
      x: input variable.

  Returns:
      A Numpy array.
  """
  # Non-tensor inputs (plain Python/numpy values) pass through untouched.
  if not tensor_util.is_tensor(x):
    return x
  if context.executing_eagerly():
    return x.numpy()
  if not getattr(x, '_in_graph_mode', True):
    # This is a variable which was created in an eager context, but is being
    # evaluated from a Graph.
    with context.eager_mode():
      return x.numpy()
  if ops.executing_eagerly_outside_functions():
    # This method of evaluating works inside the Keras FuncGraph.
    return function([], x)(x)
  # Pure graph mode: evaluate via a session.
  return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
  """Returns the value of more than one tensor variable.

  Arguments:
      tensors: list of ops to run.

  Returns:
      A list of Numpy arrays.

  Raises:
      RuntimeError: If this method is called inside defun.
  """
  # Eager tensors can be read directly; graph tensors need one combined
  # session run (cheaper than one run per tensor).
  if context.executing_eagerly():
    return [x.numpy() for x in tensors]
  elif ops.inside_function():  # pylint: disable=protected-access
    raise RuntimeError('Cannot get value inside Tensorflow graph function.')
  if tensors:
    return get_session(tensors).run(tensors)
  else:
    return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
  """Sets the value of a variable, from a Numpy array.

  Arguments:
      x: Tensor to set to a new value.
      value: Value to set the tensor to, as a Numpy array
          (of the same shape).
  """
  value = np.asarray(value, dtype=dtype(x))
  if ops.executing_eagerly_outside_functions():
    # init_scope lifts the assignment out of any surrounding FuncGraph.
    with ops.init_scope():
      x.assign(value)
  else:
    with get_graph().as_default():
      # Strip any '_ref' suffix from the dtype name for the placeholder.
      tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
      # Cache the placeholder and assign op on the variable so repeated
      # set_value calls do not keep growing the graph.
      if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
  """Sets the values of many tensor variables at once.

  Arguments:
      tuples: a list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if ops.executing_eagerly_outside_functions():
    # init_scope lifts the assignments out of any surrounding FuncGraph.
    with ops.init_scope():
      for x, value in tuples:
        x.assign(np.asarray(value, dtype=dtype(x)))
  else:
    with get_graph().as_default():
      if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
          value = np.asarray(value, dtype=dtype(x))
          # Strip any '_ref' suffix from the dtype name for the placeholder.
          tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
          # Cache the placeholder and assign op on each variable so
          # repeated calls do not keep growing the graph.
          if hasattr(x, '_assign_placeholder'):
            assign_placeholder = x._assign_placeholder
            assign_op = x._assign_op
          else:
            assign_placeholder = array_ops.placeholder(tf_dtype,
                                                       shape=value.shape)
            assign_op = x.assign(assign_placeholder)
            x._assign_placeholder = assign_placeholder
            x._assign_op = assign_op
          assign_ops.append(assign_op)
          feed_dict[assign_placeholder] = value
        # One session run for all assignments.
        get_session().run(assign_ops, feed_dict=feed_dict)
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
  """Prints `message` and the tensor value when evaluated.

  Note that `print_tensor` returns a new tensor identical to `x`
  which should be used in the following code. Otherwise the
  print operation is not taken into account during evaluation.

  Example:

  ```python
  >>> x = K.print_tensor(x, message="x is: ")
  ```

  Arguments:
      x: Tensor to print.
      message: Message to print jointly with the tensor.

  Returns:
      The same tensor `x`, unchanged.
  """
  if isinstance(x, ops.Tensor) and hasattr(x, 'graph'):
    # Graph tensor: attach the print op as a control dependency of an
    # identity so the print fires whenever the returned tensor is used.
    with get_graph().as_default():
      op = logging_ops.print_v2(message, x, output_stream=sys.stdout)
      with ops.control_dependencies([op]):
        return array_ops.identity(x)
  else:
    # Eager value: print immediately and hand back the input.
    logging_ops.print_v2(message, x, output_stream=sys.stdout)
    return x
def is_tensor_or_composite_tensor(value):
  """Test if a passed value object is a tensor-like or composite tensor."""
  if tensor_util.is_tensor(value):
    return True
  if isinstance(value, np.ndarray):
    return True
  return composite_tensor_utils.is_composite_or_composite_value(value)
def _try_process_scipy_sparse_input(value):
  """Converts 'value' to a SparseTensor if it is a scipy sparse matrix.

  Arguments:
      value: An object that may have the attributes of a scipy sparse matrix.

  Returns:
      Either a SparseTensor based off of 'value' or 'value' itself.
  """
  try:
    coo = value.tocoo()
    row, col = coo.row, coo.col
    data, shape = coo.data, coo.shape
  except AttributeError:
    # Not scipy-sparse-like. It may be a plain scalar (fine to pass on)
    # or something unsupported; in either case defer to the data
    # standardization code rather than failing here.
    return value
  # COO row/col vectors become an (nnz, 2) index matrix.
  indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1)
  return sparse_tensor.SparseTensor(indices, data, shape)
def try_convert_scipy_to_sparse(values):
  """Converts scipy sparse matrices in 'values' to SparseTensors, if possible.

  Arguments:
      values: An input or list of inputs to convert. These may be TensorLikes,
          ndarrays, composite tensors, or scipy sparse values.

  Returns:
      An input or list of inputs where scipy sparse tensors have been converted
      to tf.SparseTensors.

  Raises:
      ValueError: If input cannot be converted to a SparseTensor.
  """
  # Flatten, convert only the scipy-sparse entries, then restore the
  # original nesting structure.
  structure = values
  flat_values = nest.flatten(values)
  flat_values = [
      value if is_tensor_or_composite_tensor(value) else
      _try_process_scipy_sparse_input(value) for value in flat_values
  ]
  return nest.pack_sequence_as(structure, flat_values)
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
In particular additional operations via `fetches` argument and additional
tensor substitutions via `feed_dict` arguments. Note that given
substitutions are merged with substitutions from `inputs`. Even though
`feed_dict` is passed once in the constructor (called in `model.compile()`)
we can modify the values in the dictionary. Through this feed_dict we can
provide additional substitutions besides Keras inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self._inputs_structure = inputs
self.inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(
nest.flatten(outputs, expand_composites=True))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
# to run custom updates
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
  def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
    """Generates a callable that runs the graph.

    Arguments:
      feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
      feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
      symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
      session: Session to use to generate the callable.

    Returns:
      Function that runs the graph according to the above options.
    """
    # Prepare callable options.
    callable_opts = config_pb2.CallableOptions()
    # Handle external-data feed.
    for x in feed_arrays:
      callable_opts.feed.append(x.name)
    if self.feed_dict:
      # Sorted for a deterministic feed order; `__call__` appends the
      # corresponding values in the same sorted order.
      for key in sorted(self.feed_dict.keys()):
        callable_opts.feed.append(key.name)
    # Handle symbolic feed.
    for x, y in zip(feed_symbols, symbol_vals):
      connection = callable_opts.tensor_connection.add()
      if x.dtype != y.dtype:
        y = math_ops.cast(y, dtype=x.dtype)
      from_tensor = ops._as_graph_element(y)
      if from_tensor is None:
        from_tensor = y
      connection.from_tensor = from_tensor.name  # Data tensor
      connection.to_tensor = x.name  # Placeholder
    # Handle fetches.
    for x in self.outputs + self.fetches:
      callable_opts.fetch.append(x.name)
    # Handle updates.
    callable_opts.target.append(self.updates_op.name)
    # Handle run_options.
    if self.run_options:
      callable_opts.run_options.CopyFrom(self.run_options)
    # Create callable.
    callable_fn = session._make_callable_from_options(callable_opts)
    # Cache parameters corresponding to the generated callable, so that
    # we can detect future mismatches and refresh the callable.
    self._callable_fn = callable_fn
    self._feed_arrays = feed_arrays
    self._feed_symbols = feed_symbols
    self._symbol_vals = symbol_vals
    self._fetches = list(self.fetches)
    self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def _eval_if_composite(self, tensor):
"""Helper method which evaluates any CompositeTensors passed to it."""
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
if isinstance(tensor, composite_tensor.CompositeTensor):
return self._session.run(tensor)
else:
return tensor
  def __call__(self, inputs):
    """Runs the graph with `inputs` fed in and returns the fetched outputs."""
    inputs = try_convert_scipy_to_sparse(inputs)

    # Ensure that input value types match any expected composite tensor types.
    # TODO(momernick): Once TensorSpecs are implemented for CTs, use that here.
    inputs = nest.flatten(inputs, expand_composites=True)

    session = get_session(inputs)
    feed_arrays = []
    array_vals = []
    feed_symbols = []
    symbol_vals = []
    # Split feeds into "symbolic tensor" feeds (wired via tensor connections)
    # and concrete array feeds; `None` values are skipped entirely.
    for tensor, value in zip(self.inputs, inputs):
      if value is None:
        continue
      if tensor_util.is_tensor(value):
        # Case: feeding symbolic tensor.
        feed_symbols.append(tensor)
        symbol_vals.append(value)
      else:
        # Case: feeding Numpy array.
        feed_arrays.append(tensor)
        # We need to do array conversion and type casting at this level, since
        # `callable_fn` only supports exact matches.
        tensor_type = dtypes_module.as_dtype(tensor.dtype)
        array_vals.append(np.asarray(value,
                                     dtype=tensor_type.as_numpy_dtype))

    if self.feed_dict:
      # Same sorted order as used in `_make_callable` when declaring feeds.
      for key in sorted(self.feed_dict.keys()):
        array_vals.append(
            np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))

    # Refresh callable if anything has changed.
    if (self._callable_fn is None or feed_arrays != self._feed_arrays or
        symbol_vals != self._symbol_vals or
        feed_symbols != self._feed_symbols or self.fetches != self._fetches or
        session != self._session):
      self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)

    fetched = self._callable_fn(*array_vals,
                                run_metadata=self.run_metadata)
    # The tail of `fetched` corresponds to `self._fetches` (outputs come
    # first); route those values to any registered callbacks.
    self._call_fetch_callbacks(fetched[-len(self._fetches):])
    output_structure = nest.pack_sequence_as(
        self._outputs_structure,
        fetched[:len(self.outputs)],
        expand_composites=True)
    # We need to evaluate any composite tensor objects that have been
    # reconstructed in 'pack_sequence_as', since otherwise they'll be output as
    # actual CompositeTensor objects instead of the value(s) contained in the
    # CompositeTensors. E.g., if output_structure contains a SparseTensor, then
    # this ensures that we return its value as a SparseTensorValue rather than
    # a SparseTensor.
    return nest.map_structure(self._eval_if_composite, output_structure)
class EagerExecutionFunction(object):
  """Helper class for constructing a TF graph function from the Keras graph.

  Arguments:
    inputs: Feed placeholders to the computation graph.
    outputs: Output tensors to fetch.
    updates: Additional update ops to be run at function call.
    name: A name to help users identify what this function does.
    session_kwargs: Unsupported.
  """

  def __init__(self, inputs, outputs, updates=None, name=None):
    self.name = name
    # Remember the (possibly nested) structures so results can be re-packed
    # in `__call__`; internally we work on flat tensor lists.
    self._inputs_structure = inputs
    inputs = nest.flatten(inputs, expand_composites=True)
    self._outputs_structure = outputs
    outputs = nest.flatten(outputs, expand_composites=True)

    updates = updates or []
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a Keras backend function '
                      'should be a list or tuple.')

    if updates and not outputs:
      # Edge case; never happens in practice
      raise ValueError('Cannot create a Keras backend function with updates'
                       ' but no outputs during eager execution.')
    # All tensors/ops involved must originate from a single graph.
    graphs = {
        i.graph
        for i in nest.flatten([inputs, outputs, updates])
        if hasattr(i, 'graph')
    }
    if len(graphs) > 1:
      raise ValueError('Cannot create an execution function which is comprised '
                       'of elements from multiple graphs.')

    source_graph = graphs.pop()
    global_graph = get_graph()

    updates_ops = []
    legacy_update_ops = []
    for update in updates:
      # For legacy reasons it is allowed to pass an update as a tuple
      # `(variable, new_value)` (this maps to an assign op). Otherwise it
      # is assumed to already be an op -- we cannot control its execution
      # order.
      if isinstance(update, tuple):
        legacy_update_ops.append(update)
      else:
        if hasattr(update, 'op'):
          update = update.op
        if update is not None:
          # `update.op` may have been None in certain cases.
          updates_ops.append(update)

    self._freezable_vars_to_feed = []
    self._freezable_vars_values = []
    freezable_vars_from_keras_graph = _FREEZABLE_VARS.get(global_graph, {})
    with _scratch_graph() as exec_graph:
      global_graph = get_graph()
      if source_graph not in (exec_graph, global_graph):
        raise ValueError('Unknown graph. Aborting.')

      if source_graph is global_graph and exec_graph is not global_graph:
        # Tensors live in the global Keras graph: lift everything reachable
        # from the outputs/updates into the scratch graph, treating `inputs`
        # as the sources (function arguments).
        init_tensors = (
            outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
            [p_new for [_, p_new] in legacy_update_ops
             if isinstance(p_new, ops.Tensor)])
        lifted_map = lift_to_graph.lift_to_graph(
            init_tensors=init_tensors, graph=exec_graph, sources=inputs,
            add_sources=True, handle_captures=True, base_graph=source_graph)

        # Re-map everything to its lifted counterpart in the scratch graph.
        inputs = [lifted_map[i] for i in inputs]
        outputs = [lifted_map[i] for i in outputs]
        updates_ops = [lifted_map[i] for i in updates_ops]
        legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
                             for p, p_new in legacy_update_ops]

        # Keep track of the value to feed to any "freezable variables"
        # created in this graph.
        for old_op, new_op in lifted_map.items():
          if old_op in freezable_vars_from_keras_graph:
            frozen_var = old_op
            if frozen_var._initial_value != frozen_var._current_value:
              # We only feed a frozen_variable if its value has changed;
              # otherwise it can rely on the default value of the
              # underlying placeholder_with_default.
              self._freezable_vars_to_feed.append(new_op)
              self._freezable_vars_values.append(frozen_var._current_value)

      # Consolidate updates
      with exec_graph.as_default():
        outputs = cast_variables_to_tensor(outputs)
        with ops.control_dependencies(outputs):
          for p, p_new in legacy_update_ops:
            updates_ops.append(state_ops.assign(p, p_new))

        self.inputs, self.outputs = inputs, outputs
        self._input_references = self.inputs + self._freezable_vars_to_feed
        # Force update ops to run by making the first output depend on them.
        with ops.control_dependencies(updates_ops):
          self.outputs[0] = array_ops.identity(self.outputs[0])

        exec_graph.inputs = self._input_references + list(
            exec_graph.captures.values())
        exec_graph.outputs = self.outputs
        graph_fn = eager_function.ConcreteFunction(exec_graph)

    graph_fn._num_positional_args = len(self._input_references)
    graph_fn._arg_keywords = []
    self._graph_fn = graph_fn

    # Handle placeholders with default
    # (treated as required placeholder by graph functions)
    self._placeholder_default_values = {}
    with exec_graph.as_default():
      for x in self.inputs:
        if x.op.type == 'PlaceholderWithDefault':
          self._placeholder_default_values[x] = tensor_util.constant_value(
              x.op.inputs[0])

  def __call__(self, inputs):
    """Calls the compiled graph function with `inputs` and unpacks results."""
    # Convert scipy sparse data into sparse tensors.
    inputs = try_convert_scipy_to_sparse(inputs)

    input_values = nest.flatten(inputs, expand_composites=True)

    if self._freezable_vars_values:
      input_values = input_values + self._freezable_vars_values
    converted_inputs = []
    for tensor, value in zip(self._input_references, input_values):
      if value is None:
        # Assume `value` is a placeholder with default
        value = self._placeholder_default_values.get(tensor, None)
        if value is None:
          raise ValueError(
              'You must feed a value for placeholder %s' % (tensor,))
      if not isinstance(value, ops.Tensor):
        value = ops.convert_to_tensor(value, dtype=tensor.dtype)
      if value.dtype != tensor.dtype:
        # Temporary workaround due to `convert_to_tensor` not casting floats.
        # See b/119637405
        value = math_ops.cast(value, tensor.dtype)
      converted_inputs.append(value)
    outputs = self._graph_fn(*converted_inputs)

    # EagerTensor.numpy() will often make a copy to ensure memory safety.
    # However in this case `outputs` is not directly returned, so it is always
    # safe to reuse the underlying buffer without checking. In such a case the
    # private numpy conversion method is preferred to guarantee performance. We
    # also have to call `_cpu_nograd()` since the Tensor may not be on the CPU.
    # (otherwise it's just a no-op.)
    return nest.pack_sequence_as(
        self._outputs_structure,
        [x._cpu_nograd()._numpy() for x in outputs],  # pylint: disable=protected-access
        expand_composites=True)
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
  """Instantiates a Keras function.

  Arguments:
      inputs: List of placeholder tensors.
      outputs: List of output tensors.
      updates: List of update ops.
      name: String, name of function.
      **kwargs: Passed to `tf.Session.run`.

  Returns:
      Output values as Numpy arrays.

  Raises:
      ValueError: if invalid kwargs are passed in or if in eager execution.
  """
  if ops.executing_eagerly_outside_functions():
    # Eager path: session kwargs are meaningless here.
    if kwargs:
      raise ValueError('Session keyword arguments are not support during '
                       'eager execution. You passed: %s' % (kwargs,))
    return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)

  # Graph path: only kwargs understood by `tf.Session.run` (plus this
  # function's own argument names) are accepted.
  if kwargs:
    session_run_args = tf_inspect.getfullargspec(session_module.Session.run)[0]
    allowed_names = ['inputs', 'outputs', 'updates', 'name']
    for key in kwargs:
      if key not in session_run_args and key not in allowed_names:
        msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
               'backend') % key
        raise ValueError(msg)
  return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
  """Returns the gradients of `loss` w.r.t. `variables`.

  Arguments:
      loss: Scalar tensor to minimize.
      variables: List of variables.

  Returns:
      A gradients tensor.
  """
  # Co-locating gradients with their forward ops keeps device placement
  # consistent for the backward pass.
  grad_tensors = gradients_module.gradients(
      loss, variables, colocate_gradients_with_ops=True)
  return grad_tensors
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
  """Returns `variables` but with zero gradient w.r.t. every other variable.

  Arguments:
      variables: Tensor or list of tensors to consider constant with respect
          to any other variable.

  Returns:
      A single tensor or a list of tensors (depending on the passed argument)
      that has no gradient with respect to any other variable.
  """
  if isinstance(variables, (list, tuple)):
    # Return a materialized list rather than the lazy `map` iterator: the
    # docstring promises a list, and a Python 3 `map` object is single-use
    # (a second traversal would silently yield nothing).
    return [array_ops.stop_gradient(v) for v in variables]
  return array_ops.stop_gradient(variables)
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
        inputs,
        initial_states,
        go_backwards=False,
        mask=None,
        constants=None,
        unroll=False,
        input_length=None,
        time_major=False,
        zero_output_for_mask=False):
  """Iterates over the time dimension of a tensor.

  Arguments:
      step_function: RNN step function.
          Args:
              input: Tensor with shape `(samples, ...)` (no time dimension),
                  representing input for the batch of samples at a certain
                  time step.
              states: List of tensors.
          Returns:
              output: Tensor with shape `(samples, output_dim)`
                  (no time dimension).
              new_states: List of tensors, same length and shapes
                  as 'states'. The first state in the list must be the
                  output tensor at the previous timestep.
      inputs: Tensor of temporal data of shape `(samples, time, ...)`
          (at least 3D), or nested tensors, and each of which has shape
          `(samples, time, ...)`.
      initial_states: Tensor with shape `(samples, state_size)`
          (no time dimension), containing the initial values for the states used
          in the step function. In the case that state_size is in a nested
          shape, the shape of initial_states will also follow the nested
          structure.
      go_backwards: Boolean. If True, do the iteration over the time
          dimension in reverse order and return the reversed sequence.
      mask: Binary tensor with shape `(samples, time, 1)`,
          with a zero for every element that is masked.
      constants: List of constant values passed at each step.
      unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
      input_length: If specified, assume time dimension is of this length.
      time_major: Boolean. If true, the inputs and outputs will be in shape
          `(timesteps, batch, ...)`, whereas in the False case, it will be
          `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
          efficient because it avoids transposes at the beginning and end of the
          RNN calculation. However, most TensorFlow data is batch-major, so by
          default this function accepts input and emits output in batch-major
          form.
      zero_output_for_mask: Boolean. If True, the output for masked timestep
          will be zeros, whereas in the False case, output from previous
          timestep is returned.

  Returns:
      A tuple, `(last_output, outputs, new_states)`.
          last_output: the latest output of the rnn, of shape `(samples, ...)`
          outputs: tensor with shape `(samples, time, ...)` where each
              entry `outputs[s, t]` is the output of the step function
              at time `t` for sample `s`.
          new_states: list of tensors, latest states returned by
              the step function, of shape `(samples, ...)`.

  Raises:
      ValueError: if input dimension is less than 3.
      ValueError: if `unroll` is `True` but input timestep is not a fixed
      number.
      ValueError: if `mask` is provided (not `None`) but states is not provided
      (`len(states)` == 0).
  """

  def swap_batch_timestep(input_t):
    # Swap the batch and timestep dim for the incoming tensor.
    axes = list(range(len(input_t.shape)))
    axes[0], axes[1] = 1, 0
    return array_ops.transpose(input_t, axes)

  # Internally this function always works time-major; transpose on the way in
  # and (for outputs) on the way out when `time_major` is False.
  if not time_major:
    inputs = nest.map_structure(swap_batch_timestep, inputs)

  flatted_inputs = nest.flatten(inputs)
  # Static dims (may be None for dynamic shapes) plus a dynamic step count.
  time_steps = flatted_inputs[0].shape[0]
  batch = flatted_inputs[0].shape[1]
  time_steps_t = array_ops.shape(flatted_inputs[0])[0]

  for input_ in flatted_inputs:
    input_.shape.with_rank_at_least(3)

  if mask is not None:
    if mask.dtype != dtypes_module.bool:
      mask = math_ops.cast(mask, dtypes_module.bool)
    if len(mask.shape) == 2:
      mask = expand_dims(mask)
    if not time_major:
      mask = swap_batch_timestep(mask)

  if constants is None:
    constants = []

  # tf.where needs its condition tensor to be the same shape as its two
  # result tensors, but in our case the condition (mask) tensor is
  # (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
  # So we need to broadcast the mask to match the shape of inputs.
  # That's what the tile call does, it just repeats the mask along its
  # second dimension n times.
  def _expand_mask(mask_t, input_t, fixed_dim=1):
    assert not nest.is_sequence(mask_t)
    assert not nest.is_sequence(input_t)
    rank_diff = len(input_t.shape) - len(mask_t.shape)
    for _ in range(rank_diff):
      mask_t = array_ops.expand_dims(mask_t, -1)
    multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
    return array_ops.tile(mask_t, multiples)

  # Unrolled case: build one set of graph ops per timestep. Requires a
  # statically known number of timesteps.
  if unroll:
    if not time_steps:
      raise ValueError('Unrolling requires a fixed number of timesteps.')
    states = tuple(initial_states)
    successive_states = []
    successive_outputs = []

    # Process the input tensors. The input tensor need to be split on the
    # time_step dim, and reverse if go_backwards is True. In the case of nested
    # input, the input is flattened and then transformed individually.
    # The result of this will be a tuple of lists, each of the item in tuple is
    # list of the tensor with shape (batch, feature)
    def _process_single_input_t(input_t):
      input_t = array_ops.unstack(input_t)  # unstack for time_step dim
      if go_backwards:
        input_t.reverse()
      return input_t

    if nest.is_sequence(inputs):
      processed_input = nest.map_structure(_process_single_input_t, inputs)
    else:
      processed_input = (_process_single_input_t(inputs),)

    def _get_input_tensor(time):
      inp = [t_[time] for t_ in processed_input]
      return nest.pack_sequence_as(inputs, inp)

    if mask is not None:
      mask_list = array_ops.unstack(mask)
      if go_backwards:
        mask_list.reverse()

      for i in range(time_steps):
        inp = _get_input_tensor(i)
        mask_t = mask_list[i]
        output, new_states = step_function(inp,
                                           tuple(states) + tuple(constants))
        tiled_mask_t = _expand_mask(mask_t, output)

        if not successive_outputs:
          prev_output = zeros_like(output)
        else:
          prev_output = successive_outputs[-1]

        # Masked samples carry the previous timestep's output forward.
        output = array_ops.where(tiled_mask_t, output, prev_output)

        return_states = []
        for state, new_state in zip(states, new_states):
          # (see earlier comment for tile explanation)
          tiled_mask_t = _expand_mask(mask_t, new_state)
          return_states.append(array_ops.where(tiled_mask_t, new_state, state))
        states = return_states
        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = array_ops.stack(successive_outputs)

      if zero_output_for_mask:
        # Overwrite masked positions with zeros instead of carried output.
        last_output = array_ops.where(
            _expand_mask(mask_list[-1], last_output),
            last_output,
            zeros_like(last_output))
        outputs = array_ops.where(
            _expand_mask(mask, outputs, fixed_dim=2),
            outputs,
            zeros_like(outputs))

    else:
      for i in range(time_steps):
        inp = _get_input_tensor(i)
        output, states = step_function(inp, tuple(states) + tuple(constants))
        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = array_ops.stack(successive_outputs)

  # Symbolic-loop case: iterate with a tf.while_loop over TensorArrays.
  else:
    states = tuple(initial_states)

    # Create input tensor array, if the inputs is nested tensors, then it will
    # be flattened first, and tensor array will be created one per flattened
    # tensor.
    input_ta = tuple(
        tensor_array_ops.TensorArray(
            dtype=inp.dtype,
            size=time_steps_t,
            tensor_array_name='input_ta_%s' % i)
        for i, inp in enumerate(flatted_inputs))
    input_ta = tuple(
        ta.unstack(input_) if not go_backwards else ta
        .unstack(reverse(input_, 0))
        for ta, input_ in zip(input_ta, flatted_inputs))

    # Get the time(0) input and compute the output for that, the output will be
    # used to determine the dtype of output tensor array. Don't read from
    # input_ta due to TensorArray clear_after_read default to True.
    input_time_zero = nest.pack_sequence_as(inputs,
                                            [inp[0] for inp in flatted_inputs])
    # output_time_zero is used to determine the cell output shape and its dtype.
    # the value is discarded.
    output_time_zero, _ = step_function(
        input_time_zero, tuple(initial_states) + tuple(constants))

    output_ta = tuple(
        tensor_array_ops.TensorArray(
            dtype=out.dtype,
            size=time_steps_t,
            tensor_array_name='output_ta_%s' % i)
        for i, out in enumerate(nest.flatten(output_time_zero)))

    time = constant_op.constant(0, dtype='int32', name='time')

    while_loop_kwargs = {
        'cond': lambda time, *_: time < time_steps_t,
        'maximum_iterations': input_length,
        'parallel_iterations': 32,
        'swap_memory': True,
    }

    if mask is not None:
      if not states:
        raise ValueError('No initial states provided! '
                         'When using masking in an RNN, you should '
                         'provide initial states '
                         '(and your step function should return '
                         'as its first state at time `t` '
                         'the output at time `t-1`).')
      if go_backwards:
        mask = reverse(mask, 0)

      mask_ta = tensor_array_ops.TensorArray(
          dtype=dtypes_module.bool,
          size=time_steps_t,
          tensor_array_name='mask_ta')
      mask_ta = mask_ta.unstack(mask)

      # Mask for the T output will be base on the output of T - 1. In the case
      # T = 0, a zero filled tensor will be used.
      flat_zero_output = tuple(array_ops.zeros_like(o)
                               for o in nest.flatten(output_time_zero))

      def _step(time, output_ta_t, prev_output, *states):
        """RNN step function.

        Arguments:
            time: Current timestep value.
            output_ta_t: TensorArray.
            prev_output: tuple of outputs from time - 1.
            *states: List of states.

        Returns:
            Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
        """
        current_input = tuple(ta.read(time) for ta in input_ta)
        # maybe set shape.
        current_input = nest.pack_sequence_as(inputs, current_input)
        mask_t = mask_ta.read(time)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        # mask output
        flat_output = nest.flatten(output)
        flat_mask_output = (flat_zero_output if zero_output_for_mask
                            else nest.flatten(prev_output))
        tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
        flat_new_output = tuple(
            array_ops.where(m, o, zo) for m, o, zo in zip(
                tiled_mask_t, flat_output, flat_mask_output))

        # mask states
        flat_state = nest.flatten(states)
        flat_new_state = nest.flatten(new_states)
        for state, new_state in zip(flat_state, flat_new_state):
          if isinstance(new_state, ops.Tensor):
            new_state.set_shape(state.shape)
        tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
        flat_final_state = tuple(
            array_ops.where(m, s, ps)
            for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
        new_states = nest.pack_sequence_as(new_states, flat_final_state)

        output_ta_t = tuple(
            ta.write(time, out)
            for ta, out in zip(output_ta_t, flat_new_output))
        return (time + 1, output_ta_t,
                tuple(flat_new_output)) + tuple(new_states)

      final_outputs = control_flow_ops.while_loop(
          body=_step,
          loop_vars=(time, output_ta, flat_zero_output) + states,
          **while_loop_kwargs)
      # Skip final_outputs[2] which is the output for final timestep.
      new_states = final_outputs[3:]
    else:

      def _step(time, output_ta_t, *states):
        """RNN step function.

        Arguments:
            time: Current timestep value.
            output_ta_t: TensorArray.
            *states: List of states.

        Returns:
            Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
        """
        current_input = tuple(ta.read(time) for ta in input_ta)
        current_input = nest.pack_sequence_as(inputs, current_input)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        flat_state = nest.flatten(states)
        flat_new_state = nest.flatten(new_states)
        for state, new_state in zip(flat_state, flat_new_state):
          if isinstance(new_state, ops.Tensor):
            new_state.set_shape(state.shape)

        flat_output = nest.flatten(output)
        output_ta_t = tuple(
            ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
        new_states = nest.pack_sequence_as(initial_states, flat_new_state)
        return (time + 1, output_ta_t) + tuple(new_states)

      final_outputs = control_flow_ops.while_loop(
          body=_step,
          loop_vars=(time, output_ta) + states,
          **while_loop_kwargs)
      new_states = final_outputs[2:]

    output_ta = final_outputs[1]

    outputs = tuple(o.stack() for o in output_ta)
    last_output = tuple(o[-1] for o in outputs)

    outputs = nest.pack_sequence_as(output_time_zero, outputs)
    last_output = nest.pack_sequence_as(output_time_zero, last_output)

  # static shape inference
  def set_shape(output_):
    if isinstance(output_, ops.Tensor):
      shape = output_.shape.as_list()
      shape[0] = time_steps
      shape[1] = batch
      output_.set_shape(shape)
    return output_

  outputs = nest.map_structure(set_shape, outputs)

  if not time_major:
    outputs = nest.map_structure(swap_batch_timestep, outputs)

  return last_output, outputs, new_states
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
  """Switches between two operations depending on a scalar value.

  Note that both `then_expression` and `else_expression`
  should be symbolic tensors of the *same shape*.

  Arguments:
      condition: tensor (`int` or `bool`).
      then_expression: either a tensor, or a callable that returns a tensor.
      else_expression: either a tensor, or a callable that returns a tensor.

  Returns:
      The selected tensor.

  Raises:
      ValueError: If rank of `condition` is greater than rank of expressions.
  """
  if condition.dtype != dtypes_module.bool:
    condition = math_ops.cast(condition, 'bool')
  cond_ndim = ndim(condition)
  if not cond_ndim:
    # Scalar condition: use tf.cond, which requires callable branches, so wrap
    # plain tensors in thunks.
    if not callable(then_expression):

      def then_expression_fn():
        return then_expression
    else:
      then_expression_fn = then_expression
    if not callable(else_expression):

      def else_expression_fn():
        return else_expression
    else:
      else_expression_fn = else_expression
    x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
  else:
    # tf.where needs its condition tensor
    # to be the same shape as its two
    # result tensors
    if callable(then_expression):
      then_expression = then_expression()
    if callable(else_expression):
      else_expression = else_expression()
    expr_ndim = ndim(then_expression)
    if cond_ndim > expr_ndim:
      raise ValueError('Rank of `condition` should be less than or'
                       ' equal to rank of `then_expression` and '
                       '`else_expression`. ndim(condition)=' + str(cond_ndim) +
                       ', ndim(then_expression)'
                       '=' + str(expr_ndim))
    if cond_ndim > 1:
      # Raise the condition to the expressions' rank by appending singleton
      # dims, then tile along every dimension where the shapes differ.
      ndim_diff = expr_ndim - cond_ndim
      cond_shape = array_ops.concat(
          [array_ops.shape(condition), [1] * ndim_diff], axis=0)
      condition = array_ops.reshape(condition, cond_shape)
      expr_shape = array_ops.shape(then_expression)
      shape_diff = expr_shape - cond_shape
      tile_shape = array_ops.where(shape_diff > 0, expr_shape,
                                   array_ops.ones_like(expr_shape))
      condition = array_ops.tile(condition, tile_shape)
    x = array_ops.where(condition, then_expression, else_expression)
  return x
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
  """Selects `x` in train phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Arguments:
      x: What to return in train phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on the `training` flag.
      the `training` flag defaults to `K.learning_phase()`.
  """
  if training is None:
    training = learning_phase()

  # Static phase: resolve immediately without building a conditional op.
  if training == 1 or training is True:
    return x() if callable(x) else x
  if training == 0 or training is False:
    return alt() if callable(alt) else alt

  # Otherwise the learning phase is a symbolic tensor; build a runtime switch.
  return switch(training, x, alt)
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
  """Selects `x` in test phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Arguments:
      x: What to return in test phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on `K.learning_phase`.
  """
  # The test phase is simply the train phase with the two branches swapped.
  return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
  """Rectified linear unit.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: float. Saturation threshold.
      threshold: float. Threshold value for thresholded activation.

  Returns:
      A tensor.
  """

  if alpha != 0.:
    if max_value is None and threshold == 0:
      # Fast path: plain leaky ReLU handled by a single native op.
      return nn.leaky_relu(x, alpha=alpha)

    # The negative part must be derived from the ORIGINAL `x`, before the
    # positive side is thresholded/clipped below.
    if threshold != 0:
      negative_part = nn.relu(-x + threshold)
    else:
      negative_part = nn.relu(-x)

  clip_max = max_value is not None

  if threshold != 0:
    # computes x for x > threshold else 0
    x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
  elif max_value == 6:
    # if no threshold, then can use nn.relu6 native TF op for performance
    x = nn.relu6(x)
    clip_max = False
  else:
    x = nn.relu(x)

  if clip_max:
    max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
    zero = _constant_to_tensor(0., x.dtype.base_dtype)
    x = clip_ops.clip_by_value(x, zero, max_value)

  if alpha != 0.:
    # Subtract the scaled negative part computed earlier from the original x.
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
  """Exponential linear unit.

  Arguments:
      x: A tensor or variable to compute the activation function for.
      alpha: A scalar, slope of negative section.

  Returns:
      A tensor.
  """
  activated = nn.elu(x)
  if alpha == 1:
    return activated
  # Scale only the negative part; nn.elu already equals x where x > 0.
  return array_ops.where(x > 0, activated, alpha * activated)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
  """Softmax of a tensor.

  Arguments:
      x: A tensor or variable.
      axis: The dimension softmax would be performed on.
          The default is -1 which indicates the last dimension.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native TF op.
  result = nn.softmax(x, axis=axis)
  return result
@keras_export('keras.backend.softplus')
def softplus(x):
  """Softplus of a tensor.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native TF op.
  result = nn.softplus(x)
  return result
@keras_export('keras.backend.softsign')
def softsign(x):
  """Softsign of a tensor.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native TF op.
  result = nn.softsign(x)
  return result
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last`, and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
  """
  if not from_logits:
    if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
        output.op.type != 'Softmax'):
      # scale preds so that the class probas of each sample sum to 1
      output = output / math_ops.reduce_sum(output, axis, True)
      # Compute cross entropy from probabilities.
      # Clipping avoids log(0) producing inf/NaN.
      epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
      output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
      return -math_ops.reduce_sum(target * math_ops.log(output), axis)
    else:
      # When softmax activation function is used for output operation, we
      # use logits from the softmax function directly to compute loss in order
      # to prevent collapsing zero when training.
      # See b/117284466
      assert len(output.op.inputs) == 1
      output = output.op.inputs[0]
  return nn.softmax_cross_entropy_with_logits_v2(
      labels=target, logits=output, axis=axis)
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy with integer targets.

  Arguments:
      target: An integer tensor.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last', and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
  """
  if not from_logits:
    # Probabilities in: recover logits via log(clip(p)), unless `output` is
    # the direct result of a Softmax op whose input logits we can reuse.
    if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
        output.op.type != 'Softmax'):
      epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
      output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
      output = math_ops.log(output)
    else:
      # When softmax activation function is used for output operation, we
      # use logits from the softmax function directly to compute loss in order
      # to prevent collapsing zero when training.
      # See b/117284466
      assert len(output.op.inputs) == 1
      output = output.op.inputs[0]
  if isinstance(output.shape, (tuple, list)):
    output_rank = len(output.shape)
  else:
    output_rank = output.shape.ndims
  if output_rank is not None:
    axis %= output_rank
    if axis != output_rank - 1:
      # Move the class axis to the last position, as required by the fused
      # sparse crossentropy op.
      permutation = list(
          itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
      output = array_ops.transpose(output, perm=permutation)
  elif axis != -1:
    raise ValueError(
        'Cannot compute sparse categorical crossentropy with `axis={}` on an '
        'output tensor with unknown rank'.format(axis))
  target = cast(target, 'int64')
  # Try to adjust the shape so that rank of labels = 1 - rank of logits.
  output_shape = array_ops.shape_v2(output)
  target_rank = target.shape.ndims
  update_shape = (
      target_rank is not None and output_rank is not None and
      target_rank != output_rank - 1)
  if update_shape:
    # Collapse leading dims: labels become rank 1, logits rank 2.
    target = flatten(target)
    output = array_ops.reshape(output, [-1, output_shape[-1]])
  if py_any([_is_symbolic_tensor(v) for v in [target, output]]):
    # Symbolic tensors must be handled inside the Keras graph.
    with get_graph().as_default():
      res = nn.sparse_softmax_cross_entropy_with_logits_v2(
          labels=target, logits=output)
  else:
    res = nn.sparse_softmax_cross_entropy_with_logits_v2(
        labels=target, logits=output)
  if update_shape and output_rank >= 3:
    # If our output includes timesteps or spatial dimensions we need to reshape
    return array_ops.reshape(res, output_shape[:-1])
  else:
    return res
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor with the same shape as `output`.
      output: A tensor.
      from_logits: Whether `output` is expected to be a logits tensor.
          By default, we consider that `output`
          encodes a probability distribution.

  Returns:
      A tensor.
  """
  if not from_logits:
    # `output` holds probabilities -- unless it is literally the output of a
    # Sigmoid op (graph mode only), in which case we can recover the logits.
    if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
        output.op.type != 'Sigmoid'):
      # Clip to avoid log(0) producing -inf/NaN.
      epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
      output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
      # Compute cross entropy from probabilities.
      bce = target * math_ops.log(output + epsilon())
      bce += (1 - target) * math_ops.log(1 - output + epsilon())
      return -bce
    else:
      # When sigmoid activation function is used for output operation, we
      # use logits from the sigmoid function directly to compute loss in order
      # to prevent collapsing zero when training.
      assert len(output.op.inputs) == 1
      output = output.op.inputs[0]
  # Logits path: numerically stable fused sigmoid crossentropy.
  return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
  """Element-wise sigmoid, `1 / (1 + exp(-x))`.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor of the same shape.
  """
  result = nn.sigmoid(x)
  return result
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.

  Faster than sigmoid.
  Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
  In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Affine transform 0.2 * x + 0.5, with constants cast to x's dtype.
  slope = _constant_to_tensor(0.2, x.dtype.base_dtype)
  offset = _constant_to_tensor(0.5, x.dtype.base_dtype)
  linear = math_ops.add(math_ops.mul(x, slope), offset)
  # Saturate outside [-2.5, 2.5].
  return clip_ops.clip_by_value(linear, 0., 1.)
@keras_export('keras.backend.tanh')
def tanh(x):
  """Element-wise hyperbolic tangent.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor of the same shape.
  """
  result = nn.tanh(x)
  return result
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
  """Sets entries in `x` to zero at random, while scaling the entire tensor.

  Arguments:
      x: tensor
      level: fraction of the entries in the tensor
          that will be set to 0.
      noise_shape: shape for randomly generated keep/drop flags,
          must be broadcastable to the shape of `x`
      seed: random seed to ensure determinism.

  Returns:
      A tensor.
  """
  # Draw a fresh random seed when the caller did not pin one.
  seed = np.random.randint(10e6) if seed is None else seed
  return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
  """Normalizes a tensor wrt the L2 norm alongside the specified axis.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform normalization.

  Returns:
      A tensor.
  """
  normalized = nn.l2_normalize(x, axis=axis)
  return normalized
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
  """Returns whether the `targets` are in the top `k` `predictions`.

  Arguments:
      predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
      targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
      k: An `int`, number of top elements to consider.

  Returns:
      A 1D tensor of length `batch_size` and type `bool`.
      `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
      values of `predictions[i]`.
  """
  result = nn.in_top_k(predictions, targets, k)
  return result
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
  """Transpose and cast the input before the conv1d.

  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tuple `(x, tf_data_format)` where `x` may have been transposed to
      match a TF-native layout.
  """
  tf_data_format = 'NWC'  # to pass TF Conv2dNative operations
  if data_format == 'channels_first':
    if _has_nchw_support():
      tf_data_format = 'NCW'
    else:
      # NCW is unsupported on this device: fall back to NWC layout.
      x = array_ops.transpose(x, (0, 2, 1))  # NCW -> NWC
  return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
  """Transpose and cast the input before the conv2d.

  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.
      force_transpose: Boolean. If True, the input will always be transposed
          from NCHW to NHWC if `data_format` is `"channels_first"`.
          If False, the transposition only occurs on CPU (GPU ops are
          assumed to support NCHW).

  Returns:
      A tuple `(x, tf_data_format)`.
  """
  tf_data_format = 'NHWC'
  if data_format == 'channels_first':
    if _has_nchw_support() and not force_transpose:
      tf_data_format = 'NCHW'
    else:
      # Either NCHW is unsupported here or the caller demands NHWC.
      x = array_ops.transpose(x, (0, 2, 3, 1))  # NCHW -> NHWC
  return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
  """Transpose and cast the input before the conv3d.

  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tuple `(x, tf_data_format)`.
  """
  tf_data_format = 'NDHWC'
  if data_format == 'channels_first':
    if _has_nchw_support():
      tf_data_format = 'NCDHW'
    else:
      # NCDHW is unsupported on this device: fall back to NDHWC layout.
      x = array_ops.transpose(x, (0, 2, 3, 4, 1))
  return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
padding: string, one of 'same' , 'valid'
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if invalid `padding'`
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
def conv1d(x,
           kernel,
           strides=1,
           padding='valid',
           data_format=None,
           dilation_rate=1):
  """1D convolution.

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: stride integer.
      padding: string, `"same"`, `"causal"` or `"valid"`.
      data_format: string, one of "channels_last", "channels_first".
      dilation_rate: integer dilate rate.

  Returns:
      A tensor, result of 1D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  kernel_shape = kernel.shape.as_list()
  if padding == 'causal':
    # causal (dilated) convolution: left-pad so the output at step t only
    # depends on inputs at steps <= t, then run an ordinary 'valid' conv.
    left_pad = dilation_rate * (kernel_shape[0] - 1)
    x = temporal_padding(x, (left_pad, 0))
    padding = 'valid'
  padding = _preprocess_padding(padding)
  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  # Restore channels_first layout if the conv had to run in NWC.
  if data_format == 'channels_first' and tf_data_format == 'NWC':
    x = array_ops.transpose(x, (0, 2, 1))  # NWC -> NCW
  return x
@keras_export('keras.backend.conv2d')
def conv2d(x,
           kernel,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1)):
  """2D convolution.

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 2 integers.

  Returns:
      A tensor, result of 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  result = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=_preprocess_padding(padding),
      data_format=tf_data_format)
  # If we had to run in NHWC, move channels back to the front.
  if tf_data_format == 'NHWC' and data_format == 'channels_first':
    result = array_ops.transpose(result, (0, 3, 1, 2))  # NHWC -> NCHW
  return result
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D deconvolution (i.e.

  transposed convolution).

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: Tuple of 2 integers.

  Returns:
      A tensor, result of transposed 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
  if data_format == 'channels_first' and dilation_rate != (1, 1):
    force_transpose = True
  else:
    force_transpose = False
  x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Input was transposed to NHWC, so permute output_shape to match.
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[1])
  if output_shape[0] is None:
    # Unknown batch dim: take it from the input tensor at runtime.
    output_shape = (shape(x)[0],) + tuple(output_shape[1:])
  if isinstance(output_shape, (tuple, list)):
    output_shape = array_ops.stack(list(output_shape))
  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides
  if dilation_rate == (1, 1):
    x = nn.conv2d_transpose(x, kernel, output_shape, strides,
                            padding=padding,
                            data_format=tf_data_format)
  else:
    # Dilated transposed conv: rates must match per axis (op limitation).
    assert dilation_rate[0] == dilation_rate[1]
    x = nn.atrous_conv2d_transpose(
        x,
        kernel,
        output_shape,
        rate=dilation_rate[0],
        padding=padding)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def separable_conv1d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=1,
                     padding='valid',
                     data_format=None,
                     dilation_rate=1):
  """1D convolution with separable filters.

  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      pointwise_kernel: kernel for the 1x1 convolution.
      strides: stride integer.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: integer dilation rate.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  # Normalize scalar arguments to 1-tuples.
  if isinstance(strides, int):
    strides = (strides,)
  if isinstance(dilation_rate, int):
    dilation_rate = (dilation_rate,)
  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
  padding = _preprocess_padding(padding)
  if not isinstance(strides, tuple):
    strides = tuple(strides)
  # There is no native separable 1D conv: insert a dummy spatial dim and
  # run `separable_conv2d` instead, then squeeze the dim back out.
  if tf_data_format == 'NWC':
    spatial_start_dim = 1
    strides = (1,) + strides * 2 + (1,)
  else:
    spatial_start_dim = 2
    strides = (1, 1) + strides * 2
  x = array_ops.expand_dims(x, spatial_start_dim)
  depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
  pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
  dilation_rate = (1,) + dilation_rate
  x = nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  x = array_ops.squeeze(x, [spatial_start_dim])
  if data_format == 'channels_first' and tf_data_format == 'NWC':
    x = array_ops.transpose(x, (0, 2, 1))  # NWC -> NCW
  return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable filters.

  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      pointwise_kernel: kernel for the 1x1 convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
      ValueError: if `strides` is not a tuple of 2 integers.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if len(strides) != 2:
    raise ValueError('`strides` must be a tuple of 2 integers.')
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  if not isinstance(strides, tuple):
    strides = tuple(strides)
  # Expand strides to a full 4D spec matching the TF layout.
  if tf_data_format == 'NHWC':
    full_strides = (1,) + strides + (1,)
  else:
    full_strides = (1, 1) + strides
  result = nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=full_strides,
      padding=_preprocess_padding(padding),
      rate=dilation_rate,
      data_format=tf_data_format)
  if tf_data_format == 'NHWC' and data_format == 'channels_first':
    result = array_ops.transpose(result, (0, 3, 1, 2))  # NHWC -> NCHW
  return result
def depthwise_conv2d(x,
                     depthwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable filters.

  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  # Expand strides to a full 4D spec matching the TF layout.
  if tf_data_format == 'NHWC':
    full_strides = (1,) + strides + (1,)
  else:
    full_strides = (1, 1) + strides
  result = nn.depthwise_conv2d(
      x,
      depthwise_kernel,
      strides=full_strides,
      padding=_preprocess_padding(padding),
      rate=dilation_rate,
      data_format=tf_data_format)
  if tf_data_format == 'NHWC' and data_format == 'channels_first':
    result = array_ops.transpose(result, (0, 3, 1, 2))  # NHWC -> NCHW
  return result
@keras_export('keras.backend.conv3d')
def conv3d(x,
           kernel,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1, 1)):
  """3D convolution.

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 3 integers.

  Returns:
      A tensor, result of 3D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  result = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=_preprocess_padding(padding),
      data_format=tf_data_format)
  # If we had to run in NDHWC, move channels back to the front.
  if tf_data_format == 'NDHWC' and data_format == 'channels_first':
    result = array_ops.transpose(result, (0, 4, 1, 2, 3))
  return result
def conv3d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1, 1),
                     padding='valid',
                     data_format=None):
  """3D deconvolution (i.e.

  transposed convolution).

  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, "same" or "valid".
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tensor, result of transposed 3D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if isinstance(output_shape, (tuple, list)):
    output_shape = array_ops.stack(output_shape)
  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    # Input was transposed to NDHWC, so permute output_shape to match.
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[4], output_shape[1])
  if output_shape[0] is None:
    # Unknown batch dim: take it from the input tensor at runtime.
    output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
    output_shape = array_ops.stack(list(output_shape))
  padding = _preprocess_padding(padding)
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides
  x = nn.conv3d_transpose(
      x,
      kernel,
      output_shape,
      strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
           pool_size,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """2D Pooling.

  Arguments:
      x: Tensor or variable.
      pool_size: tuple of 2 integers.
      strides: tuple of 2 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.

  Returns:
      A tensor, result of 2D pooling.

  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
          `"channels_first"`.
      ValueError: if `pool_size` is not a tuple of 2 integers.
      ValueError: if `strides` is not a tuple of 2 integers.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if len(pool_size) != 2:
    raise ValueError('`pool_size` must be a tuple of 2 integers.')
  if len(strides) != 2:
    raise ValueError('`strides` must be a tuple of 2 integers.')
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand both window and strides to full 4D specs for the TF layout.
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size
  if pool_mode == 'avg':
    x = nn.avg_pool(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  elif pool_mode == 'max':
    x = nn.max_pool(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  else:
    raise ValueError('Invalid pooling mode: ' + str(pool_mode))
  if tf_data_format == 'NHWC' and data_format == 'channels_first':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
@keras_export('keras.backend.pool3d')
def pool3d(x,
           pool_size,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """3D Pooling.

  Arguments:
      x: Tensor or variable.
      pool_size: tuple of 3 integers.
      strides: tuple of 3 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.

  Returns:
      A tensor, result of 3D pooling.

  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
          `"channels_first"`.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand both window and strides to full 5D specs for the TF layout.
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size
  if pool_mode == 'avg':
    x = nn.avg_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  elif pool_mode == 'max':
    x = nn.max_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  else:
    raise ValueError('Invalid pooling mode: ' + str(pool_mode))
  if tf_data_format == 'NDHWC' and data_format == 'channels_first':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
def local_conv(inputs,
               kernel,
               kernel_size,
               strides,
               output_shape,
               data_format=None):
  """Apply N-D convolution with un-shared weights.

  Arguments:
      inputs: (N+2)-D tensor with shape
          (batch_size, channels_in, d_in1, ..., d_inN)
          if data_format='channels_first', or
          (batch_size, d_in1, ..., d_inN, channels_in)
          if data_format='channels_last'.
      kernel: the unshared weight for N-D convolution,
          with shape (output_items, feature_dim, channels_out), where
          feature_dim = np.prod(kernel_size) * channels_in,
          output_items = np.prod(output_shape).
      kernel_size: a tuple of N integers, specifying the
          spatial dimensions of the N-D convolution window.
      strides: a tuple of N integers, specifying the strides
          of the convolution along the spatial dimensions.
      output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
          dimensionality of the output.
      data_format: string, "channels_first" or "channels_last".

  Returns:
      An (N+2)-D tensor with shape:
      (batch_size, channels_out) + output_shape
      if data_format='channels_first', or:
      (batch_size,) + output_shape + (channels_out,)
      if data_format='channels_last'.

  Raises:
      ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  kernel_shape = int_shape(kernel)
  feature_dim = kernel_shape[1]
  channels_out = kernel_shape[-1]
  ndims = len(output_shape)
  spatial_dimensions = list(range(ndims))
  xs = []
  # For each output position, slice out the receptive field from the input
  # and flatten it to (1, batch, feature_dim).
  output_axes_ticks = [range(axis_max) for axis_max in output_shape]
  for position in itertools.product(*output_axes_ticks):
    slices = [slice(None)]  # batch dimension
    if data_format == 'channels_first':
      slices.append(slice(None))  # channel dim precedes spatial dims
    slices.extend([slice(position[d] * strides[d],
                         position[d] * strides[d] + kernel_size[d])
                   for d in spatial_dimensions])
    if data_format == 'channels_last':
      slices.append(slice(None))  # channel dim follows spatial dims
    xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
  # Stack all receptive fields: (output_items, batch, feature_dim), then
  # batch-multiply with the per-position kernels.
  x_aggregate = concatenate(xs, axis=0)
  output = batch_dot(x_aggregate, kernel)
  output = reshape(output, output_shape + (-1, channels_out))
  # Move batch (and channels) back to their conventional positions.
  if data_format == 'channels_first':
    permutation = [ndims, ndims + 1] + spatial_dimensions
  else:
    permutation = [ndims] + spatial_dimensions + [ndims + 1]
  return permute_dimensions(output, permutation)
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
  """Apply 1D conv with un-shared weights.

  Arguments:
      inputs: 3D tensor with shape:
          (batch_size, steps, input_dim)
          if data_format is "channels_last" or
          (batch_size, input_dim, steps)
          if data_format is "channels_first".
      kernel: the unshared weight for convolution,
          with shape (output_length, feature_dim, filters).
      kernel_size: a tuple of a single integer,
          specifying the length of the 1D convolution window.
      strides: a tuple of a single integer,
          specifying the stride length of the convolution.
      data_format: the data format, channels_first or channels_last.

  Returns:
      A 3d tensor with shape:
      (batch_size, output_length, filters)
      if data_format='channels_first'
      or 3D tensor with shape:
      (batch_size, filters, output_length)
      if data_format='channels_last'.
  """
  # The spatial output length is encoded in the kernel's leading dim.
  spatial_output_shape = (kernel.shape[0],)
  return local_conv(
      inputs, kernel, kernel_size, strides, spatial_output_shape, data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
                 kernel,
                 kernel_size,
                 strides,
                 output_shape,
                 data_format=None):
  """Apply 2D conv with un-shared weights.

  Arguments:
      inputs: 4D tensor with shape:
          (batch_size, filters, new_rows, new_cols)
          if data_format='channels_first'
          or 4D tensor with shape:
          (batch_size, new_rows, new_cols, filters)
          if data_format='channels_last'.
      kernel: the unshared weight for convolution,
          with shape (output_items, feature_dim, filters).
      kernel_size: a tuple of 2 integers, specifying the
          width and height of the 2D convolution window.
      strides: a tuple of 2 integers, specifying the strides
          of the convolution along the width and height.
      output_shape: a tuple with (output_row, output_col).
      data_format: the data format, channels_first or channels_last.

  Returns:
      A 4D tensor with shape:
      (batch_size, filters, new_rows, new_cols)
      if data_format='channels_first'
      or 4D tensor with shape:
      (batch_size, new_rows, new_cols, filters)
      if data_format='channels_last'.
  """
  # Pure delegation: the N-D implementation handles the 2D case directly.
  return local_conv(
      inputs, kernel, kernel_size, strides, output_shape, data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
  """Adds a bias vector to a tensor.

  Arguments:
      x: Tensor or variable.
      bias: Bias tensor to add.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      Output tensor.

  Raises:
      ValueError: In one of the two cases below:
          1. invalid `data_format` argument.
          2. invalid bias shape.
             the bias should be either a vector or
             a tensor with ndim(x) - 1 dimension
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  bias_shape = int_shape(bias)
  if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
    raise ValueError(
        'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x)))
  # Each branch reshapes `bias` so it broadcasts against `x` for the given
  # rank and data format, then adds it.
  # pylint: disable=g-no-augmented-assignment
  if ndim(x) == 5:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
      else:
        # channels-last-shaped bias: move channels to position 1.
        x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
      else:
        x = x + reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 4:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        if _has_nchw_support():
          # Fused bias add in native NCHW where available.
          x = nn.bias_add(x, bias, data_format='NCHW')
        else:
          x = x + reshape(bias, (1, bias_shape[0], 1, 1))
      else:
        # channels-last-shaped bias: move channels to position 1.
        x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x = nn.bias_add(x, bias, data_format='NHWC')
      else:
        x = x + reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 3:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, bias_shape[0], 1))
      else:
        x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, 1, bias_shape[0]))
      else:
        x = x + reshape(bias, (1,) + bias_shape)
  else:
    # Any other rank: defer to TF's default bias_add (channels-last).
    x = nn.bias_add(x, bias)
  # pylint: enable=g-no-augmented-assignment
  return x
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with normal distribution of values.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      mean: A float, mean of the normal distribution to draw samples.
      stddev: A float, standard deviation of the normal distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  # Fall back to the Keras float type and a fresh random seed as needed.
  dtype = floatx() if dtype is None else dtype
  seed = np.random.randint(10e6) if seed is None else seed
  return random_ops.random_normal(
      shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
  """Returns a tensor with uniform distribution of values.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      minval: A float, lower boundary of the uniform distribution
          to draw samples.
      maxval: A float, upper boundary of the uniform distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  # Fall back to the Keras float type and a fresh random seed as needed.
  dtype = floatx() if dtype is None else dtype
  seed = np.random.randint(10e6) if seed is None else seed
  return random_ops.random_uniform(
      shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random binomial distribution of values.

  The binomial distribution with parameters `n` and `p` is the probability
  distribution of the number of successful Bernoulli process. Only supports
  `n` = 1 for now.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      p: A float, `0. <= p <= 1`, probability of binomial distribution.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  # Fall back to the Keras float type and a fresh random seed as needed.
  dtype = floatx() if dtype is None else dtype
  seed = np.random.randint(10e6) if seed is None else seed
  # Bernoulli draw: 1 where a uniform sample lands at or below p, else 0.
  uniform_draw = random_ops.random_uniform(shape, dtype=dtype, seed=seed)
  return array_ops.where(
      uniform_draw <= p,
      array_ops.ones(shape, dtype=dtype),
      array_ops.zeros(shape, dtype=dtype))
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with truncated random normal distribution of values.

  The generated values follow a normal distribution
  with specified mean and standard deviation,
  except that values whose magnitude is more than
  two standard deviations from the mean are dropped and re-picked.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      mean: Mean of the values.
      stddev: Standard deviation of the values.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  # Fall back to the Keras float type and a fresh random seed as needed.
  dtype = floatx() if dtype is None else dtype
  seed = np.random.randint(10e6) if seed is None else seed
  return random_ops.truncated_normal(
      shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
  """Converts CTC labels from dense to sparse.

  Arguments:
      labels: dense CTC labels.
      label_lengths: length of the labels.

  Returns:
      A sparse tensor representation of the labels.
  """
  label_shape = array_ops.shape(labels)
  num_batches_tns = array_ops.stack([label_shape[0]])
  max_num_labels_tns = array_ops.stack([label_shape[1]])

  def range_less_than(_, current_input):
    # For one row, mark the positions [0, current_input) as valid labels.
    return array_ops.expand_dims(
        math_ops.range(label_shape[1]), 0) < array_ops.fill(
            max_num_labels_tns, current_input)

  init = math_ops.cast(
      array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
  # Build a boolean (batch, max_labels) mask of in-range label positions.
  dense_mask = functional_ops.scan(
      range_less_than, label_lengths, initializer=init, parallel_iterations=1)
  dense_mask = dense_mask[:, 0, :]
  # Column indices of the kept labels.
  label_array = array_ops.reshape(
      array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
      label_shape)
  label_ind = array_ops.boolean_mask(label_array, dense_mask)
  # Row (batch) indices of the kept labels.
  batch_array = array_ops.transpose(
      array_ops.reshape(
          array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
          reverse(label_shape, 0)))
  batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
  # Combine into (num_kept, 2) [batch, label] index pairs.
  indices = array_ops.transpose(
      array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
  vals_sparse = array_ops.gather_nd(labels, indices)
  return sparse_tensor.SparseTensor(
      math_ops.cast(indices, dtypes_module.int64), vals_sparse,
      math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
  """Runs CTC loss algorithm on each batch element.

  Arguments:
      y_true: tensor `(samples, max_string_length)`
          containing the truth labels.
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_pred`.
      label_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_true`.

  Returns:
      Tensor with shape (samples,1) containing the
      CTC loss of each element.
  """
  # Collapse the trailing singleton dimension and convert to int32, as
  # required by the underlying CTC op.
  def _as_int32_vector(lengths):
    return math_ops.cast(
        array_ops.squeeze(lengths, axis=-1), dtypes_module.int32)

  label_length = _as_int32_vector(label_length)
  input_length = _as_int32_vector(input_length)
  # The CTC op consumes the ground truth as an int32 sparse tensor.
  sparse_labels = math_ops.cast(
      ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
  # Convert the softmax output to time-major log-probabilities.
  log_probs = math_ops.log(
      array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
  loss = ctc.ctc_loss(
      inputs=log_probs, labels=sparse_labels, sequence_length=input_length)
  return array_ops.expand_dims(loss, 1)
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
  """Decodes the output of a softmax.

  Can use either greedy search (also known as best path)
  or a constrained dictionary search.

  Arguments:
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, )` containing the sequence length for
          each batch item in `y_pred`.
      greedy: perform much faster best-path search if `true`.
          This does not use a dictionary.
      beam_width: if `greedy` is `false`: a beam search decoder will be used
          with a beam of this width.
      top_paths: if `greedy` is `false`,
          how many of the most probable paths will be returned.

  Returns:
      Tuple:
          List: if `greedy` is `true`, returns a list of one element that
              contains the decoded sequence.
              If `false`, returns the `top_paths` most probable
              decoded sequences.
              Important: blank labels are returned as `-1`.
          Tensor `(top_paths, )` that contains
              the log probability of each decoded sequence.
  """
  # The TF CTC decoders expect time-major log-probabilities and int32 lengths.
  log_probs = math_ops.log(
      array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
  seq_lengths = math_ops.cast(input_length, dtypes_module.int32)

  if greedy:
    decoded, log_prob = ctc.ctc_greedy_decoder(
        inputs=log_probs, sequence_length=seq_lengths)
  else:
    decoded, log_prob = ctc.ctc_beam_search_decoder(
        inputs=log_probs,
        sequence_length=seq_lengths,
        beam_width=beam_width,
        top_paths=top_paths)

  # Densify each sparse decoding, padding unused positions with -1 (blank).
  decoded_dense = []
  for sparse in decoded:
    decoded_dense.append(
        sparse_ops.sparse_to_dense(
            sparse.indices, sparse.dense_shape, sparse.values,
            default_value=-1))
  return decoded_dense, log_prob
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
  """Map the function fn over the elements elems and return the outputs.

  Arguments:
      fn: Callable that will be called upon each element in elems
      elems: tensor
      name: A string name for the map node in the graph
      dtype: Output data type.

  Returns:
      Tensor with dtype `dtype`.
  """
  # Thin wrapper: forward everything to TensorFlow's map_fn implementation.
  return map_fn_lib.map_fn(fn, elems, dtype=dtype, name=name)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from left to right.

  Arguments:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[0]` in case of None)
      name: A string name for the foldl node in the graph

  Returns:
      Tensor with same type and shape as `initializer`.
  """
  # Thin wrapper: delegate directly to the TensorFlow implementation.
  return functional_ops.foldl(fn, elems, name=name, initializer=initializer)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from right to left.

  Arguments:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[-1]` in case of None)
      name: A string name for the foldr node in the graph

  Returns:
      Same type and shape as initializer
  """
  # Thin wrapper: delegate directly to the TensorFlow implementation.
  return functional_ops.foldr(fn, elems, name=name, initializer=initializer)
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
  _keras_dir = os.environ.get('KERAS_HOME')
else:
  _keras_base_dir = os.path.expanduser('~')
  _keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
  try:
    # Use a context manager so the config file handle is closed promptly;
    # the previous `json.load(open(...))` leaked the handle until GC.
    with open(_config_path) as _config_file:
      _config = json.load(_config_file)
  except ValueError:
    # Malformed JSON: fall back to the built-in defaults.
    _config = {}
  _floatx = _config.get('floatx', floatx())
  assert _floatx in {'float16', 'float32', 'float64'}
  _epsilon = _config.get('epsilon', epsilon())
  assert isinstance(_epsilon, float)
  _image_data_format = _config.get('image_data_format', image_data_format())
  assert _image_data_format in {'channels_last', 'channels_first'}
  # Apply the (validated) configuration globally.
  set_floatx(_floatx)
  set_epsilon(_epsilon)
  set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
  try:
    os.makedirs(_keras_dir)
  except OSError:
    # Except permission denied and potential race conditions
    # in multi-threaded environments.
    pass
if not os.path.exists(_config_path):
  # First run: persist the current defaults so later runs are stable.
  _config = {
      'floatx': floatx(),
      'epsilon': epsilon(),
      'backend': 'tensorflow',
      'image_data_format': image_data_format()
  }
  try:
    with open(_config_path, 'w') as f:
      f.write(json.dumps(_config, indent=4))
  except IOError:
    # Except permission denied.
    pass
def in_multi_worker_mode():
  """Whether we are operating in a Multi-Worker setting."""
  # TODO(rchao): Consider a warning if user uses multiple `model` method
  # calls in multi-worker setting.
  config_text = os.environ.get('TF_CONFIG', '{}')
  tf_config = json.loads(config_text)
  jobs = server_lib.ClusterSpec(tf_config.get('cluster', {})).jobs
  # NOTE: when TF_CONFIG is unset, the (falsy) empty dict is returned here.
  return tf_config and 'master' not in jobs
def configure_and_create_distributed_session(distribution_strategy):
  """Configure session config and create a session with it.

  Builds a session appropriate for `distribution_strategy` and installs it
  as the Keras session via `set_session`. In multi-worker mode, session
  creation is delegated to the distribute coordinator on each worker.
  """
  def _create_session(distribution_strategy):
    """Create the Distributed Strategy session."""
    session_config = get_default_session_config()
    # If a session already exists, merge in its config; in the case there is a
    # conflict, take values of the existing config.
    global _SESSION
    if getattr(_SESSION, 'session', None) and _SESSION.session._config:
      session_config.MergeFrom(_SESSION.session._config)
    if is_tpu_strategy(distribution_strategy):
      # TODO(priyag, yuefengz): Remove this workaround when Distribute
      # Coordinator is integrated with keras and we can create a session from
      # there.
      # TPU path: target the session at the TPU cluster's master.
      distribution_strategy.configure(session_config)
      master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
      session = session_module.Session(config=session_config, target=master)
    else:
      worker_context = dc_context.get_current_worker_context()
      if worker_context:
        # Running under the distribute coordinator: reuse its session config
        # and master target.
        dc_session_config = worker_context.session_config
        # Merge the default session config to the one from distribute
        # coordinator, which is fine for now since they don't have
        # conflicting configurations.
        dc_session_config.MergeFrom(session_config)
        session = session_module.Session(
            config=dc_session_config, target=worker_context.master_target)
      else:
        # Plain single-worker strategy: let it adjust the config, then
        # create a local session.
        distribution_strategy.configure(session_config)
        session = session_module.Session(config=session_config)
    # Install the new session as the Keras session.
    set_session(session)
  if in_multi_worker_mode():
    # Each worker creates its own session under the coordinator's control.
    dc.run_distribute_coordinator(
        _create_session,
        distribution_strategy,
        mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
  else:
    _create_session(distribution_strategy)
def is_tpu_strategy(strategy):
  """True when `strategy` is a (non-None) TPUStrategy variant."""
  if strategy is None:
    return False
  return strategy.__class__.__name__.startswith('TPUStrategy')
def cast_variables_to_tensor(tensors):
  """Replace each `Variable` in a nested structure with a tensor snapshot.

  Non-variable leaves are passed through unchanged.
  """
  def _snapshot(value):
    if isinstance(value, variables_module.Variable):
      return array_ops.identity(value)
    return value

  return nest.map_structure(_snapshot, tensors)
def _is_symbolic_tensor(x):
  """True for graph-mode (symbolic, non-eager) tensors."""
  if isinstance(x, ops.EagerTensor):
    return False
  return tensor_util.is_tensor(x)
| 31.457618 | 113 | 0.665627 |
76dcb47b417a015f2c1674070b2e2ddd7af1feb6 | 419 | py | Python | utils.py | albina77/bio | 18d46bc12ad5f40ed44a995c87010e2504653313 | [
"Apache-2.0"
] | null | null | null | utils.py | albina77/bio | 18d46bc12ad5f40ed44a995c87010e2504653313 | [
"Apache-2.0"
] | null | null | null | utils.py | albina77/bio | 18d46bc12ad5f40ed44a995c87010e2504653313 | [
"Apache-2.0"
] | null | null | null | from typing import List
from models import Generation
def filter_generation_by_index(generations: List[Generation], generation_index: int = 0):
    """Return the first generation whose ``index`` equals ``generation_index``.

    Raises AttributeError when no generation in ``generations`` carries the
    requested index.
    """
    matches = [g for g in generations if g.index == generation_index]
    if not matches:
        raise AttributeError(f"Поколения №{generation_index} не существует в переданном массиве")
    return matches[0]
| 19.045455 | 104 | 0.763723 |
331973e9ccbee270dc61589ecf90116dac216f9d | 658 | py | Python | src/djanban/apps/boards/migrations/0068_auto_20170515_1844.py | diegojromerolopez/djanban | 6451688d49cf235d03c604b19a6a8480b33eed87 | [
"MIT"
] | 33 | 2017-06-14T18:04:25.000Z | 2021-06-15T07:07:56.000Z | src/djanban/apps/boards/migrations/0068_auto_20170515_1844.py | diegojromerolopez/djanban | 6451688d49cf235d03c604b19a6a8480b33eed87 | [
"MIT"
] | 1 | 2017-05-10T08:45:55.000Z | 2017-05-10T08:45:55.000Z | src/djanban/apps/boards/migrations/0068_auto_20170515_1844.py | diegojromerolopez/djanban | 6451688d49cf235d03c604b19a6a8480b33eed87 | [
"MIT"
] | 8 | 2017-08-27T11:14:25.000Z | 2021-03-03T12:11:16.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-15 16:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: alters CardComment.board into a
    # required foreign key to boards.Board.

    dependencies = [
        ('boards', '0067_auto_20170515_1839'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cardcomment',
            name='board',
            # `default=1` back-fills existing rows during the migration;
            # preserve_default=False means the default is not kept on the
            # model afterwards.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='card_comments', to='boards.Board', verbose_name='Board this comment belongs to'),
            preserve_default=False,
        ),
    ]
| 28.608696 | 187 | 0.667173 |
371fe9434cf7e6181a916e34450ae03e63d80de5 | 4,113 | py | Python | evaluation.py | WANGDow/Video_Based_Guitarist_Performance | bf27d1a817ff12e67e22f2f07a95a5e984962dd8 | [
"MIT"
] | null | null | null | evaluation.py | WANGDow/Video_Based_Guitarist_Performance | bf27d1a817ff12e67e22f2f07a95a5e984962dd8 | [
"MIT"
] | null | null | null | evaluation.py | WANGDow/Video_Based_Guitarist_Performance | bf27d1a817ff12e67e22f2f07a95a5e984962dd8 | [
"MIT"
] | null | null | null | '''
Author: WANG Zichen
The Evaluation class which carries out the whole evaluation procedure,
including video modification, keypoints extraction, data preparation,
and result generation.
'''
import torch
import numpy as np
from training import NotSimpleNet
from video_edit import EditVideo
from coor_extraction import CoorExtraction
import dataset_prepare as dp
import time
# For testing purposes only: hard-coded sample paths used by the
# commented-out __main__ driver at the bottom of this module.
MODEL_PATH = "torch_model/model_0.7608108108108108.model"  # pre-trained torch classifier checkpoint
VIDEO_PATH = "cases/s_10_1_new_3.MOV"  # sample performance video
PROTO_FILE_PATH = "caffe_model/pose_deploy.prototxt"  # caffe pose-model network definition
WEIGHTS_FILE_PATH = "caffe_model/pose_iter_102000.caffemodel"  # caffe pose-model weights
class Evaluation():
    '''
    Carry out the whole evaluation procedure, including
    video modification, keypoints extraction, data preparation and
    result generation.

    Parameters:
        video_path (str): the path of the input video
        model_path (str): the path of the pre-trained classifier model
        proto_file_path (str): the path of the proto file
        weights_file_path (str): the path of the weight file
        threshold (float): the detection threshold; the higher the
            more accurate. default=0.5
    '''
    def __init__(self, video_path, model_path, proto_file_path, weights_file_path, threshold=0.5):
        self._video_path = video_path
        # Load the trained classifier directly.  (Previously a fresh
        # NotSimpleNet() was instantiated and immediately overwritten by
        # torch.load — dead code, removed.)
        self._model = torch.load(model_path)
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._proto_file_path = proto_file_path
        self._weights_file_path = weights_file_path
        self._threshold = threshold

    def _start_evaluation(self, log_file):
        '''
        Initiate the evaluation procedure and report the result.

        Parameter:
            log_file (str): path of a pre-computed keypoint extraction log.
                NOTE(review): the CoorExtraction step is currently bypassed
                — re-enable the call below to extract keypoints from the
                video itself.

        Returns:
            (str, float): the proficiency level and the detection confidence.
        '''
        t = time.time()
        print("Evaluation has been initialized")
        # Keypoint extraction is skipped for now; `log_file` must point at
        # an existing extraction log:
        # extraction = CoorExtraction(self._proto_file_path, self._weights_file_path, self._video_path, self._threshold)
        # log_file = extraction._run_extraction()
        csv_generation = dp.CSV_Generation(log_file)
        csv_file = csv_generation._start_csv()
        input = self._data_loader(csv_file)
        prof, conf = self._eval_result(input)
        print("Proficiency: " + prof)
        print("Confidence: {}".format(conf))
        print("Total time taken for evaluation: {:.3f}".format((time.time() - t) / 60) + " MINUTES")
        return prof, conf

    def _data_loader(self, csv_path):
        '''
        Read the csv file to generate the test set.
        Return the samples as a float32 tensor (one row per sample).

        Parameter:
            csv_path (str): the path of the input csv file
        '''
        xy = np.loadtxt(csv_path, delimiter=",", dtype=np.float32)
        # (An unused local that shadowed the builtin `len` was removed here.)
        return torch.from_numpy(xy[:, 0:])

    def _eval_result(self, testset):
        '''
        Run the model over the test set and derive the evaluation result.
        Return the proficiency level ("New", "Fluent" or "Skilled") and the
        detection confidence (the fraction of samples voting for it).

        Parameter:
            testset (iterable of Tensor): the dataset read from the csv file
        '''
        self._model.eval()
        predictions = []
        # Inference only: disable autograd bookkeeping.
        with torch.no_grad():
            for data in testset:
                data = data.to(self._device)
                outputs = self._model(data)
                prediction = outputs.data.max(1)[1].cpu().numpy()[0]
                predictions.append(prediction)
        # Majority vote over the per-sample class predictions
        # (0 = New, 1 = Fluent, 2 = Skilled).
        result = [0, 0, 0]
        for predict in predictions:
            result[predict] += 1
        if result[0] > result[1] and result[0] > result[2]:
            return "New", (result[0] / len(predictions))
        elif result[1] > result[0] and result[1] > result[2]:
            return "Fluent", (result[1] / len(predictions))
        else:
            # Ties and a "Skilled" majority both land here.
            return "Skilled", (result[2] / len(predictions))
#For testing purposes only
'''
if __name__ == "__main__":
evaluation = Evaluation(VIDEO_PATH, MODEL_PATH, PROTO_FILE_PATH, WEIGHTS_FILE_PATH)
evaluation._start_evaluation()
print("Finish")
'''
| 36.078947 | 120 | 0.63506 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.